hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7a71b65ab10cf126adef0ff97dec1b09a8f370a7 | 7,170 | py | Python | neutron_lbaas/common/cert_manager/local_cert_manager.py | gotostack/neutron-lbaas | aea6f1f3c512ef94c89210d919f3b807b907edbe | [
"Apache-2.0"
] | null | null | null | neutron_lbaas/common/cert_manager/local_cert_manager.py | gotostack/neutron-lbaas | aea6f1f3c512ef94c89210d919f3b807b907edbe | [
"Apache-2.0"
] | null | null | null | neutron_lbaas/common/cert_manager/local_cert_manager.py | gotostack/neutron-lbaas | aea6f1f3c512ef94c89210d919f3b807b907edbe | [
"Apache-2.0"
] | null | null | null | # Copyright 2014, 2015 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import uuid
from neutron.i18n import _LI, _LE
from oslo_config import cfg
from oslo_log import log as logging
from neutron_lbaas.common.cert_manager import cert_manager
from neutron_lbaas.common import exceptions
# Module-level logger for this local cert manager backend.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Default certificate storage directory; may be overridden at deploy time
# via the OS_LBAAS_TLS_STORAGE environment variable.
TLS_STORAGE_DEFAULT = os.environ.get(
    'OS_LBAAS_TLS_STORAGE', '/var/lib/neutron-lbaas/certificates/'
)
local_cert_manager_opts = [
    cfg.StrOpt('storage_path',
               default=TLS_STORAGE_DEFAULT,
               help='Absolute path to the certificate storage directory. '
                    'Defaults to env[OS_LBAAS_TLS_STORAGE].')
]
# Expose the option under the [certificates] section of the config file.
CONF.register_opts(local_cert_manager_opts, group='certificates')
class Cert(cert_manager.Cert):
    """Container for TLS certificate data held in local storage.

    Bundles a PEM certificate with its private key and the optional
    intermediate chain and key passphrase, exposing each piece through a
    simple accessor.
    """

    def __init__(self, certificate, private_key, intermediates=None,
                 private_key_passphrase=None):
        # Store every component exactly as supplied by the caller.
        self.certificate = certificate
        self.private_key = private_key
        self.intermediates = intermediates
        self.private_key_passphrase = private_key_passphrase

    def get_certificate(self):
        """Return the PEM encoded certificate."""
        return self.certificate

    def get_intermediates(self):
        """Return the concatenated intermediate certificates, if any."""
        return self.intermediates

    def get_private_key(self):
        """Return the private key for the certificate."""
        return self.private_key

    def get_private_key_passphrase(self):
        """Return the passphrase for the private key, if any."""
        return self.private_key_passphrase
class CertManager(cert_manager.CertManager):
    """Cert Manager Interface that stores data locally.

    Each certificate is stored as a set of files named <uuid>.<ext>
    (.crt, .key, and optionally .int and .pass) under the configured
    ``[certificates] storage_path`` directory.
    """

    @staticmethod
    def store_cert(certificate, private_key, intermediates=None,
                   private_key_passphrase=None, **kwargs):
        """Stores (i.e., registers) a cert with the cert manager.

        This method stores the specified cert to the filesystem and returns
        a UUID that can be used to retrieve it.

        :param certificate: PEM encoded TLS certificate
        :param private_key: private key for the supplied certificate
        :param intermediates: ordered and concatenated intermediate certs
        :param private_key_passphrase: optional passphrase for the supplied key
        :returns: the UUID of the stored cert
        :raises CertificateStorageException: if certificate storage fails
        """
        cert_ref = str(uuid.uuid4())
        filename_base = os.path.join(CONF.certificates.storage_path, cert_ref)
        LOG.info(_LI(
            "Storing certificate data on the local filesystem."
        ))
        try:
            filename_certificate = "{0}.crt".format(filename_base)
            with open(filename_certificate, 'w') as cert_file:
                cert_file.write(certificate)
            filename_private_key = "{0}.key".format(filename_base)
            with open(filename_private_key, 'w') as key_file:
                key_file.write(private_key)
            # The intermediate chain and passphrase are optional, so their
            # files are only created when data was actually supplied.
            if intermediates:
                filename_intermediates = "{0}.int".format(filename_base)
                with open(filename_intermediates, 'w') as int_file:
                    int_file.write(intermediates)
            if private_key_passphrase:
                filename_pkp = "{0}.pass".format(filename_base)
                with open(filename_pkp, 'w') as pass_file:
                    pass_file.write(private_key_passphrase)
        except IOError as ioe:
            LOG.error(_LE("Failed to store certificate."))
            # BUG FIX: IOError has no ``message`` attribute on Python 3
            # (accessing it raised AttributeError while handling the real
            # error); use str() to obtain the error text portably.
            raise exceptions.CertificateStorageException(message=str(ioe))
        return cert_ref

    @staticmethod
    def get_cert(cert_ref, **kwargs):
        """Retrieves the specified cert.

        :param cert_ref: the UUID of the cert to retrieve
        :return: neutron_lbaas.common.cert_manager.cert_manager.Cert
                 representation of the certificate data
        :raises CertificateStorageException: if certificate retrieval fails
        """
        LOG.info(_LI(
            "Loading certificate {0} from the local filesystem."
        ).format(cert_ref))
        filename_base = os.path.join(CONF.certificates.storage_path, cert_ref)
        filename_certificate = "{0}.crt".format(filename_base)
        filename_private_key = "{0}.key".format(filename_base)
        filename_intermediates = "{0}.int".format(filename_base)
        filename_pkp = "{0}.pass".format(filename_base)
        cert_data = dict()
        try:
            with open(filename_certificate, 'r') as cert_file:
                cert_data['certificate'] = cert_file.read()
        except IOError:
            LOG.error(_LE(
                "Failed to read certificate for {0}."
            ).format(cert_ref))
            # NOTE(review): this uses the ``msg`` keyword while store/delete
            # use ``message`` — confirm which one the exception class expects.
            raise exceptions.CertificateStorageException(
                msg="Certificate could not be read."
            )
        try:
            with open(filename_private_key, 'r') as key_file:
                cert_data['private_key'] = key_file.read()
        except IOError:
            LOG.error(_LE(
                "Failed to read private key for {0}."
            ).format(cert_ref))
            raise exceptions.CertificateStorageException(
                msg="Private Key could not be read."
            )
        # Intermediates and passphrase are optional; a missing file simply
        # leaves the corresponding attribute unset.
        try:
            with open(filename_intermediates, 'r') as int_file:
                cert_data['intermediates'] = int_file.read()
        except IOError:
            pass
        try:
            with open(filename_pkp, 'r') as pass_file:
                cert_data['private_key_passphrase'] = pass_file.read()
        except IOError:
            pass
        return Cert(**cert_data)

    @staticmethod
    def delete_cert(cert_ref, **kwargs):
        """Deletes the specified cert.

        :param cert_ref: the UUID of the cert to delete
        :raises CertificateStorageException: if certificate deletion fails
        """
        LOG.info(_LI(
            "Deleting certificate {0} from the local filesystem."
        ).format(cert_ref))
        filename_base = os.path.join(CONF.certificates.storage_path, cert_ref)
        filename_certificate = "{0}.crt".format(filename_base)
        filename_private_key = "{0}.key".format(filename_base)
        filename_intermediates = "{0}.int".format(filename_base)
        filename_pkp = "{0}.pass".format(filename_base)
        try:
            # The certificate and key always exist for a stored cert.
            os.remove(filename_certificate)
            os.remove(filename_private_key)
        except OSError as ioe:
            # BUG FIX: os.remove raises OSError (IOError did not catch it on
            # py2), and exceptions have no ``message`` attribute on py3.
            LOG.error(_LE(
                "Failed to delete certificate {0}."
            ).format(cert_ref))
            raise exceptions.CertificateStorageException(message=str(ioe))
        # BUG FIX: the intermediates and passphrase files are only created
        # when store_cert received that data, so their absence here is not
        # an error; previously a missing .int file aborted the deletion.
        for optional_file in (filename_intermediates, filename_pkp):
            try:
                os.remove(optional_file)
            except OSError:
                pass
| 35.671642 | 79 | 0.645886 |
c13a56afb44b2c14451e8fa8f16c71dac2c9f8d8 | 205 | py | Python | selenium demo(test)/punit.py | Singularmotor/demo-sele | adf7073c83f0ae99c36a4f145de7826f9b467824 | [
"MIT"
] | null | null | null | selenium demo(test)/punit.py | Singularmotor/demo-sele | adf7073c83f0ae99c36a4f145de7826f9b467824 | [
"MIT"
] | null | null | null | selenium demo(test)/punit.py | Singularmotor/demo-sele | adf7073c83f0ae99c36a4f145de7826f9b467824 | [
"MIT"
] | null | null | null | #coding=utf-8
import unittest
class test(unittest.TestCase):
    """Minimal unittest demo case."""

    def testminus(self):
        """10 divided by 2 must equal 5."""
        result = 10 / 2
        # BUG FIX: the expected value was 6, which made this test always fail.
        hope = 5
        self.assertEqual(result, hope)
if __name__=="__main__":
unittest.main() | 18.636364 | 37 | 0.653659 |
5bc7b4e3731d8972bae3b88032b3067e2da29050 | 4,778 | py | Python | bot/config.py | jejen-juanda/TTMediaBot | a97fe2d51585175458958f0ae4a7b7296b329cfc | [
"MIT"
] | null | null | null | bot/config.py | jejen-juanda/TTMediaBot | a97fe2d51585175458958f0ae4a7b7296b329cfc | [
"MIT"
] | null | null | null | bot/config.py | jejen-juanda/TTMediaBot | a97fe2d51585175458958f0ae4a7b7296b329cfc | [
"MIT"
] | null | null | null | import json
import sys
import portalocker
from bot import utils
# Built-in defaults.  Any option missing from the user's config file is
# filled in from here, and the Python types of these values define the
# expected type of each option (see Config.get_types_dict / check_types).
default_config = {
    "general": {
        "language": "en",
        "send_channel_messages": True,
        "cache_file_name": "TTMediaBotCache.dat",
        "blocked_commands": [],
        "delete_uploaded_files_after": 300,
        "time_format": r"%H:%M",
        "load_event_handlers": False,
        "event_handlers_file_name": "event_handlers.py",
    },
    "sound_devices": {
        "output_device": 0,
        "input_device": 0
    },
    "player": {
        "default_volume": 50,
        "max_volume": 100,
        "volume_fading": True,
        "volume_fading_interval": 0.025,
        "seek_step": 5,
        "player_options": {
            "video": False,
            "ytdl": False
        },
    },
    # TeamTalk server connection settings.
    "teamtalk": {
        "hostname": "localhost",
        "tcp_port": 10333,
        "udp_port": 10333,
        "encrypted": False,
        "nickname": "TTMediaBot",
        "status": "",
        "gender": "n",
        "username": "",
        "password": "",
        "channel": "/",
        "channel_password": "",
        "license_name": "",
        "license_key": "",
        "reconnection_attempts": -1,
        "reconnection_timeout": 10,
        "users": {
            "admins": ["admin"],
            "banned_users": []
        }
    },
    # Media back-ends; "vk" requires an API token.
    "services": {
        "available_services": {
            "vk": {
                "token": "",
            },
            "yt": {}
        },
        "default_service": "vk"
    },
    "logger": {
        "log": True,
        "level": "INFO",
        "format": "%(levelname)s [%(asctime)s]: %(message)s in %(threadName)s file: %(filename)s line %(lineno)d function %(funcName)s",
        "mode": "FILE",
        "file_name": "TTMediaBot.log",
        "max_file_size": 0,
        "backup_count": 0
    }
}
def save_default_file():
    """Write the built-in default configuration to config_default.json."""
    target_path = utils.get_abs_path("config_default.json")
    with open(target_path, "w") as default_file:
        json.dump(default_config, default_file, indent=4, ensure_ascii=False)
class Config(dict):
    """Bot configuration loaded from a JSON file and merged with defaults.

    The instance behaves as a plain dict of the merged configuration.  While
    the instance is alive it holds an exclusive lock on the underlying file
    so two bot instances cannot share one configuration file.
    """

    def __init__(self, file_name):
        """Load, merge, type-check and lock the configuration.

        :param file_name: path to the JSON configuration file; a falsy value
            runs entirely from the built-in defaults (no file, no lock).
        :raises PermissionError: if another process already holds the lock.
        """
        if file_name:
            if utils.check_file_path(file_name):
                self.file_name = file_name
                with open(self.file_name, 'r', encoding='UTF-8') as f:
                    try:
                        config_dict = json.load(f)
                    except json.decoder.JSONDecodeError as e:
                        sys.exit("Syntax error in configuration file: " + str(e))
                # Exclusive, non-blocking lock held for the bot's lifetime.
                self.file_locker = portalocker.Lock(self.file_name, timeout=0, flags=portalocker.LOCK_EX|portalocker.LOCK_NB)
                try:
                    self.file_locker.acquire()
                except portalocker.exceptions.LockException:
                    raise PermissionError()
            else:
                sys.exit("Incorrect configuration file path")
        else:
            config_dict = {}
        filled_config_dict = self.fill(config_dict, default_config)
        types_dict = self.get_types_dict(default_config)
        # These two options legitimately accept either an int or a str.
        types_dict["teamtalk"]["channel"] = (int, str)
        types_dict["logger"]["mode"] = (int, str)
        self.check_types(filled_config_dict, types_dict)
        super().__init__(filled_config_dict)

    def close(self):
        """Release the exclusive lock held on the configuration file."""
        self.file_locker.release()

    def check_types(self, data, template):
        """Exit with a readable error if any value in *data* does not match
        the tuple of allowed types at the same position in *template*.
        """
        type_names_dict = {int: "integer", str: "string", float: "float", bool: "boolean", list: "list", dict: "dictionary"}
        for key in template:
            if type(template[key]) == dict:
                self.check_types(data[key], template[key])
            elif not type(data[key]) in template[key]:
                sys.exit("Invalid type: \"{}\" param in config must be {} not {}".format(key, " or ".join([type_names_dict[i] for i in template[key]]), type_names_dict[type(data[key])]))

    def fill(self, data, template):
        """Return *template* overlaid with the values from *data*.

        Keys present in *data* win, keys missing from *data* fall back to
        the template defaults, and extra keys in *data* are kept as-is.

        BUG FIX: the previous implementation destructively deleted keys from
        the caller's *data* dict; it now works on a shallow copy per level.
        """
        data = dict(data)
        result = {}
        for key in template:
            if key in data and type(template[key]) == dict:
                result[key] = self.fill(data[key], template[key])
                del data[key]
            elif key in data:
                result[key] = data[key]
                del data[key]
            else:
                result[key] = template[key]
        result.update(data)
        return result

    def get_types_dict(self, template):
        """Build a nested dict mapping every option to a one-element tuple of
        its allowed type, derived from the type of its default value."""
        result = {}
        for key in template:
            if isinstance(template[key], dict):
                result[key] = self.get_types_dict(template[key])
            else:
                result[key] = (type(template[key]),)
        return result

    def save(self):
        """Write the current configuration back to its file.

        The lock is released for the duration of the write and re-acquired
        afterwards.
        """
        self.file_locker.release()
        with open(self.file_name, 'w', encoding='UTF-8') as f:
            json.dump(self, f, indent=4, ensure_ascii=False)
        self.file_locker.acquire()
| 32.726027 | 186 | 0.531603 |
63ca04760e35ac2edd9649e8b131663d2ff2c7f4 | 1,711 | py | Python | src/add_Consumer.py | AdelineWang/Facial_Insights5 | 55f9f97088247fce56eeb9557c86e1f5837e7132 | [
"MIT"
] | null | null | null | src/add_Consumer.py | AdelineWang/Facial_Insights5 | 55f9f97088247fce56eeb9557c86e1f5837e7132 | [
"MIT"
] | 1 | 2018-07-05T19:42:49.000Z | 2018-07-05T19:42:49.000Z | src/add_Consumer.py | AdelineWang/Facial_Insights6 | 55f9f97088247fce56eeb9557c86e1f5837e7132 | [
"MIT"
] | 1 | 2018-07-05T23:57:22.000Z | 2018-07-05T23:57:22.000Z | # This file contains functions to make post request to the consumer tables.
import requests
import json
import asyncio
import os
from graphqlclient import GraphQLClient
import sys
# # Import Adelines Library.
sys.path.insert(0, '/home/ubuntu/fare/recognition/traits/src/')
from image_emotion_gender_demo import Person_Input #<= Import line for adeline's library
# GraphQL endpoint
GraphQL_Endpoint = 'http://35.183.111.132:82/graphql'
#This function will first perform the initial addition of the new Consumer
def addConsumer(uuid, laneNumber, storeNumber, timeDate, path):
    """Create a Consumer record via the GraphQL endpoint, then compute and
    attach facial insights from the images found at *path*.

    :param uuid: consumer identifier
    :param laneNumber: checkout lane the consumer was seen at
    :param storeNumber: store identifier
    :param timeDate: timestamp of the sighting
    :param path: directory of images used for the facial-insight analysis
    """
    gql_client = GraphQLClient(GraphQL_Endpoint)
    mutation = """
    mutation AddConsumer($storeID:Int!, $uuid:Int!, $laneNumber:Int! $timeDate:Float!){
    addConsumer(uuid: $uuid, storeID: $storeID, laneNumber: $laneNumber, timeDate: $timeDate){
    uuid
    }
    }
    """
    payload = {
        'uuid': uuid,
        'laneNumber': laneNumber,
        'storeID': storeNumber,
        'timeDate': timeDate
    }
    response = gql_client.execute(mutation, payload)
    # Follow up with the facial-insight analysis for the new record.
    addFacialInsights(uuid, path)
    print(response)
def addFacialInsights(uuid, path):
    """Compute facial insights for the images at *path* and push them to the
    consumer record identified by *uuid* via GraphQL."""
    gql_client = GraphQLClient(GraphQL_Endpoint)
    mutation = """
    mutation UpdateFacialInsights($uuid:Int!, $facialInsights:inputFaceData!){
    updateFacialInsights(uuid: $uuid, facialInsights: $facialInsights){
    uuid
    }
    }
    """
    # Run the facial analysis on the captured images.
    analyzer = Person_Input(path)
    insights = analyzer.get_Insights(path)
    gql_client.execute(mutation, {
        'uuid': uuid,
        'facialInsights': insights
    })
# Demo script call: registers a sample consumer (uuid 38, store 3, lane 3)
# using the images under ../images.
# NOTE(review): this runs on *import* of the module, not only when executed
# as a script — consider guarding it with ``if __name__ == "__main__":``.
addConsumer(38, 3, 3, 12345.9, "../images")
| 28.04918 | 98 | 0.684395 |
ce004e1b4f00211b9d716e9ce66f326e2e344dbb | 125,726 | py | Python | container/google/cloud/container_v1/gapic/cluster_manager_client.py | erikwebb/google-cloud-python | 288a878e9a07239015c78a193eca1cc15e926127 | [
"Apache-2.0"
] | 1 | 2019-04-16T08:13:06.000Z | 2019-04-16T08:13:06.000Z | container/google/cloud/container_v1/gapic/cluster_manager_client.py | erikwebb/google-cloud-python | 288a878e9a07239015c78a193eca1cc15e926127 | [
"Apache-2.0"
] | null | null | null | container/google/cloud/container_v1/gapic/cluster_manager_client.py | erikwebb/google-cloud-python | 288a878e9a07239015c78a193eca1cc15e926127 | [
"Apache-2.0"
] | 1 | 2020-11-15T11:44:36.000Z | 2020-11-15T11:44:36.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.container.v1 ClusterManager API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import grpc
from google.cloud.container_v1.gapic import cluster_manager_client_config
from google.cloud.container_v1.gapic import enums
from google.cloud.container_v1.gapic.transports import cluster_manager_grpc_transport
from google.cloud.container_v1.proto import cluster_service_pb2
from google.cloud.container_v1.proto import cluster_service_pb2_grpc
from google.protobuf import empty_pb2
# Installed version of this library; reported in the user-agent string of
# every API request (see ClientInfo below).
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
    "google-cloud-container"
).version
class ClusterManagerClient(object):
    """Client for the Google Kubernetes Engine Cluster Manager v1 API."""

    SERVICE_ADDRESS = "container.googleapis.com:443"
    """The default address of the service."""

    # The name of the interface for this client. This is the key used to
    # find the method configuration in the client_config dictionary.
    _INTERFACE_NAME = "google.container.v1.ClusterManager"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ClusterManagerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    def __init__(
        self,
        transport=None,
        channel=None,
        credentials=None,
        client_config=None,
        client_info=None,
    ):
        """Constructor.

        Args:
            transport (Union[~.ClusterManagerGrpcTransport,
                Callable[[~.Credentials, type], ~.ClusterManagerGrpcTransport]):
                A transport instance, responsible for actually making the
                API calls. The default transport uses the gRPC protocol.
                This argument may also be a callable which returns a
                transport instance. Callables will be sent the credentials
                as the first argument and the default transport class as
                the second argument.
            channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
                through which to make calls. This argument is mutually
                exclusive with ``credentials``; providing both will raise
                an exception.
            credentials (google.auth.credentials.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
                This argument is mutually exclusive with providing a
                transport instance to ``transport``; doing so will raise
                an exception.
            client_config (dict): DEPRECATED. A dictionary of call options
                for each method. If not specified, the default
                configuration is used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Raise deprecation warnings for things we want to go away.
        if client_config is not None:
            warnings.warn(
                "The `client_config` argument is deprecated.",
                PendingDeprecationWarning,
                stacklevel=2,
            )
        else:
            # No explicit config supplied; fall back to the generated
            # per-method defaults shipped with this package.
            client_config = cluster_manager_client_config.config

        if channel:
            warnings.warn(
                "The `channel` argument is deprecated; use " "`transport` instead.",
                PendingDeprecationWarning,
                stacklevel=2,
            )

        # Instantiate the transport.
        # The transport is responsible for handling serialization and
        # deserialization and actually sending data to the service.
        if transport:
            if callable(transport):
                # A transport factory: call it with the credentials and the
                # default transport class to build the instance.
                self.transport = transport(
                    credentials=credentials,
                    default_class=cluster_manager_grpc_transport.ClusterManagerGrpcTransport,
                )
            else:
                # A ready-made transport carries its own credentials, so
                # passing credentials alongside it is ambiguous.
                if credentials:
                    raise ValueError(
                        "Received both a transport instance and "
                        "credentials; these are mutually exclusive."
                    )
                self.transport = transport
        else:
            self.transport = cluster_manager_grpc_transport.ClusterManagerGrpcTransport(
                address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
            )

        if client_info is None:
            client_info = google.api_core.gapic_v1.client_info.ClientInfo(
                gapic_version=_GAPIC_LIBRARY_VERSION
            )
        else:
            client_info.gapic_version = _GAPIC_LIBRARY_VERSION
        self._client_info = client_info

        # Parse out the default settings for retry and timeout for each RPC
        # from the client configuration.
        # (Ordinarily, these are the defaults specified in the `*_config.py`
        # file next to this one.)
        self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
            client_config["interfaces"][self._INTERFACE_NAME]
        )

        # Save a dictionary of cached API call functions.
        # These are the actual callables which invoke the proper
        # transport methods, wrapped with `wrap_method` to add retry,
        # timeout, and the like.
        self._inner_api_calls = {}
# Service calls
def list_clusters(
self,
project_id,
zone,
parent=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists all clusters owned by a project in either the specified zone or all
zones.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> response = client.list_clusters(project_id, zone)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the parent field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides, or "-" for all zones. This field has been
deprecated and replaced by the parent field.
parent (str): The parent (project and location) where the clusters will be listed.
Specified in the format 'projects/*/locations/*'. Location "-" matches
all zones and all regions.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.ListClustersResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_clusters" not in self._inner_api_calls:
self._inner_api_calls[
"list_clusters"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_clusters,
default_retry=self._method_configs["ListClusters"].retry,
default_timeout=self._method_configs["ListClusters"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.ListClustersRequest(
project_id=project_id, zone=zone, parent=parent
)
return self._inner_api_calls["list_clusters"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_cluster(
self,
project_id,
zone,
cluster_id,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets the details of a specific cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> response = client.get_cluster(project_id, zone, cluster_id)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster to retrieve.
This field has been deprecated and replaced by the name field.
name (str): The name (project, location, cluster) of the cluster to retrieve.
Specified in the format 'projects/*/locations/*/clusters/\*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Cluster` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"get_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_cluster,
default_retry=self._method_configs["GetCluster"].retry,
default_timeout=self._method_configs["GetCluster"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.GetClusterRequest(
project_id=project_id, zone=zone, cluster_id=cluster_id, name=name
)
return self._inner_api_calls["get_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def create_cluster(
self,
project_id,
zone,
cluster,
parent=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a cluster, consisting of the specified number and type of Google
Compute Engine instances.
By default, the cluster is created in the project's `default
network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
One firewall is added for the cluster. After cluster creation, the
cluster creates routes for each node to allow the containers on that
node to communicate with all other instances in the cluster.
Finally, an entry is added to the project's global metadata indicating
which CIDR range is being used by the cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster`:
>>> cluster = {}
>>>
>>> response = client.create_cluster(project_id, zone, cluster)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the parent field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the parent field.
cluster (Union[dict, ~google.cloud.container_v1.types.Cluster]): A `cluster
resource <https://cloud.google.com/container-engine/reference/rest/v1/projects.zones.clusters>`__
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.Cluster`
parent (str): The parent (project and location) where the cluster will be created.
Specified in the format 'projects/*/locations/*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"create_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_cluster,
default_retry=self._method_configs["CreateCluster"].retry,
default_timeout=self._method_configs["CreateCluster"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.CreateClusterRequest(
project_id=project_id, zone=zone, cluster=cluster, parent=parent
)
return self._inner_api_calls["create_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def update_cluster(
self,
project_id,
zone,
cluster_id,
update,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates the settings of a specific cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `update`:
>>> update = {}
>>>
>>> response = client.update_cluster(project_id, zone, cluster_id, update)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster to upgrade.
This field has been deprecated and replaced by the name field.
update (Union[dict, ~google.cloud.container_v1.types.ClusterUpdate]): A description of the update.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.ClusterUpdate`
name (str): The name (project, location, cluster) of the cluster to update.
Specified in the format 'projects/*/locations/*/clusters/\*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"update_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_cluster,
default_retry=self._method_configs["UpdateCluster"].retry,
default_timeout=self._method_configs["UpdateCluster"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.UpdateClusterRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
update=update,
name=name,
)
return self._inner_api_calls["update_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def update_node_pool(
self,
project_id,
zone,
cluster_id,
node_pool_id,
node_version,
image_type,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates the version and/or image type for a specific node pool.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `node_pool_id`:
>>> node_pool_id = ''
>>>
>>> # TODO: Initialize `node_version`:
>>> node_version = ''
>>>
>>> # TODO: Initialize `image_type`:
>>> image_type = ''
>>>
>>> response = client.update_node_pool(project_id, zone, cluster_id, node_pool_id, node_version, image_type)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster to upgrade.
This field has been deprecated and replaced by the name field.
node_pool_id (str): Deprecated. The name of the node pool to upgrade.
This field has been deprecated and replaced by the name field.
node_version (str): The Kubernetes version to change the nodes to (typically an
upgrade).
Users may specify either explicit versions offered by Kubernetes Engine or
version aliases, which have the following behavior:
- "latest": picks the highest valid Kubernetes version
- "1.X": picks the highest valid patch+gke.N patch in the 1.X version
- "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version
- "1.X.Y-gke.N": picks an explicit Kubernetes version
- "-": picks the Kubernetes master version
image_type (str): The desired image type for the node pool.
name (str): The name (project, location, cluster, node pool) of the node pool to
update. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_node_pool" not in self._inner_api_calls:
self._inner_api_calls[
"update_node_pool"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_node_pool,
default_retry=self._method_configs["UpdateNodePool"].retry,
default_timeout=self._method_configs["UpdateNodePool"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.UpdateNodePoolRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
node_pool_id=node_pool_id,
node_version=node_version,
image_type=image_type,
name=name,
)
return self._inner_api_calls["update_node_pool"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def set_node_pool_autoscaling(
self,
project_id,
zone,
cluster_id,
node_pool_id,
autoscaling,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sets the autoscaling settings for a specific node pool.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `node_pool_id`:
>>> node_pool_id = ''
>>>
>>> # TODO: Initialize `autoscaling`:
>>> autoscaling = {}
>>>
>>> response = client.set_node_pool_autoscaling(project_id, zone, cluster_id, node_pool_id, autoscaling)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster to upgrade.
This field has been deprecated and replaced by the name field.
node_pool_id (str): Deprecated. The name of the node pool to upgrade.
This field has been deprecated and replaced by the name field.
autoscaling (Union[dict, ~google.cloud.container_v1.types.NodePoolAutoscaling]): Autoscaling configuration for the node pool.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.NodePoolAutoscaling`
name (str): The name (project, location, cluster, node pool) of the node pool to set
autoscaler settings. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "set_node_pool_autoscaling" not in self._inner_api_calls:
self._inner_api_calls[
"set_node_pool_autoscaling"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_node_pool_autoscaling,
default_retry=self._method_configs["SetNodePoolAutoscaling"].retry,
default_timeout=self._method_configs["SetNodePoolAutoscaling"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetNodePoolAutoscalingRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
node_pool_id=node_pool_id,
autoscaling=autoscaling,
name=name,
)
return self._inner_api_calls["set_node_pool_autoscaling"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def set_logging_service(
self,
project_id,
zone,
cluster_id,
logging_service,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sets the logging service for a specific cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `logging_service`:
>>> logging_service = ''
>>>
>>> response = client.set_logging_service(project_id, zone, cluster_id, logging_service)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster to upgrade.
This field has been deprecated and replaced by the name field.
logging_service (str): The logging service the cluster should use to write metrics. Currently
available options:
- "logging.googleapis.com" - the Google Cloud Logging service
- "none" - no metrics will be exported from the cluster
name (str): The name (project, location, cluster) of the cluster to set logging.
Specified in the format 'projects/*/locations/*/clusters/\*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "set_logging_service" not in self._inner_api_calls:
self._inner_api_calls[
"set_logging_service"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_logging_service,
default_retry=self._method_configs["SetLoggingService"].retry,
default_timeout=self._method_configs["SetLoggingService"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetLoggingServiceRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
logging_service=logging_service,
name=name,
)
return self._inner_api_calls["set_logging_service"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def set_monitoring_service(
self,
project_id,
zone,
cluster_id,
monitoring_service,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sets the monitoring service for a specific cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `monitoring_service`:
>>> monitoring_service = ''
>>>
>>> response = client.set_monitoring_service(project_id, zone, cluster_id, monitoring_service)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster to upgrade.
This field has been deprecated and replaced by the name field.
monitoring_service (str): The monitoring service the cluster should use to write metrics.
Currently available options:
- "monitoring.googleapis.com" - the Google Cloud Monitoring service
- "none" - no metrics will be exported from the cluster
name (str): The name (project, location, cluster) of the cluster to set monitoring.
Specified in the format 'projects/*/locations/*/clusters/\*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "set_monitoring_service" not in self._inner_api_calls:
self._inner_api_calls[
"set_monitoring_service"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_monitoring_service,
default_retry=self._method_configs["SetMonitoringService"].retry,
default_timeout=self._method_configs["SetMonitoringService"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetMonitoringServiceRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
monitoring_service=monitoring_service,
name=name,
)
return self._inner_api_calls["set_monitoring_service"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def set_addons_config(
self,
project_id,
zone,
cluster_id,
addons_config,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sets the addons for a specific cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `addons_config`:
>>> addons_config = {}
>>>
>>> response = client.set_addons_config(project_id, zone, cluster_id, addons_config)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster to upgrade.
This field has been deprecated and replaced by the name field.
addons_config (Union[dict, ~google.cloud.container_v1.types.AddonsConfig]): The desired configurations for the various addons available to run in the
cluster.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.AddonsConfig`
name (str): The name (project, location, cluster) of the cluster to set addons.
Specified in the format 'projects/*/locations/*/clusters/\*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "set_addons_config" not in self._inner_api_calls:
self._inner_api_calls[
"set_addons_config"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_addons_config,
default_retry=self._method_configs["SetAddonsConfig"].retry,
default_timeout=self._method_configs["SetAddonsConfig"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetAddonsConfigRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
addons_config=addons_config,
name=name,
)
return self._inner_api_calls["set_addons_config"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def set_locations(
self,
project_id,
zone,
cluster_id,
locations,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sets the locations for a specific cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `locations`:
>>> locations = []
>>>
>>> response = client.set_locations(project_id, zone, cluster_id, locations)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster to upgrade.
This field has been deprecated and replaced by the name field.
locations (list[str]): The desired list of Google Compute Engine
`locations <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster's nodes should be located. Changing the locations a
cluster is in will result in nodes being either created or removed from
the cluster, depending on whether locations are being added or removed.
This list must always include the cluster's primary zone.
name (str): The name (project, location, cluster) of the cluster to set locations.
Specified in the format 'projects/*/locations/*/clusters/\*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "set_locations" not in self._inner_api_calls:
self._inner_api_calls[
"set_locations"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_locations,
default_retry=self._method_configs["SetLocations"].retry,
default_timeout=self._method_configs["SetLocations"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetLocationsRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
locations=locations,
name=name,
)
return self._inner_api_calls["set_locations"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def update_master(
self,
project_id,
zone,
cluster_id,
master_version,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates the master for a specific cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `master_version`:
>>> master_version = ''
>>>
>>> response = client.update_master(project_id, zone, cluster_id, master_version)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster to upgrade.
This field has been deprecated and replaced by the name field.
master_version (str): The Kubernetes version to change the master to.
Users may specify either explicit versions offered by Kubernetes Engine or
version aliases, which have the following behavior:
- "latest": picks the highest valid Kubernetes version
- "1.X": picks the highest valid patch+gke.N patch in the 1.X version
- "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version
- "1.X.Y-gke.N": picks an explicit Kubernetes version
- "-": picks the default Kubernetes version
name (str): The name (project, location, cluster) of the cluster to update.
Specified in the format 'projects/*/locations/*/clusters/\*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_master" not in self._inner_api_calls:
self._inner_api_calls[
"update_master"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_master,
default_retry=self._method_configs["UpdateMaster"].retry,
default_timeout=self._method_configs["UpdateMaster"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.UpdateMasterRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
master_version=master_version,
name=name,
)
return self._inner_api_calls["update_master"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def set_master_auth(
self,
project_id,
zone,
cluster_id,
action,
update,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Used to set master auth materials. Currently supports :-
Changing the admin password for a specific cluster.
This can be either via password generation or explicitly set the password.
Example:
>>> from google.cloud import container_v1
>>> from google.cloud.container_v1 import enums
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `action`:
>>> action = enums.SetMasterAuthRequest.Action.UNKNOWN
>>>
>>> # TODO: Initialize `update`:
>>> update = {}
>>>
>>> response = client.set_master_auth(project_id, zone, cluster_id, action, update)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster to upgrade.
This field has been deprecated and replaced by the name field.
action (~google.cloud.container_v1.types.Action): The exact form of action to be taken on the master auth.
update (Union[dict, ~google.cloud.container_v1.types.MasterAuth]): A description of the update.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.MasterAuth`
name (str): The name (project, location, cluster) of the cluster to set auth.
Specified in the format 'projects/*/locations/*/clusters/\*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "set_master_auth" not in self._inner_api_calls:
self._inner_api_calls[
"set_master_auth"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_master_auth,
default_retry=self._method_configs["SetMasterAuth"].retry,
default_timeout=self._method_configs["SetMasterAuth"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetMasterAuthRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
action=action,
update=update,
name=name,
)
return self._inner_api_calls["set_master_auth"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def delete_cluster(
self,
project_id,
zone,
cluster_id,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes the cluster, including the Kubernetes endpoint and all worker
nodes.
Firewalls and routes that were configured during cluster creation
are also deleted.
Other Google Compute Engine resources that might be in use by the cluster
(e.g. load balancer resources) will not be deleted if they weren't present
at the initial create time.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> response = client.delete_cluster(project_id, zone, cluster_id)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster to delete.
This field has been deprecated and replaced by the name field.
name (str): The name (project, location, cluster) of the cluster to delete.
Specified in the format 'projects/*/locations/*/clusters/\*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"delete_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_cluster,
default_retry=self._method_configs["DeleteCluster"].retry,
default_timeout=self._method_configs["DeleteCluster"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.DeleteClusterRequest(
project_id=project_id, zone=zone, cluster_id=cluster_id, name=name
)
return self._inner_api_calls["delete_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_operations(
self,
project_id,
zone,
parent=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists all operations in a project in a specific zone or all zones.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> response = client.list_operations(project_id, zone)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the parent field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ to
return operations for, or ``-`` for all zones. This field has been
deprecated and replaced by the parent field.
parent (str): The parent (project and location) where the operations will be listed.
Specified in the format 'projects/*/locations/*'. Location "-" matches
all zones and all regions.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.ListOperationsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_operations" not in self._inner_api_calls:
self._inner_api_calls[
"list_operations"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_operations,
default_retry=self._method_configs["ListOperations"].retry,
default_timeout=self._method_configs["ListOperations"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.ListOperationsRequest(
project_id=project_id, zone=zone, parent=parent
)
return self._inner_api_calls["list_operations"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_operation(
    self,
    project_id,
    zone,
    operation_id,
    name=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Gets the specified operation.

    Example:
        >>> from google.cloud import container_v1
        >>> client = container_v1.ClusterManagerClient()
        >>> response = client.get_operation('', '', '')

    Args:
        project_id (str): Deprecated. The Google Developers Console `project
            ID or project number
            <https://support.google.com/cloud/answer/6158840>`__. Replaced
            by the ``name`` field.
        zone (str): Deprecated. The name of the Google Compute Engine
            `zone <https://cloud.google.com/compute/docs/zones#available>`__
            in which the cluster resides. Replaced by the ``name`` field.
        operation_id (str): Deprecated. The server-assigned ``name`` of the
            operation. Replaced by the ``name`` field.
        name (str): The name (project, location, operation id) of the
            operation to get. Specified in the format
            'projects/*/locations/*/operations/\*'.
        retry (Optional[google.api_core.retry.Retry]): Retry strategy for
            the request; ``None`` means no retries.
        timeout (Optional[float]): Seconds to wait for the request to
            complete; applies per attempt when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            forwarded with the request.

    Returns:
        A :class:`~google.cloud.container_v1.types.Operation` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the transport method with retry/timeout defaults and
    # cache the wrapper for subsequent calls.
    rpc_name = "get_operation"
    if rpc_name not in self._inner_api_calls:
        method_config = self._method_configs["GetOperation"]
        self._inner_api_calls[rpc_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.get_operation,
            default_retry=method_config.retry,
            default_timeout=method_config.timeout,
            client_info=self._client_info,
        )

    request = cluster_service_pb2.GetOperationRequest(
        project_id=project_id,
        zone=zone,
        operation_id=operation_id,
        name=name,
    )
    return self._inner_api_calls[rpc_name](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def cancel_operation(
    self,
    project_id,
    zone,
    operation_id,
    name=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Cancels the specified operation.

    Example:
        >>> from google.cloud import container_v1
        >>> client = container_v1.ClusterManagerClient()
        >>> client.cancel_operation('', '', '')

    Args:
        project_id (str): Deprecated. The Google Developers Console `project
            ID or project number
            <https://support.google.com/cloud/answer/6158840>`__. Replaced
            by the ``name`` field.
        zone (str): Deprecated. The name of the Google Compute Engine
            `zone <https://cloud.google.com/compute/docs/zones#available>`__
            in which the operation resides. Replaced by the ``name`` field.
        operation_id (str): Deprecated. The server-assigned ``name`` of the
            operation. Replaced by the ``name`` field.
        name (str): The name (project, location, operation id) of the
            operation to cancel. Specified in the format
            'projects/*/locations/*/operations/\*'.
        retry (Optional[google.api_core.retry.Retry]): Retry strategy for
            the request; ``None`` means no retries.
        timeout (Optional[float]): Seconds to wait for the request to
            complete; applies per attempt when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            forwarded with the request.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the transport method with retry/timeout defaults and
    # cache the wrapper for subsequent calls.
    rpc_name = "cancel_operation"
    if rpc_name not in self._inner_api_calls:
        method_config = self._method_configs["CancelOperation"]
        self._inner_api_calls[rpc_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.cancel_operation,
            default_retry=method_config.retry,
            default_timeout=method_config.timeout,
            client_info=self._client_info,
        )

    request = cluster_service_pb2.CancelOperationRequest(
        project_id=project_id,
        zone=zone,
        operation_id=operation_id,
        name=name,
    )
    # Fire-and-return: the RPC has an empty response, so nothing is
    # returned to the caller.
    self._inner_api_calls[rpc_name](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def get_server_config(
    self,
    project_id,
    zone,
    name=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Returns configuration info about the Kubernetes Engine service.

    Example:
        >>> from google.cloud import container_v1
        >>> client = container_v1.ClusterManagerClient()
        >>> response = client.get_server_config('', '')

    Args:
        project_id (str): Deprecated. The Google Developers Console `project
            ID or project number
            <https://support.google.com/cloud/answer/6158840>`__. Replaced
            by the ``name`` field.
        zone (str): Deprecated. The name of the Google Compute Engine
            `zone <https://cloud.google.com/compute/docs/zones#available>`__
            to return operations for. Replaced by the ``name`` field.
        name (str): The name (project and location) of the server config to
            get. Specified in the format 'projects/*/locations/*'.
        retry (Optional[google.api_core.retry.Retry]): Retry strategy for
            the request; ``None`` means no retries.
        timeout (Optional[float]): Seconds to wait for the request to
            complete; applies per attempt when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            forwarded with the request.

    Returns:
        A :class:`~google.cloud.container_v1.types.ServerConfig` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the transport method with retry/timeout defaults and
    # cache the wrapper for subsequent calls.
    rpc_name = "get_server_config"
    if rpc_name not in self._inner_api_calls:
        method_config = self._method_configs["GetServerConfig"]
        self._inner_api_calls[rpc_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.get_server_config,
            default_retry=method_config.retry,
            default_timeout=method_config.timeout,
            client_info=self._client_info,
        )

    request = cluster_service_pb2.GetServerConfigRequest(
        project_id=project_id,
        zone=zone,
        name=name,
    )
    return self._inner_api_calls[rpc_name](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def list_node_pools(
    self,
    project_id,
    zone,
    cluster_id,
    parent=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Lists the node pools for a cluster.

    Example:
        >>> from google.cloud import container_v1
        >>> client = container_v1.ClusterManagerClient()
        >>> response = client.list_node_pools('', '', '')

    Args:
        project_id (str): Deprecated. The Google Developers Console `project
            ID or project number
            <https://developers.google.com/console/help/new/#projectnumber>`__.
            Replaced by the ``parent`` field.
        zone (str): Deprecated. The name of the Google Compute Engine
            `zone <https://cloud.google.com/compute/docs/zones#available>`__
            in which the cluster resides. Replaced by the ``parent`` field.
        cluster_id (str): Deprecated. The name of the cluster. Replaced by
            the ``parent`` field.
        parent (str): The parent (project, location, cluster id) where the
            node pools will be listed. Specified in the format
            'projects/*/locations/*/clusters/\*'.
        retry (Optional[google.api_core.retry.Retry]): Retry strategy for
            the request; ``None`` means no retries.
        timeout (Optional[float]): Seconds to wait for the request to
            complete; applies per attempt when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            forwarded with the request.

    Returns:
        A :class:`~google.cloud.container_v1.types.ListNodePoolsResponse` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the transport method with retry/timeout defaults and
    # cache the wrapper for subsequent calls.
    rpc_name = "list_node_pools"
    if rpc_name not in self._inner_api_calls:
        method_config = self._method_configs["ListNodePools"]
        self._inner_api_calls[rpc_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.list_node_pools,
            default_retry=method_config.retry,
            default_timeout=method_config.timeout,
            client_info=self._client_info,
        )

    request = cluster_service_pb2.ListNodePoolsRequest(
        project_id=project_id,
        zone=zone,
        cluster_id=cluster_id,
        parent=parent,
    )
    return self._inner_api_calls[rpc_name](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def get_node_pool(
    self,
    project_id,
    zone,
    cluster_id,
    node_pool_id,
    name=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Retrieves the node pool requested.

    Example:
        >>> from google.cloud import container_v1
        >>> client = container_v1.ClusterManagerClient()
        >>> response = client.get_node_pool('', '', '', '')

    Args:
        project_id (str): Deprecated. The Google Developers Console `project
            ID or project number
            <https://developers.google.com/console/help/new/#projectnumber>`__.
            Replaced by the ``name`` field.
        zone (str): Deprecated. The name of the Google Compute Engine
            `zone <https://cloud.google.com/compute/docs/zones#available>`__
            in which the cluster resides. Replaced by the ``name`` field.
        cluster_id (str): Deprecated. The name of the cluster. Replaced by
            the ``name`` field.
        node_pool_id (str): Deprecated. The name of the node pool. Replaced
            by the ``name`` field.
        name (str): The name (project, location, cluster, node pool id) of
            the node pool to get. Specified in the format
            'projects/*/locations/*/clusters/*/nodePools/*'.
        retry (Optional[google.api_core.retry.Retry]): Retry strategy for
            the request; ``None`` means no retries.
        timeout (Optional[float]): Seconds to wait for the request to
            complete; applies per attempt when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            forwarded with the request.

    Returns:
        A :class:`~google.cloud.container_v1.types.NodePool` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the transport method with retry/timeout defaults and
    # cache the wrapper for subsequent calls.
    rpc_name = "get_node_pool"
    if rpc_name not in self._inner_api_calls:
        method_config = self._method_configs["GetNodePool"]
        self._inner_api_calls[rpc_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.get_node_pool,
            default_retry=method_config.retry,
            default_timeout=method_config.timeout,
            client_info=self._client_info,
        )

    request = cluster_service_pb2.GetNodePoolRequest(
        project_id=project_id,
        zone=zone,
        cluster_id=cluster_id,
        node_pool_id=node_pool_id,
        name=name,
    )
    return self._inner_api_calls[rpc_name](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def create_node_pool(
    self,
    project_id,
    zone,
    cluster_id,
    node_pool,
    parent=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Creates a node pool for a cluster.

    Example:
        >>> from google.cloud import container_v1
        >>> client = container_v1.ClusterManagerClient()
        >>> response = client.create_node_pool('', '', '', {})

    Args:
        project_id (str): Deprecated. The Google Developers Console `project
            ID or project number
            <https://developers.google.com/console/help/new/#projectnumber>`__.
            Replaced by the ``parent`` field.
        zone (str): Deprecated. The name of the Google Compute Engine
            `zone <https://cloud.google.com/compute/docs/zones#available>`__
            in which the cluster resides. Replaced by the ``parent`` field.
        cluster_id (str): Deprecated. The name of the cluster. Replaced by
            the ``parent`` field.
        node_pool (Union[dict, ~google.cloud.container_v1.types.NodePool]): The
            node pool to create. A dict must have the same form as the
            protobuf message :class:`~google.cloud.container_v1.types.NodePool`.
        parent (str): The parent (project, location, cluster id) where the
            node pool will be created. Specified in the format
            'projects/*/locations/*/clusters/\*'.
        retry (Optional[google.api_core.retry.Retry]): Retry strategy for
            the request; ``None`` means no retries.
        timeout (Optional[float]): Seconds to wait for the request to
            complete; applies per attempt when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            forwarded with the request.

    Returns:
        A :class:`~google.cloud.container_v1.types.Operation` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the transport method with retry/timeout defaults and
    # cache the wrapper for subsequent calls.
    rpc_name = "create_node_pool"
    if rpc_name not in self._inner_api_calls:
        method_config = self._method_configs["CreateNodePool"]
        self._inner_api_calls[rpc_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.create_node_pool,
            default_retry=method_config.retry,
            default_timeout=method_config.timeout,
            client_info=self._client_info,
        )

    request = cluster_service_pb2.CreateNodePoolRequest(
        project_id=project_id,
        zone=zone,
        cluster_id=cluster_id,
        node_pool=node_pool,
        parent=parent,
    )
    return self._inner_api_calls[rpc_name](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def delete_node_pool(
    self,
    project_id,
    zone,
    cluster_id,
    node_pool_id,
    name=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Deletes a node pool from a cluster.

    Example:
        >>> from google.cloud import container_v1
        >>> client = container_v1.ClusterManagerClient()
        >>> response = client.delete_node_pool('', '', '', '')

    Args:
        project_id (str): Deprecated. The Google Developers Console `project
            ID or project number
            <https://developers.google.com/console/help/new/#projectnumber>`__.
            Replaced by the ``name`` field.
        zone (str): Deprecated. The name of the Google Compute Engine
            `zone <https://cloud.google.com/compute/docs/zones#available>`__
            in which the cluster resides. Replaced by the ``name`` field.
        cluster_id (str): Deprecated. The name of the cluster. Replaced by
            the ``name`` field.
        node_pool_id (str): Deprecated. The name of the node pool to
            delete. Replaced by the ``name`` field.
        name (str): The name (project, location, cluster, node pool id) of
            the node pool to delete. Specified in the format
            'projects/*/locations/*/clusters/*/nodePools/*'.
        retry (Optional[google.api_core.retry.Retry]): Retry strategy for
            the request; ``None`` means no retries.
        timeout (Optional[float]): Seconds to wait for the request to
            complete; applies per attempt when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            forwarded with the request.

    Returns:
        A :class:`~google.cloud.container_v1.types.Operation` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the transport method with retry/timeout defaults and
    # cache the wrapper for subsequent calls.
    rpc_name = "delete_node_pool"
    if rpc_name not in self._inner_api_calls:
        method_config = self._method_configs["DeleteNodePool"]
        self._inner_api_calls[rpc_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.delete_node_pool,
            default_retry=method_config.retry,
            default_timeout=method_config.timeout,
            client_info=self._client_info,
        )

    request = cluster_service_pb2.DeleteNodePoolRequest(
        project_id=project_id,
        zone=zone,
        cluster_id=cluster_id,
        node_pool_id=node_pool_id,
        name=name,
    )
    return self._inner_api_calls[rpc_name](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def rollback_node_pool_upgrade(
    self,
    project_id,
    zone,
    cluster_id,
    node_pool_id,
    name=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Rolls back a previously Aborted or Failed NodePool upgrade.

    This is a no-op if the last upgrade successfully completed.

    Example:
        >>> from google.cloud import container_v1
        >>> client = container_v1.ClusterManagerClient()
        >>> response = client.rollback_node_pool_upgrade('', '', '', '')

    Args:
        project_id (str): Deprecated. The Google Developers Console `project
            ID or project number
            <https://support.google.com/cloud/answer/6158840>`__. Replaced
            by the ``name`` field.
        zone (str): Deprecated. The name of the Google Compute Engine
            `zone <https://cloud.google.com/compute/docs/zones#available>`__
            in which the cluster resides. Replaced by the ``name`` field.
        cluster_id (str): Deprecated. The name of the cluster to rollback.
            Replaced by the ``name`` field.
        node_pool_id (str): Deprecated. The name of the node pool to
            rollback. Replaced by the ``name`` field.
        name (str): The name (project, location, cluster, node pool id) of
            the node pool to rollback upgrade. Specified in the format
            'projects/*/locations/*/clusters/*/nodePools/*'.
        retry (Optional[google.api_core.retry.Retry]): Retry strategy for
            the request; ``None`` means no retries.
        timeout (Optional[float]): Seconds to wait for the request to
            complete; applies per attempt when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            forwarded with the request.

    Returns:
        A :class:`~google.cloud.container_v1.types.Operation` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the transport method with retry/timeout defaults and
    # cache the wrapper for subsequent calls.
    rpc_name = "rollback_node_pool_upgrade"
    if rpc_name not in self._inner_api_calls:
        method_config = self._method_configs["RollbackNodePoolUpgrade"]
        self._inner_api_calls[rpc_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.rollback_node_pool_upgrade,
            default_retry=method_config.retry,
            default_timeout=method_config.timeout,
            client_info=self._client_info,
        )

    request = cluster_service_pb2.RollbackNodePoolUpgradeRequest(
        project_id=project_id,
        zone=zone,
        cluster_id=cluster_id,
        node_pool_id=node_pool_id,
        name=name,
    )
    return self._inner_api_calls[rpc_name](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def set_node_pool_management(
    self,
    project_id,
    zone,
    cluster_id,
    node_pool_id,
    management,
    name=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Sets the NodeManagement options for a node pool.

    Example:
        >>> from google.cloud import container_v1
        >>> client = container_v1.ClusterManagerClient()
        >>> response = client.set_node_pool_management('', '', '', '', {})

    Args:
        project_id (str): Deprecated. The Google Developers Console `project
            ID or project number
            <https://support.google.com/cloud/answer/6158840>`__. Replaced
            by the ``name`` field.
        zone (str): Deprecated. The name of the Google Compute Engine
            `zone <https://cloud.google.com/compute/docs/zones#available>`__
            in which the cluster resides. Replaced by the ``name`` field.
        cluster_id (str): Deprecated. The name of the cluster to update.
            Replaced by the ``name`` field.
        node_pool_id (str): Deprecated. The name of the node pool to
            update. Replaced by the ``name`` field.
        management (Union[dict, ~google.cloud.container_v1.types.NodeManagement]):
            NodeManagement configuration for the node pool. A dict must
            have the same form as the protobuf message
            :class:`~google.cloud.container_v1.types.NodeManagement`.
        name (str): The name (project, location, cluster, node pool id) of
            the node pool to set management properties. Specified in the
            format 'projects/*/locations/*/clusters/*/nodePools/*'.
        retry (Optional[google.api_core.retry.Retry]): Retry strategy for
            the request; ``None`` means no retries.
        timeout (Optional[float]): Seconds to wait for the request to
            complete; applies per attempt when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            forwarded with the request.

    Returns:
        A :class:`~google.cloud.container_v1.types.Operation` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the transport method with retry/timeout defaults and
    # cache the wrapper for subsequent calls.
    rpc_name = "set_node_pool_management"
    if rpc_name not in self._inner_api_calls:
        method_config = self._method_configs["SetNodePoolManagement"]
        self._inner_api_calls[rpc_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.set_node_pool_management,
            default_retry=method_config.retry,
            default_timeout=method_config.timeout,
            client_info=self._client_info,
        )

    request = cluster_service_pb2.SetNodePoolManagementRequest(
        project_id=project_id,
        zone=zone,
        cluster_id=cluster_id,
        node_pool_id=node_pool_id,
        management=management,
        name=name,
    )
    return self._inner_api_calls[rpc_name](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def set_labels(
    self,
    project_id,
    zone,
    cluster_id,
    resource_labels,
    label_fingerprint,
    name=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Sets labels on a cluster.

    Example:
        >>> from google.cloud import container_v1
        >>> client = container_v1.ClusterManagerClient()
        >>> response = client.set_labels('', '', '', {}, '')

    Args:
        project_id (str): Deprecated. The Google Developers Console `project
            ID or project number
            <https://developers.google.com/console/help/new/#projectnumber>`__.
            Replaced by the ``name`` field.
        zone (str): Deprecated. The name of the Google Compute Engine
            `zone <https://cloud.google.com/compute/docs/zones#available>`__
            in which the cluster resides. Replaced by the ``name`` field.
        cluster_id (str): Deprecated. The name of the cluster. Replaced by
            the ``name`` field.
        resource_labels (dict[str -> str]): The labels to set for that cluster.
        label_fingerprint (str): The fingerprint of the previous set of
            labels for this resource, used to detect conflicts. The
            fingerprint is initially generated by Kubernetes Engine and
            changes after every request to modify or update labels. Always
            provide an up-to-date fingerprint hash when updating or
            changing labels; make a <code>get()</code> request to the
            resource to get the latest fingerprint.
        name (str): The name (project, location, cluster id) of the cluster
            to set labels. Specified in the format
            'projects/*/locations/*/clusters/\*'.
        retry (Optional[google.api_core.retry.Retry]): Retry strategy for
            the request; ``None`` means no retries.
        timeout (Optional[float]): Seconds to wait for the request to
            complete; applies per attempt when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            forwarded with the request.

    Returns:
        A :class:`~google.cloud.container_v1.types.Operation` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the transport method with retry/timeout defaults and
    # cache the wrapper for subsequent calls.
    rpc_name = "set_labels"
    if rpc_name not in self._inner_api_calls:
        method_config = self._method_configs["SetLabels"]
        self._inner_api_calls[rpc_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.set_labels,
            default_retry=method_config.retry,
            default_timeout=method_config.timeout,
            client_info=self._client_info,
        )

    request = cluster_service_pb2.SetLabelsRequest(
        project_id=project_id,
        zone=zone,
        cluster_id=cluster_id,
        resource_labels=resource_labels,
        label_fingerprint=label_fingerprint,
        name=name,
    )
    return self._inner_api_calls[rpc_name](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def set_legacy_abac(
    self,
    project_id,
    zone,
    cluster_id,
    enabled,
    name=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Enables or disables the ABAC authorization mechanism on a cluster.

    Example:
        >>> from google.cloud import container_v1
        >>> client = container_v1.ClusterManagerClient()
        >>> response = client.set_legacy_abac('', '', '', False)

    Args:
        project_id (str): Deprecated. The Google Developers Console `project
            ID or project number
            <https://support.google.com/cloud/answer/6158840>`__. Replaced
            by the ``name`` field.
        zone (str): Deprecated. The name of the Google Compute Engine
            `zone <https://cloud.google.com/compute/docs/zones#available>`__
            in which the cluster resides. Replaced by the ``name`` field.
        cluster_id (str): Deprecated. The name of the cluster to update.
            Replaced by the ``name`` field.
        enabled (bool): Whether ABAC authorization will be enabled in the
            cluster.
        name (str): The name (project, location, cluster id) of the cluster
            to set legacy abac. Specified in the format
            'projects/*/locations/*/clusters/\*'.
        retry (Optional[google.api_core.retry.Retry]): Retry strategy for
            the request; ``None`` means no retries.
        timeout (Optional[float]): Seconds to wait for the request to
            complete; applies per attempt when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            forwarded with the request.

    Returns:
        A :class:`~google.cloud.container_v1.types.Operation` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the transport method with retry/timeout defaults and
    # cache the wrapper for subsequent calls.
    rpc_name = "set_legacy_abac"
    if rpc_name not in self._inner_api_calls:
        method_config = self._method_configs["SetLegacyAbac"]
        self._inner_api_calls[rpc_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.set_legacy_abac,
            default_retry=method_config.retry,
            default_timeout=method_config.timeout,
            client_info=self._client_info,
        )

    request = cluster_service_pb2.SetLegacyAbacRequest(
        project_id=project_id,
        zone=zone,
        cluster_id=cluster_id,
        enabled=enabled,
        name=name,
    )
    return self._inner_api_calls[rpc_name](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def start_i_p_rotation(
self,
project_id,
zone,
cluster_id,
name=None,
rotate_credentials=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Start master IP rotation.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> response = client.start_i_p_rotation(project_id, zone, cluster_id)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
This field has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster.
This field has been deprecated and replaced by the name field.
name (str): The name (project, location, cluster id) of the cluster to start IP
rotation. Specified in the format 'projects/*/locations/*/clusters/\*'.
rotate_credentials (bool): Whether to rotate credentials during IP rotation.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "start_i_p_rotation" not in self._inner_api_calls:
self._inner_api_calls[
"start_i_p_rotation"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.start_i_p_rotation,
default_retry=self._method_configs["StartIPRotation"].retry,
default_timeout=self._method_configs["StartIPRotation"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.StartIPRotationRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
name=name,
rotate_credentials=rotate_credentials,
)
return self._inner_api_calls["start_i_p_rotation"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def complete_i_p_rotation(
self,
project_id,
zone,
cluster_id,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Completes master IP rotation.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> response = client.complete_i_p_rotation(project_id, zone, cluster_id)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
This field has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster.
This field has been deprecated and replaced by the name field.
name (str): The name (project, location, cluster id) of the cluster to complete IP
rotation. Specified in the format 'projects/*/locations/*/clusters/\*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "complete_i_p_rotation" not in self._inner_api_calls:
self._inner_api_calls[
"complete_i_p_rotation"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.complete_i_p_rotation,
default_retry=self._method_configs["CompleteIPRotation"].retry,
default_timeout=self._method_configs["CompleteIPRotation"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.CompleteIPRotationRequest(
project_id=project_id, zone=zone, cluster_id=cluster_id, name=name
)
return self._inner_api_calls["complete_i_p_rotation"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def set_node_pool_size(
self,
project_id,
zone,
cluster_id,
node_pool_id,
node_count,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sets the size for a specific node pool.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `node_pool_id`:
>>> node_pool_id = ''
>>>
>>> # TODO: Initialize `node_count`:
>>> node_count = 0
>>>
>>> response = client.set_node_pool_size(project_id, zone, cluster_id, node_pool_id, node_count)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__. This field
has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster to update.
This field has been deprecated and replaced by the name field.
node_pool_id (str): Deprecated. The name of the node pool to update.
This field has been deprecated and replaced by the name field.
node_count (int): The desired node count for the pool.
name (str): The name (project, location, cluster, node pool id) of the node pool to
set size. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "set_node_pool_size" not in self._inner_api_calls:
self._inner_api_calls[
"set_node_pool_size"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_node_pool_size,
default_retry=self._method_configs["SetNodePoolSize"].retry,
default_timeout=self._method_configs["SetNodePoolSize"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetNodePoolSizeRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
node_pool_id=node_pool_id,
node_count=node_count,
name=name,
)
return self._inner_api_calls["set_node_pool_size"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def set_network_policy(
self,
project_id,
zone,
cluster_id,
network_policy,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Enables/Disables Network Policy for a cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `network_policy`:
>>> network_policy = {}
>>>
>>> response = client.set_network_policy(project_id, zone, cluster_id, network_policy)
Args:
project_id (str): Deprecated. The Google Developers Console `project ID or project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
This field has been deprecated and replaced by the name field.
zone (str): Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides. This field has been deprecated and replaced
by the name field.
cluster_id (str): Deprecated. The name of the cluster.
This field has been deprecated and replaced by the name field.
network_policy (Union[dict, ~google.cloud.container_v1.types.NetworkPolicy]): Configuration options for the NetworkPolicy feature.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.NetworkPolicy`
name (str): The name (project, location, cluster id) of the cluster to set
networking policy. Specified in the format
'projects/*/locations/*/clusters/\*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "set_network_policy" not in self._inner_api_calls:
self._inner_api_calls[
"set_network_policy"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_network_policy,
default_retry=self._method_configs["SetNetworkPolicy"].retry,
default_timeout=self._method_configs["SetNetworkPolicy"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetNetworkPolicyRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
network_policy=network_policy,
name=name,
)
return self._inner_api_calls["set_network_policy"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def set_maintenance_policy(
self,
project_id,
zone,
cluster_id,
maintenance_policy,
name=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sets the maintenance policy for a cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `maintenance_policy`:
>>> maintenance_policy = {}
>>>
>>> response = client.set_maintenance_policy(project_id, zone, cluster_id, maintenance_policy)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to update.
maintenance_policy (Union[dict, ~google.cloud.container_v1.types.MaintenancePolicy]): The maintenance policy to be set for the cluster. An empty field
clears the existing maintenance policy.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.MaintenancePolicy`
name (str): The name (project, location, cluster id) of the cluster to set
maintenance policy. Specified in the format
'projects/*/locations/*/clusters/\*'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "set_maintenance_policy" not in self._inner_api_calls:
self._inner_api_calls[
"set_maintenance_policy"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_maintenance_policy,
default_retry=self._method_configs["SetMaintenancePolicy"].retry,
default_timeout=self._method_configs["SetMaintenancePolicy"].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetMaintenancePolicyRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
maintenance_policy=maintenance_policy,
name=name,
)
return self._inner_api_calls["set_maintenance_policy"](
request, retry=retry, timeout=timeout, metadata=metadata
)
| 44.710526 | 162 | 0.590586 |
0ee81bb3c5e7291b9a087979bcea587a16755b8a | 1,967 | py | Python | tools/get_used_module_version.py | HrTran/unicorn-binance-websocket-api | 64da01868a8c251a486993b868f0de5083922d64 | [
"MIT"
] | null | null | null | tools/get_used_module_version.py | HrTran/unicorn-binance-websocket-api | 64da01868a8c251a486993b868f0de5083922d64 | [
"MIT"
] | null | null | null | tools/get_used_module_version.py | HrTran/unicorn-binance-websocket-api | 64da01868a8c251a486993b868f0de5083922d64 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: tools/get_used_module_version.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api
# Documentation: https://lucit-systems-and-development.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: LUCIT Systems and Development
#
# Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager

# create instance of BinanceWebSocketApiManager
# NOTE(review): instantiation presumably starts the manager's worker threads --
# confirm against the library docs; this script relies on stop below to exit.
binance_websocket_api_manager = BinanceWebSocketApiManager()

# get version of the used UNICORN Binance WebSocket API
print(binance_websocket_api_manager.get_version())

# shut the manager down so its background machinery does not keep the
# interpreter alive after the version has been printed
binance_websocket_api_manager.stop_manager_with_all_streams()
| 44.704545 | 106 | 0.790544 |
e87454d0a890b2f69909b3a66092a752010b97cc | 1,699 | py | Python | AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/byteyears.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 2,757 | 2018-04-28T21:41:36.000Z | 2022-03-29T06:33:36.000Z | AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/byteyears.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 20 | 2019-07-23T15:29:32.000Z | 2022-01-21T12:53:04.000Z | AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/byteyears.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 449 | 2018-05-09T05:54:05.000Z | 2022-03-30T14:54:18.000Z | #! /usr/bin/env python
# Print the product of age and size of each file, in suitable units.
#
# Usage: byteyears [ -a | -m | -c ] file ...
#
# Options -[amc] select atime, mtime (default) or ctime as age.
import sys, os, time
from stat import *
def main():
# Use lstat() to stat files if it exists, else stat()
try:
statfunc = os.lstat
except AttributeError:
statfunc = os.stat
# Parse options
if sys.argv[1] == '-m':
itime = ST_MTIME
del sys.argv[1]
elif sys.argv[1] == '-c':
itime = ST_CTIME
del sys.argv[1]
elif sys.argv[1] == '-a':
itime = ST_CTIME
del sys.argv[1]
else:
itime = ST_MTIME
secs_per_year = 365.0 * 24.0 * 3600.0 # Scale factor
now = time.time() # Current time, for age computations
status = 0 # Exit status, set to 1 on errors
# Compute max file name length
maxlen = 1
for filename in sys.argv[1:]:
maxlen = max(maxlen, len(filename))
# Process each argument in turn
for filename in sys.argv[1:]:
try:
st = statfunc(filename)
except os.error, msg:
sys.stderr.write("can't stat %r: %r\n" % (filename, msg))
status = 1
st = ()
if st:
anytime = st[itime]
size = st[ST_SIZE]
age = now - anytime
byteyears = float(size) * float(age) / secs_per_year
print filename.ljust(maxlen),
print repr(int(byteyears)).rjust(8)
sys.exit(status)
if __name__ == '__main__':
main()
| 27.403226 | 81 | 0.516775 |
eb703cfc42dafed553f272b4db79c6318b8c046e | 4,862 | py | Python | ClemBot.Bot/bot/services/user_handling_service.py | makayla-moster/ClemBot | 26503d25f1fbe2abcf99dbf0f68b17e88ad11a7c | [
"MIT"
] | 1 | 2022-03-01T20:16:18.000Z | 2022-03-01T20:16:18.000Z | ClemBot.Bot/bot/services/user_handling_service.py | makayla-moster/ClemBot | 26503d25f1fbe2abcf99dbf0f68b17e88ad11a7c | [
"MIT"
] | null | null | null | ClemBot.Bot/bot/services/user_handling_service.py | makayla-moster/ClemBot | 26503d25f1fbe2abcf99dbf0f68b17e88ad11a7c | [
"MIT"
] | null | null | null | import datetime as datetime
import logging
from datetime import datetime
import typing as t
import dataclasses
import asyncio
import discord
from bot.consts import Colors, DesignatedChannels
from bot.messaging.events import Events
from bot.services.base_service import BaseService
import bot.utils.log_serializers as serializers
# Module-level logger for this service.
log = logging.getLogger(__name__)

# Seconds to wait before polling again when the update-event queue is empty.
# NOTE(review): not referenced anywhere in this file's visible code -- confirm
# a consumer elsewhere still uses it before removing.
UPDATE_EVENT_EMPTY_QUEUE_WAIT_TIME = 0.5
@dataclasses.dataclass
class UpdateEvent:
    """Work item pairing a Discord user id with that user's current role ids.

    NOTE(review): not consumed anywhere in this file's visible code -- confirm
    it is still needed before removing.
    """

    # Discord user id the update applies to
    user_id: int
    # Ids of all roles the user currently has
    user_roles_ids: t.List[int]
class UserHandlingService(BaseService):
    """Mirrors Discord guild membership changes into the ClemBot backend.

    Listens for member join/leave/update gateway events, forwards the changes
    through ``self.bot.user_route``, and posts join/leave embeds to each
    guild's designated log channels.
    """

    def __init__(self, *, bot):
        # UserId cache so that we don't hit the database on subsequent user updates
        self.user_id_cache: t.List[int] = []
        super().__init__(bot)

    @BaseService.Listener(Events.on_user_joined)
    async def on_user_joined(self, user: discord.Member) -> None:
        """Ensure the joining user exists in the db, sync guild/roles, announce."""
        log.info('"{member}" has joined guild "{guild}"',
                 member=serializers.log_user(user),
                 guild=serializers.log_guild(user.guild))

        db_user = await self.bot.user_route.get_user(user.id)

        # Create the user record only if it does not already exist
        if not db_user:
            await self.bot.user_route.create_user(user.id, user.name)

        await self.bot.user_route.add_user_guild(user.id, user.guild.id, raise_on_error=True)
        await self.bot.user_route.update_roles(user.id, [r.id for r in user.roles])
        await self.notify_user_join(user)

    @BaseService.Listener(Events.on_user_removed)
    async def on_user_removed(self, user) -> None:
        """Detach the user from the guild in the db and announce the departure."""
        log.info('"{user}" has left guild "{guild}"',
                 user=serializers.log_user(user),
                 guild=serializers.log_guild(user.guild))

        # Even though a user leaving a server doesn't clear them from the db
        # Its unlikely they are in multiple clembot servers
        # So remove them from the cache to keep its size down, and they will be
        # Readded the next time they are edited
        if user.id in self.user_id_cache:
            log.info('Removing {user} from the cache, new cache size is {size}',
                     user=serializers.log_user(user),
                     size=len(self.user_id_cache) - 1)
            self.user_id_cache.remove(user.id)

        await self.bot.user_route.remove_user_guild(user.id, user.guild.id)
        await self.notify_user_remove(user)

    @BaseService.Listener(Events.on_member_update)
    async def on_member_update(self, before: discord.Member, after: discord.Member) -> None:
        """Push the user's new role set to the db when their roles changed."""
        # only update roles if they have changed
        if set(r.id for r in before.roles) == set(r.id for r in after.roles):
            return

        # If user is not in local cache check the db
        if before.id not in self.user_id_cache:
            # If user is not in the db bail out
            if not await self.bot.user_route.get_user(before.id):
                # Possibly add them to the db if they don't exist
                # For future enhancement
                return

            log.info('Adding {user} to the cache, new cache size is {size}',
                     user=serializers.log_user(before),
                     size=(len(self.user_id_cache) + 1))

            # This user exists, add it to the cache
            self.user_id_cache.append(before.id)

        await self.bot.user_route.update_roles(before.id, [r.id for r in after.roles], raise_on_error=False)

    async def notify_user_join(self, user: discord.Member) -> None:
        """Publish a "New User Joined" embed to the guild's join log channel."""
        embed = discord.Embed(title='New User Joined', color=Colors.ClemsonOrange)
        embed.add_field(name='Username', value=self.get_full_name(user))
        embed.add_field(name='Account Creation date', value=user.created_at.date())
        embed.set_thumbnail(url=user.display_avatar.url)
        embed.set_footer(text=str(datetime.now().date()))

        await self.bot.messenger.publish(Events.on_send_in_designated_channel,
                                         DesignatedChannels.user_join_log,
                                         user.guild.id,
                                         embed)

    async def notify_user_remove(self, user: discord.Member) -> None:
        """Publish a "Guild User Left" embed to the guild's leave log channel."""
        embed = discord.Embed(title='Guild User Left', color=Colors.Error)
        embed.add_field(name='Username', value=self.get_full_name(user))
        embed.add_field(name='Account Creation date', value=user.created_at.date())
        embed.set_thumbnail(url=user.display_avatar.url)
        embed.set_footer(text=str(datetime.now().date()))

        await self.bot.messenger.publish(Events.on_send_in_designated_channel,
                                         DesignatedChannels.user_leave_log,
                                         user.guild.id,
                                         embed)

    def get_full_name(self, author) -> str:
        """Return the user's "name#discriminator" display string."""
        return f'{author.name}#{author.discriminator}'

    async def load_service(self) -> None:
        """No startup work is required for this service."""
        pass
| 40.516667 | 108 | 0.642945 |
4d4bcdf639e44ce371a8e230877b42cfd3720367 | 1,285 | py | Python | handlers/editcomment.py | Christianq010/udacity-multi-user-blog-master | 3dae18001df632ecdd78481efea07e10af91f515 | [
"MIT"
] | 6 | 2017-01-13T06:15:40.000Z | 2018-07-30T03:11:15.000Z | handlers/editcomment.py | Christianq010/udacity-multi-user-blog-master | 3dae18001df632ecdd78481efea07e10af91f515 | [
"MIT"
] | 1 | 2017-01-20T00:35:59.000Z | 2017-01-20T00:37:06.000Z | handlers/editcomment.py | Christianq010/udacity-multi-user-blog-master | 3dae18001df632ecdd78481efea07e10af91f515 | [
"MIT"
] | 3 | 2017-01-19T19:00:09.000Z | 2019-03-02T10:15:12.000Z | from google.appengine.ext import db
from handlers.blog import BlogHandler
from helpers import *
class EditCommentHandler(BlogHandler):
    """Handler for editing an existing comment on a blog post.

    URL captures: post_id, post_user_id and comment_id.
    NOTE(review): post_user_id is compared against the logged-in user's id, so
    it presumably identifies the comment's author -- confirm against the route
    definition.
    """

    def get(self, post_id, post_user_id, comment_id):
        """Render the edit form, or redirect/refuse when not authorized."""
        if self.user and self.user.key().id() == int(post_user_id):
            # Look the comment up via its ancestor path: blog -> post -> comment
            postKey = db.Key.from_path('Post', int(post_id), parent=blog_key())
            key = db.Key.from_path('Comment', int(comment_id), parent=postKey)
            comment = db.get(key)
            self.render('editcomment.html', content=comment.content)
        elif not self.user:
            # Anonymous visitors are sent to the login page
            self.redirect('/login')
        else:
            # Logged in, but not the owner of the comment
            self.write("You don't have permission to edit this comment.")

    def post(self, post_id, post_user_id, comment_id):
        """Persist the edited comment content, then return to the post."""
        if not self.user:
            return

        if self.user and self.user.key().id() == int(post_user_id):
            content = self.request.get('content')
            # Same ancestor path as in get(): blog -> post -> comment
            postKey = db.Key.from_path('Post', int(post_id), parent=blog_key())
            key = db.Key.from_path('Comment', int(comment_id), parent=postKey)
            comment = db.get(key)
            comment.content = content
            comment.put()
            self.redirect('/' + post_id)
        else:
            self.write("You don't have permission to edit this comment.")
02eb930dfccc3dd4acad82943900c26c3c13ffa1 | 2,357 | py | Python | models/DistMult.py | dhruvdcoder/OpenKE | c11f0958f4d0a08046b57cd41d83cedee70c64ac | [
"MIT"
] | 2 | 2019-08-05T16:00:44.000Z | 2019-09-06T04:51:09.000Z | models/DistMult.py | dhruvdcoder/OpenKE | c11f0958f4d0a08046b57cd41d83cedee70c64ac | [
"MIT"
] | 1 | 2019-11-16T10:06:13.000Z | 2019-11-16T10:06:13.000Z | models/DistMult.py | dhruvdcoder/OpenKE | c11f0958f4d0a08046b57cd41d83cedee70c64ac | [
"MIT"
] | 3 | 2019-09-09T13:14:05.000Z | 2022-02-11T19:30:39.000Z | #coding:utf-8
import numpy as np
import tensorflow as tf
from .Model import Model
class DistMult(Model):
    r'''
    DistMult is based on the bilinear model where each relation is represented by a diagonal rather than a full matrix.
    DistMult enjoys the same scalable property as TransE and it achieves superior performance over TransE.
    '''

    def _calc(self, h, t, r):
        """Bilinear (diagonal) score: sum over the last axis of h * r * t."""
        return tf.reduce_sum(h * r * t, -1, keep_dims=False)

    def embedding_def(self):
        """Create the entity and relation embedding tables (Xavier init)."""
        config = self.get_config()
        self.ent_embeddings = tf.get_variable(name="ent_embeddings", shape=[config.entTotal, config.hidden_size], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        self.rel_embeddings = tf.get_variable(name="rel_embeddings", shape=[config.relTotal, config.hidden_size], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        self.parameter_lists = {"ent_embeddings": self.ent_embeddings,
                                "rel_embeddings": self.rel_embeddings}

    def loss_def(self):
        """Define softplus logistic loss over positive/negative triples plus
        an L2 regularizer on the looked-up embeddings, weighted by lmbda."""
        config = self.get_config()
        pos_h, pos_t, pos_r = self.get_positive_instance(in_batch=True)
        neg_h, neg_t, neg_r = self.get_negative_instance(in_batch=True)
        pos_y = self.get_positive_labels(in_batch=True)
        neg_y = self.get_negative_labels(in_batch=True)
        p_h = tf.nn.embedding_lookup(self.ent_embeddings, pos_h)
        p_t = tf.nn.embedding_lookup(self.ent_embeddings, pos_t)
        p_r = tf.nn.embedding_lookup(self.rel_embeddings, pos_r)
        n_h = tf.nn.embedding_lookup(self.ent_embeddings, neg_h)
        n_t = tf.nn.embedding_lookup(self.ent_embeddings, neg_t)
        n_r = tf.nn.embedding_lookup(self.rel_embeddings, neg_r)
        _p_score = self._calc(p_h, p_t, p_r)
        _n_score = self._calc(n_h, n_t, n_r)
        # Bug fix: removed a leftover debug statement that printed
        # _n_score.get_shape() at graph-construction time.
        loss_func = tf.reduce_mean(tf.nn.softplus(- pos_y * _p_score) + tf.nn.softplus(- neg_y * _n_score))
        regul_func = tf.reduce_mean(p_h ** 2 + p_t ** 2 + p_r ** 2 + n_h ** 2 + n_t ** 2 + n_r ** 2)
        self.loss = loss_func + config.lmbda * regul_func

    def predict_def(self):
        """Define the prediction op: negated score, so lower values indicate
        more plausible triples (consistent with distance-style ranking)."""
        config = self.get_config()
        predict_h, predict_t, predict_r = self.get_predict_instance()
        predict_h_e = tf.nn.embedding_lookup(self.ent_embeddings, predict_h)
        predict_t_e = tf.nn.embedding_lookup(self.ent_embeddings, predict_t)
        predict_r_e = tf.nn.embedding_lookup(self.rel_embeddings, predict_r)
        self.predict = -self._calc(predict_h_e, predict_t_e, predict_r_e)
| 50.148936 | 179 | 0.748409 |
73e8f2151c08f6169d730575a18d35cbd2f151d1 | 1,121 | py | Python | packages/regression_model/regression_model/processing/validation.py | karanvijaygit/DMLM | aaeb3e65d0a58ad583289aaa39b089f11d06a4eb | [
"BSD-3-Clause"
] | 477 | 2019-02-14T11:24:29.000Z | 2022-03-31T08:43:50.000Z | packages/regression_model/regression_model/processing/validation.py | karanvijaygit/DMLM | aaeb3e65d0a58ad583289aaa39b089f11d06a4eb | [
"BSD-3-Clause"
] | 51 | 2019-05-11T11:00:48.000Z | 2021-12-08T14:50:33.000Z | packages/regression_model/regression_model/processing/validation.py | karanvijaygit/DMLM | aaeb3e65d0a58ad583289aaa39b089f11d06a4eb | [
"BSD-3-Clause"
] | 4,870 | 2019-01-20T11:04:50.000Z | 2022-03-31T12:37:17.000Z | from regression_model.config import config
import pandas as pd
def validate_inputs(input_data: pd.DataFrame) -> pd.DataFrame:
    """Check model inputs for unprocessable values.

    Drops rows that the trained pipeline cannot transform:

    * rows with NA in numerical variables where NA was not seen in training
    * rows with NA in categorical variables where NA was not seen in training
    * rows with values <= 0 in variables that are log-transformed

    Args:
        input_data: raw input features, one row per prediction.

    Returns:
        A copy of ``input_data`` restricted to validated rows.
    """
    validated_data = input_data.copy()

    # check for numerical variables with NA not seen during training
    if input_data[config.NUMERICAL_NA_NOT_ALLOWED].isnull().any().any():
        validated_data = validated_data.dropna(
            axis=0, subset=config.NUMERICAL_NA_NOT_ALLOWED
        )

    # check for categorical variables with NA not seen during training
    if input_data[config.CATEGORICAL_NA_NOT_ALLOWED].isnull().any().any():
        validated_data = validated_data.dropna(
            axis=0, subset=config.CATEGORICAL_NA_NOT_ALLOWED
        )

    # check for values <= 0 for the log transformed variables
    if (input_data[config.NUMERICALS_LOG_VARS] <= 0).any().any():
        vars_with_neg_values = config.NUMERICALS_LOG_VARS[
            (input_data[config.NUMERICALS_LOG_VARS] <= 0).any()
        ]
        # Bug fix: the previous expression indexed the frame with a boolean
        # *DataFrame* (validated_data[validated_data[cols] > 0]), which masks
        # non-matching cells to NaN instead of dropping the offending rows.
        # Reduce to a per-row boolean mask and filter with .loc instead.
        row_mask = (validated_data[vars_with_neg_values] > 0).all(axis=1)
        validated_data = validated_data.loc[row_mask]

    return validated_data
| 36.16129 | 81 | 0.709188 |
4623c8d1e8c15b0065ff44e55d3a3f5b8f6a1d4b | 1,623 | py | Python | scripts/eval_adaptor.py | dunknowcoding/RGANet | 206715439c445f67489400d8d0a45b906ecfee16 | [
"Apache-2.0"
] | 4 | 2021-12-29T06:49:04.000Z | 2022-01-31T13:13:05.000Z | scripts/eval_adaptor.py | dunknowcoding/RGANet | 206715439c445f67489400d8d0a45b906ecfee16 | [
"Apache-2.0"
] | null | null | null | scripts/eval_adaptor.py | dunknowcoding/RGANet | 206715439c445f67489400d8d0a45b906ecfee16 | [
"Apache-2.0"
] | 1 | 2022-01-12T09:24:31.000Z | 2022-01-12T09:24:31.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
: Project - Comparision experiments
: Splite evaluation texts into individual text files for SPSS
: Author - Xi Mo
: Institute - University of Kansas
: Date - 6/13/2021 last updated 6/13/2021
"""
from pathlib import Path

# Source evaluation dump and destination directory for the per-metric files.
# (Directory name "metircs" kept as-is: downstream tooling expects this path.)
evalPath = Path(r"E:\RGANet\results_acrt\evaluation\evaluation.txt").resolve()
splitPath = Path(r"E:\RGANet\results_acrt\evaluation\metircs").resolve()
splitPath.mkdir(exist_ok=True, parents=True)

# Remove any stale per-metric files left over from a previous run.
# (Replaces ten copy-pasted unlink() lines with a single loop.)
for metric in ("acc", "dice", "fpr", "iou", "mcc",
               "mgrid", "name", "precision", "recall", "tpr"):
    splitPath.joinpath(metric + ".txt").unlink(missing_ok=True)

if __name__ == '__main__':
    # Each input line looks like: "<image-name> acc:<v> dice:<v> ...";
    # the first token is the sample name, the rest are "metric:value" pairs.
    with open(evalPath, 'r') as f:
        lines = f.readlines()
    for line in lines:
        line = line.rstrip('\n')
        oneItem = line.split(sep=" ")
        for idx, item in enumerate(oneItem):
            if idx == 0:
                # The leading token carries no "metric:" prefix
                t = ["name", item]
            else:
                t = item.split(sep=":")
            fileName = t[0] + ".txt"
            filePath = splitPath.joinpath(fileName)
            # Append one value per line into the metric's own file
            with open(filePath, 'a+') as g:
                g.write(f"{t[1]}\n")
    print("Done spliting!")
| 34.531915 | 78 | 0.643869 |
7dad41a00a5772adc6636b507a0f34843824a70a | 1,494 | py | Python | renaming_combining/combining.py | CodeXfull/Pandas | 08b0adc28eedba47f6eb8303ba6a36a37ababb92 | [
"MIT"
] | null | null | null | renaming_combining/combining.py | CodeXfull/Pandas | 08b0adc28eedba47f6eb8303ba6a36a37ababb92 | [
"MIT"
] | null | null | null | renaming_combining/combining.py | CodeXfull/Pandas | 08b0adc28eedba47f6eb8303ba6a36a37ababb92 | [
"MIT"
] | null | null | null | """
Combinando
Ao executar operações em um conjunto de dados, às vezes precisaremos combinar DataFrames e / ou séries diferentes de maneiras não triviais.
O Pandas possui três métodos principais para fazer isso. Em ordem crescente de complexidade, são concat (), join () e merge (). A maior parte do que merge () pode fazer também pode ser feita de maneira mais simples com join ().
O método de combinação mais simples é concat (). Dada uma lista de elementos, essa função agrupará esses elementos ao longo de um eixo.
Isso é útil quando temos dados em diferentes objetos DataFrame ou Series, mas com os MESMOS CAMPOS (colunas).
"""
import pandas as pd
canadian_youtube = pd.read_csv("./renaming_combining/CAvideos.csv")
british_youtube = pd.read_csv("./renaming_combining/GBvideos.csv")
x = pd.concat([canadian_youtube, british_youtube])
print(x)
"""join() permite combinar diferentes objetos DataFrame que têm um índice em comum. Por exemplo, para exibir vídeos que estavam em alta no mesmo dia no Canadá e no Reino Unido, poderíamos fazer o seguinte:
Os parâmetros lsuffix e rsuffix são necessários aqui porque os dados têm os mesmos nomes de coluna nos conjuntos de dados britânicos e canadenses. Se isso não fosse verdade (porque, digamos, nós os renomeamos antes), não precisaríamos deles."""
left = canadian_youtube.set_index(['title', 'trending_date'])
right = british_youtube.set_index(['title', 'trending_date'])
y = left.join(right, lsuffix='_CAN', rsuffix='_UK')
print(y) | 55.333333 | 244 | 0.774431 |
af0edaa29a19b71d5ce67788139cf8c59b8d12d2 | 7,143 | py | Python | larch/wxxas/taskpanel.py | Bob620/xraylarch | f8d38e6122cc0e8c990b0f024db3b503a5fbf057 | [
"BSD-2-Clause"
] | null | null | null | larch/wxxas/taskpanel.py | Bob620/xraylarch | f8d38e6122cc0e8c990b0f024db3b503a5fbf057 | [
"BSD-2-Clause"
] | null | null | null | larch/wxxas/taskpanel.py | Bob620/xraylarch | f8d38e6122cc0e8c990b0f024db3b503a5fbf057 | [
"BSD-2-Clause"
] | null | null | null | import time
import os
import platform
from functools import partial
from collections import OrderedDict
import numpy as np
np.seterr(all='ignore')
import wx
from larch import Group
from larch.wxlib import (BitmapButton, SetTip, GridPanel, FloatCtrl,
FloatSpin, FloatSpinWithPin, get_icon, SimpleText,
pack, Button, HLine, Choice, Check, MenuItem,
GUIColors, CEN, RCEN, LCEN, FRAMESTYLE, Font,
FileSave, FileOpen, FONTSIZE)
from larch.wxlib.plotter import last_cursor_pos
from larch.utils import group2dict
# default sizer alignment flags used throughout this panel module
LCEN = wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL
CEN |= wx.ALL
class TaskPanel(wx.Panel):
    """Generic wx panel for main XAS-viewer tasks.

    Meant to be subclassed: subclasses override build_display(), process()
    and the onPlot* hooks.  Per-group processing configuration is stored
    under ``self.configname`` both on the data group and in the larch
    symbol table (``_sys.xas_viewer``).
    """
    def __init__(self, parent, controller, title='Generic Panel',
                 configname='task_config', config=None, **kws):
        wx.Panel.__init__(self, parent, -1, size=(550, 625), **kws)
        self.parent = parent
        self.controller = controller
        self.larch = controller.larch
        self.title = title
        self.configname = configname
        if config is not None:
            self.set_defaultconfig(config)
        self.wids = {}
        self.subframes = {}
        self.SetFont(Font(FONTSIZE))
        self.panel = GridPanel(self, ncols=7, nrows=10, pad=2, itemstyle=LCEN)
        self.panel.sizer.SetVGap(5)
        self.panel.sizer.SetHGap(5)
        # suppress processing while widgets are being built
        self.skip_process = True
        self.skip_plotting = False
        self.build_display()
        self.skip_process = False

    def show_subframe(self, name, frameclass, **opts):
        """Raise the named child frame, creating it on first use."""
        shown = False
        if name in self.subframes:
            try:
                self.subframes[name].Raise()
                shown = True
            except Exception:
                # frame was destroyed (e.g. closed by the user): recreate below
                del self.subframes[name]
        if not shown:
            self.subframes[name] = frameclass(self, **opts)

    def onPanelExposed(self, **kws):
        # called when notebook is selected: sync the form with the
        # currently-selected file's data group and reprocess it
        fname = self.controller.filelist.GetStringSelection()
        if fname in self.controller.file_groups:
            gname = self.controller.file_groups[fname]
            dgroup = self.controller.get_group(gname)
            self.fill_form(dgroup)
            self.process(dgroup=dgroup)

    def larch_eval(self, cmd):
        """Evaluate a command string in the larch interpreter."""
        self.controller.larch.eval(cmd)

    def larch_get(self, sym):
        """Get value from larch symbol table."""
        return self.controller.larch.symtable.get_symbol(sym)

    def build_display(self):
        """Build the widget layout; subclasses should override this."""
        titleopts = dict(font=Font(FONTSIZE+1), colour='#AA0000')
        self.panel.Add(SimpleText(self.panel, self.title, **titleopts),
                       dcol=7)
        self.panel.Add(SimpleText(self.panel, ' coming soon....'),
                       dcol=7, newrow=True)
        self.panel.pack()
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.panel, 1, wx.LEFT|wx.CENTER, 3)
        pack(self, sizer)

    def set_defaultconfig(self, config):
        """set the default configuration for this session"""
        conf = self.controller.larch.symtable._sys.xas_viewer
        setattr(conf, self.configname, {key: val for key, val in config.items()})

    def get_defaultconfig(self):
        """get the default configuration for this session"""
        conf = self.controller.larch.symtable._sys.xas_viewer
        defconf = getattr(conf, self.configname, {})
        return {key: val for key, val in defconf.items()}

    def get_config(self, dgroup=None):
        """get (and cache on the group) processing configuration for a group"""
        if dgroup is None:
            dgroup = self.controller.get_group()
        conf = getattr(dgroup, self.configname, self.get_defaultconfig())
        if dgroup is not None:
            setattr(dgroup, self.configname, conf)
        return conf

    def update_config(self, config, dgroup=None):
        """set/update processing configuration for a group"""
        if dgroup is None:
            dgroup = self.controller.get_group()
        conf = getattr(dgroup, self.configname, self.get_defaultconfig())
        conf.update(config)
        if dgroup is not None:
            setattr(dgroup, self.configname, conf)

    def fill_form(self, dat):
        """Fill FloatCtrl widgets from a Group or dict of values."""
        if isinstance(dat, Group):
            dat = group2dict(dat)
        for name, wid in self.wids.items():
            if isinstance(wid, FloatCtrl) and name in dat:
                wid.SetValue(dat[name])

    def read_form(self):
        """Read form widgets, returning a dict of {name: value}."""
        dgroup = self.controller.get_group()
        form_opts = {'groupname': dgroup.groupname}
        for name, wid in self.wids.items():
            val = None
            # try the common widget accessors until one yields a value
            for method in ('GetStringSelection', 'IsChecked',
                           'GetValue', 'GetLabel'):
                meth = getattr(wid, method, None)
                if callable(meth):
                    try:
                        val = meth()
                    except TypeError:
                        pass
                if val is not None:
                    break
            form_opts[name] = val
        return form_opts

    def process(self, dgroup=None, **kws):
        """override to handle data process step"""
        if self.skip_process:
            return
        self.skip_process = True

    def add_text(self, text, dcol=1, newrow=True):
        """Add a SimpleText label to the panel."""
        self.panel.Add(SimpleText(self.panel, text),
                       dcol=dcol, newrow=newrow)

    def add_floatspin(self, name, value, with_pin=True, relative_e0=False,
                      **kws):
        """create FloatSpin (optionally with a Pin button for onSelPoint),
        register it in self.wids, and return a sizer holding it"""
        if with_pin:
            pin_action = partial(self.onSelPoint, opt=name,
                                 relative_e0=relative_e0)
            fspin, bb = FloatSpinWithPin(self.panel, value=value,
                                         pin_action=pin_action, **kws)
        else:
            fspin = FloatSpin(self.panel, value=value, **kws)
            bb = (1, 1)
        self.wids[name] = fspin
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(fspin)
        sizer.Add(bb)
        return sizer

    def onPlot(self, evt=None):
        pass

    def onPlotOne(self, evt=None, dgroup=None, **kws):
        pass

    def onPlotSel(self, evt=None, groups=None, **kws):
        pass

    def onSelPoint(self, evt=None, opt='__', relative_e0=False, win=None):
        """
        get last selected point from a specified plot window
        and fill in the value for the widget defined by `opt`.

        by default it finds the latest cursor position from the
        cursor history of the first 20 plot windows.
        """
        if opt not in self.wids:
            return None
        _x, _y = last_cursor_pos(win=win, _larch=self.larch)
        if _x is not None:
            # was `opt is not 'e0'`: identity comparison against a str literal
            # is unreliable and a SyntaxWarning on modern Pythons; use `!=`
            if relative_e0 and 'e0' in self.wids and opt != 'e0':
                _x -= self.wids['e0'].GetValue()
            self.wids[opt].SetValue(_x)
| 35.014706 | 80 | 0.584488 |
d1ca6cd26fd8115500df3ad576ed697600b347ee | 1,838 | py | Python | kashgari/tasks/classification/cnn_model.py | carsondahlberg/Kashgari | 0bbb12344aaf43aeb4d8ffce39848f8565f87528 | [
"MIT"
] | 1 | 2019-01-27T12:54:51.000Z | 2019-01-27T12:54:51.000Z | kashgari/tasks/classification/cnn_model.py | cmcai0104/Kashgari | 7111d2d2959f234c05a0db06073c6142f826a8f7 | [
"MIT"
] | 6 | 2020-01-28T22:33:57.000Z | 2022-02-10T00:08:16.000Z | kashgari/tasks/classification/cnn_model.py | cmcai0104/Kashgari | 7111d2d2959f234c05a0db06073c6142f826a8f7 | [
"MIT"
] | null | null | null | # encoding: utf-8
"""
@author: BrikerMan
@contact: eliyar917@gmail.com
@blog: https://eliyar.biz
@version: 1.0
@license: Apache Licence
@file: cnn_model.py
@time: 2019-01-21 17:49
"""
import logging
from keras.layers import Dense, Conv1D, GlobalMaxPooling1D
from keras.models import Model
from kashgari.tasks.classification.base_model import ClassificationModel
class CNNModel(ClassificationModel):
    """CNN text classifier: embedding -> Conv1D -> global max-pool -> dense."""

    __base_hyper_parameters__ = {
        'conv1d_layer': {
            'filters': 128,
            'kernel_size': 5,
            'activation': 'relu'
        },
        'max_pool_layer': {},
        'dense_1_layer': {
            'units': 64,
            'activation': 'relu'
        }
    }

    def build_model(self):
        """Assemble and compile the Keras model on top of the embedding."""
        embed_model = self.embedding.model
        conv = Conv1D(**self.hyper_parameters['conv1d_layer'])(embed_model.output)
        pooled = GlobalMaxPooling1D(**self.hyper_parameters['max_pool_layer'])(conv)
        hidden = Dense(**self.hyper_parameters['dense_1_layer'])(pooled)
        # one output unit per label
        logits = Dense(len(self.label2idx), activation='sigmoid')(hidden)
        model = Model(embed_model.inputs, logits)
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        self.model = model
        self.model.summary()
if __name__ == "__main__":
from kashgari.utils.logger import init_logger
from kashgari.corpus import TencentDingdangSLUCorpus
init_logger()
x_data, y_data = TencentDingdangSLUCorpus.get_classification_data()
classifier = CNNModel()
classifier.fit(x_data, y_data, epochs=1)
classifier.save('./classifier_saved2')
model = ClassificationModel.load_model('./classifier_saved2')
logging.info(model.predict('我要听音乐'))
| 29.645161 | 100 | 0.668662 |
cd7409349d22891410042df2bb9dc45f910151ff | 532 | py | Python | src/flaskApi/manager.py | MaiXiaochai/flask | 31547477ae17f263888f285a8e4c9d6bd473fdc9 | [
"Apache-2.0"
] | null | null | null | src/flaskApi/manager.py | MaiXiaochai/flask | 31547477ae17f263888f285a8e4c9d6bd473fdc9 | [
"Apache-2.0"
] | null | null | null | src/flaskApi/manager.py | MaiXiaochai/flask | 31547477ae17f263888f285a8e4c9d6bd473fdc9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
--------------------------------------
@File : manager.py
@Author : maixiaochai
@Email : maixiaochai@outlook.com
@Created on : 2020/4/26 22:05
--------------------------------------
"""
from os import environ
from flask_script import Manager
from flask_migrate import MigrateCommand
from App import create_app
env = environ.get("FLASK_ENV", "default")
app = create_app(env)
manager = Manager(app)
manager.add_command("db", MigrateCommand)
if __name__ == "__main__":
manager.run()
| 21.28 | 41 | 0.597744 |
f78e9d7cf794941729ea501c432c299282610e09 | 961 | py | Python | releases/dogwood/3/fun/config/lms/storage.py | SergioSim/learning-analytics-playground | f02e160a0e7b81fe141ed47662785a08bcc684d8 | [
"MIT"
] | 1 | 2021-12-13T09:05:59.000Z | 2021-12-13T09:05:59.000Z | releases/dogwood/3/fun/config/lms/storage.py | SergioSim/learning-analytics-playground | f02e160a0e7b81fe141ed47662785a08bcc684d8 | [
"MIT"
] | 3 | 2021-05-18T08:26:51.000Z | 2022-03-14T10:34:36.000Z | releases/dogwood/3/fun/config/lms/storage.py | SergioSim/learning-analytics-playground | f02e160a0e7b81fe141ed47662785a08bcc684d8 | [
"MIT"
] | 1 | 2021-06-03T14:21:56.000Z | 2021-06-03T14:21:56.000Z | """Django static file storage backend for OpenEdX."""
from django.conf import settings
from pipeline.storage import PipelineCachedStorage
from openedx.core.storage import ProductionStorage
class CDNMixin(object):
    """Mixin that prefixes generated static-file URLs with a CDN base URL."""

    def url(self, name, force=False):
        """Return the URL for *name*, prepending settings.CDN_BASE_URL when set."""
        base_url = super(CDNMixin, self).url(name, force=force)
        cdn_base_url = getattr(settings, "CDN_BASE_URL", None)
        if not cdn_base_url:
            return base_url
        return "{:s}{:s}".format(cdn_base_url, base_url)
class CDNProductionStorage(CDNMixin, ProductionStorage):
    """Open edX LMS production static files storage backend that can be placed behind a CDN."""
class CDNPipelineCachedStorage(CDNMixin, PipelineCachedStorage):
    """Open edX Studio production static files storage backend that can be placed behind a CDN."""
| 33.137931 | 98 | 0.721124 |
ba9b61e35b59acadc1efb27b009bebff45066659 | 2,989 | py | Python | libfmp/c3/c3s1_audio_feature.py | arfon/libfmp | 86f39a323f948a5f104f768442359e93620b2bab | [
"MIT"
] | 55 | 2020-12-14T08:33:10.000Z | 2022-03-22T16:36:41.000Z | libfmp/c3/c3s1_audio_feature.py | arfon/libfmp | 86f39a323f948a5f104f768442359e93620b2bab | [
"MIT"
] | 6 | 2021-06-25T09:11:29.000Z | 2021-12-17T13:55:19.000Z | libfmp/c3/c3s1_audio_feature.py | arfon/libfmp | 86f39a323f948a5f104f768442359e93620b2bab | [
"MIT"
] | 8 | 2021-06-30T08:34:38.000Z | 2022-01-11T15:59:17.000Z | """
Module: libfmp.c3.c3s1_audio_feature
Author: Frank Zalkow, Meinard Müller
License: The MIT license, https://opensource.org/licenses/MIT
This file is part of the FMP Notebooks (https://www.audiolabs-erlangen.de/FMP)
"""
import numpy as np
from numba import jit
@jit(nopython=True)
def f_pitch(p, pitch_ref=69, freq_ref=440.0):
    """Map MIDI pitch number(s) to center frequency in Hz.

    Notebook: C3/C3S1_SpecLogFreq-Chromagram.ipynb

    Args:
        p (float): MIDI pitch value(s)
        pitch_ref (float): Reference pitch (default: 69)
        freq_ref (float): Frequency of reference pitch (default: 440.0)

    Returns:
        freqs (float): Frequency value(s)
    """
    semitone_offset = (p - pitch_ref) / 12
    return freq_ref * 2 ** semitone_offset
@jit(nopython=True)
def pool_pitch(p, Fs, N, pitch_ref=69, freq_ref=440.0):
    """Return the STFT frequency indices assigned to a given MIDI pitch.

    Notebook: C3/C3S1_SpecLogFreq-Chromagram.ipynb

    Args:
        p (float): MIDI pitch value
        Fs (scalar): Sampling rate
        N (int): Window size of Fourier fransform
        pitch_ref (float): Reference pitch (default: 69)
        freq_ref (float): Frequency of reference pitch (default: 440.0)

    Returns:
        k (np.ndarray): Set of frequency indices
    """
    # band edges half a semitone below/above the pitch center
    freq_lo = f_pitch(p - 0.5, pitch_ref, freq_ref)
    freq_hi = f_pitch(p + 0.5, pitch_ref, freq_ref)
    bins = np.arange(N // 2 + 1)
    bin_freqs = bins * Fs / N
    in_band = np.logical_and(freq_lo <= bin_freqs, bin_freqs < freq_hi)
    return bins[in_band]
@jit(nopython=True)
def compute_spec_log_freq(Y, Fs, N):
    """Pool a magnitude/power spectrogram into 128 MIDI-pitch bands.

    Notebook: C3/C3S1_SpecLogFreq-Chromagram.ipynb

    Args:
        Y (np.ndarray): Magnitude or power spectrogram
        Fs (scalar): Sampling rate
        N (int): Window size of Fourier fransform

    Returns:
        Y_LF (np.ndarray): Log-frequency spectrogram (128 x frames)
        F_coef_pitch (np.ndarray): Pitch values 0..127
    """
    num_pitches = 128
    Y_LF = np.zeros((num_pitches, Y.shape[1]))
    for pitch in range(num_pitches):
        bins = pool_pitch(pitch, Fs, N)
        # sum the energy of all frequency bins belonging to this pitch
        Y_LF[pitch, :] = Y[bins, :].sum(axis=0)
    F_coef_pitch = np.arange(num_pitches)
    return Y_LF, F_coef_pitch
@jit(nopython=True)
def compute_chromagram(Y_LF):
    """Fold a 128-band log-frequency spectrogram into 12 chroma bands.

    Notebook: C3/C3S1_SpecLogFreq-Chromagram.ipynb

    Args:
        Y_LF (np.ndarray): Log-frequency spectrogram

    Returns:
        C (np.ndarray): Chromagram (12 x frames)
    """
    num_chroma = 12
    C = np.zeros((num_chroma, Y_LF.shape[1]))
    pitches = np.arange(128)
    for chroma in range(num_chroma):
        # sum all pitches congruent to this chroma class modulo 12
        in_class = (pitches % 12) == chroma
        C[chroma, :] = Y_LF[in_class, :].sum(axis=0)
    return C
def note_name(p):
    """Return the note name (e.g. 'A4' for MIDI pitch 69) of a pitch.

    Notebook: C3/C3S1_SpecLogFreq-Chromagram.ipynb

    Args:
        p (int): Pitch value

    Returns:
        name (str): Note name (chroma label plus octave number)
    """
    chroma_labels = ['A', 'A$^\\sharp$', 'B', 'C', 'C$^\\sharp$', 'D', 'D$^\\sharp$', 'E', 'F', 'F$^\\sharp$', 'G',
                     'G$^\\sharp$']
    octave = p // 12 - 1
    return chroma_labels[(p - 69) % 12] + str(octave)
| 26.927928 | 108 | 0.614921 |
15429e824529741215801dcd7fad2b39766f083c | 2,612 | py | Python | project/scripts/clausecat/clause_aggregation.py | svlandeg/healthsea | d3527e96630f59a07dccda7d6eae79e905e98a02 | [
"MIT"
] | null | null | null | project/scripts/clausecat/clause_aggregation.py | svlandeg/healthsea | d3527e96630f59a07dccda7d6eae79e905e98a02 | [
"MIT"
] | null | null | null | project/scripts/clausecat/clause_aggregation.py | svlandeg/healthsea | d3527e96630f59a07dccda7d6eae79e905e98a02 | [
"MIT"
] | null | null | null | from spacy.tokens import Doc
from spacy.language import Language
import operator
@Language.factory("healthsea.aggregation")
def create_clause_aggregation(nlp: Language, name: str):
return Clause_aggregation(nlp)
class Clause_aggregation:
    """Aggregate the predicted effects from the clausecat and apply the patient information logic"""

    def __init__(self, nlp: Language):
        self.nlp = nlp

    def __call__(self, doc: Doc):
        """Collect per-clause predictions from doc._.clauses into a per-entity
        summary and store it on doc._.health_effects.

        Each entity maps to a dict with keys "effects" (list of clause
        classifications), "effect" (majority-vote POSITIVE/NEGATIVE/NEUTRAL),
        "label" (blinder token with underscores stripped) and "text".
        """
        patient_information = []
        health_effects = {}
        for clause in doc._.clauses:
            # highest-scoring predicted class for this clause
            classification = max(clause["cats"].items(), key=operator.itemgetter(1))[0]
            if not clause["has_ent"]:
                # an entity-less clause extends the most recent anamnesis, if any
                if patient_information:
                    patient_information[-1][1].append(classification)
                continue
            entity = str(clause["ent_name"]).replace(" ", "_").strip().lower()
            # Collect patient information
            # NOTE(review): `entity` is always a str here (str(None) -> "none"),
            # so this check never filters; presumably
            # `clause["ent_name"] is not None` was intended -- confirm upstream.
            if classification == "ANAMNESIS" and entity is not None:
                patient_information.append((entity, []))
            # Collect health effects
            if entity is not None:
                if entity not in health_effects:
                    health_effects[entity] = {
                        "effects": [],
                        "effect": "NEUTRAL",
                        # original chained two identical .replace("_", "") calls;
                        # a single call is equivalent
                        "label": str(clause["blinder"]).replace("_", ""),
                        "text": clause["ent_name"],
                    }
                health_effects[entity]["effects"].append(classification)
        # Add patient information to the list of health effects
        # (removed a dead `score = 0` assignment that was never used here)
        for entity_name, follow_up in patient_information:
            health_effects[entity_name]["effects"] += follow_up
        # Aggregate health effects by majority vote
        for entity in health_effects:
            score = 0
            for classification in health_effects[entity]["effects"]:
                if classification == "POSITIVE":
                    score += 1
                elif classification == "NEGATIVE":
                    score -= 1
            if score > 0:
                aggregated_classification = "POSITIVE"
            elif score < 0:
                aggregated_classification = "NEGATIVE"
            else:
                aggregated_classification = "NEUTRAL"
            health_effects[entity]["effect"] = aggregated_classification
        doc.set_extension("health_effects", default={}, force=True)
        doc._.health_effects = health_effects
        return doc
| 33.487179 | 100 | 0.551685 |
38aaf0c133c21fcb8a1643ad7457797efa01cafd | 17,915 | py | Python | rootfs/usr/lib/python3/dist-packages/numpy/polynomial/tests/test_hermite.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | [
"MIT"
] | null | null | null | rootfs/usr/lib/python3/dist-packages/numpy/polynomial/tests/test_hermite.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | [
"MIT"
] | null | null | null | rootfs/usr/lib/python3/dist-packages/numpy/polynomial/tests/test_hermite.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | [
"MIT"
] | null | null | null | """Tests for hermendre module.
"""
import numpy as np
import numpy.polynomial.hermite as herm
import numpy.polynomial.polynomial as poly
from numpy.testing import *
# Power-basis coefficient vectors of the "physicists'" Hermite polynomials
# H_0..H_9, used as ground truth for herm2poly/poly2herm round trips.
H0 = np.array([ 1])
H1 = np.array([0, 2])
H2 = np.array([ -2, 0, 4])
H3 = np.array([0, -12, 0, 8])
H4 = np.array([ 12, 0, -48, 0, 16])
H5 = np.array([0, 120, 0, -160, 0, 32])
H6 = np.array([-120, 0, 720, 0, -480, 0, 64])
H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128])
H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256])
H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512])
Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9]
def trim(x):
    """Discard trailing Hermite-series coefficients below the 1e-6 tolerance."""
    return herm.hermtrim(x, 1e-6)
class TestConstants(TestCase):
    """Checks of the module-level Hermite constants."""

    def test_hermdomain(self):
        assert_equal(herm.hermdomain, [-1, 1])

    def test_hermzero(self):
        assert_equal(herm.hermzero, [0])

    def test_hermone(self):
        assert_equal(herm.hermone, [1])

    def test_hermx(self):
        assert_equal(herm.hermx, [0, .5])
class TestArithmetic(TestCase) :
    """Tests of Hermite-series arithmetic against power-basis evaluations."""
    # precomputed power-basis evaluations of H_0..H_9 on a fixed grid
    x = np.linspace(-3, 3, 100)
    y0 = poly.polyval(x, H0)
    y1 = poly.polyval(x, H1)
    y2 = poly.polyval(x, H2)
    y3 = poly.polyval(x, H3)
    y4 = poly.polyval(x, H4)
    y5 = poly.polyval(x, H5)
    y6 = poly.polyval(x, H6)
    y7 = poly.polyval(x, H7)
    y8 = poly.polyval(x, H8)
    y9 = poly.polyval(x, H9)
    y = [y0, y1, y2, y3, y4, y5, y6, y7, y8, y9]
    def test_hermval(self) :
        # (removed a dead inner helper `f` and a stray `ser = np.zeros`
        # assignment that bound the function object and was never used)
        # check empty input
        assert_equal(herm.hermval([], [1]).size, 0)
        # check normal input
        for i in range(10) :
            msg = "At i=%d" % i
            tgt = self.y[i]
            res = herm.hermval(self.x, [0]*i + [1])
            assert_almost_equal(res, tgt, err_msg=msg)
        # check that shape is preserved
        for i in range(3) :
            dims = [2]*i
            x = np.zeros(dims)
            assert_equal(herm.hermval(x, [1]).shape, dims)
            assert_equal(herm.hermval(x, [1,0]).shape, dims)
            assert_equal(herm.hermval(x, [1,0,0]).shape, dims)
    def test_hermadd(self) :
        for i in range(5) :
            for j in range(5) :
                msg = "At i=%d, j=%d" % (i,j)
                tgt = np.zeros(max(i,j) + 1)
                tgt[i] += 1
                tgt[j] += 1
                res = herm.hermadd([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_hermsub(self) :
        for i in range(5) :
            for j in range(5) :
                msg = "At i=%d, j=%d" % (i,j)
                tgt = np.zeros(max(i,j) + 1)
                tgt[i] += 1
                tgt[j] -= 1
                res = herm.hermsub([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_hermmulx(self):
        assert_equal(herm.hermmulx([0]), [0])
        assert_equal(herm.hermmulx([1]), [0,.5])
        for i in range(1, 5):
            ser = [0]*i + [1]
            tgt = [0]*(i - 1) + [i, 0, .5]
            assert_equal(herm.hermmulx(ser), tgt)
    def test_hermmul(self) :
        # check values of result
        for i in range(5) :
            pol1 = [0]*i + [1]
            val1 = herm.hermval(self.x, pol1)
            for j in range(5) :
                msg = "At i=%d, j=%d" % (i,j)
                pol2 = [0]*j + [1]
                val2 = herm.hermval(self.x, pol2)
                pol3 = herm.hermmul(pol1, pol2)
                val3 = herm.hermval(self.x, pol3)
                assert_(len(pol3) == i + j + 1, msg)
                assert_almost_equal(val3, val1*val2, err_msg=msg)
    def test_hermdiv(self) :
        for i in range(5) :
            for j in range(5) :
                msg = "At i=%d, j=%d" % (i,j)
                ci = [0]*i + [1]
                cj = [0]*j + [1]
                tgt = herm.hermadd(ci, cj)
                quo, rem = herm.hermdiv(tgt, ci)
                res = herm.hermadd(herm.hermmul(quo, ci), rem)
                assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestCalculus(TestCase) :
    """Tests of Hermite-series integration and differentiation."""
    def test_hermint(self) :
        # check exceptions
        assert_raises(ValueError, herm.hermint, [0], .5)
        assert_raises(ValueError, herm.hermint, [0], -1)
        assert_raises(ValueError, herm.hermint, [0], 1, [0,0])
        # test integration of zero polynomial
        for i in range(2, 5):
            k = [0]*(i - 2) + [1]
            res = herm.hermint([0], m=i, k=k)
            assert_almost_equal(res, [0, .5])
        # check single integration with integration constant
        for i in range(5) :
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [1/scl]
            hermpol = herm.poly2herm(pol)
            hermint = herm.hermint(hermpol, m=1, k=[i])
            res = herm.herm2poly(hermint)
            assert_almost_equal(trim(res), trim(tgt))
        # check single integration with integration constant and lbnd
        for i in range(5) :
            scl = i + 1
            pol = [0]*i + [1]
            hermpol = herm.poly2herm(pol)
            hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1)
            assert_almost_equal(herm.hermval(-1, hermint), i)
        # check single integration with integration constant and scaling
        for i in range(5) :
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [2/scl]
            hermpol = herm.poly2herm(pol)
            hermint = herm.hermint(hermpol, m=1, k=[i], scl=2)
            res = herm.herm2poly(hermint)
            assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with default k
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = herm.hermint(tgt, m=1)
                res = herm.hermint(pol, m=j)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with defined k
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = herm.hermint(tgt, m=1, k=[k])
                res = herm.hermint(pol, m=j, k=list(range(j)))
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with lbnd
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1)
                res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with scaling
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = herm.hermint(tgt, m=1, k=[k], scl=2)
                res = herm.hermint(pol, m=j, k=list(range(j)), scl=2)
                assert_almost_equal(trim(res), trim(tgt))
    def test_hermder(self) :
        # check exceptions
        assert_raises(ValueError, herm.hermder, [0], .5)
        assert_raises(ValueError, herm.hermder, [0], -1)
        # check that zeroth deriviative does nothing
        for i in range(5) :
            tgt = [1] + [0]*i
            res = herm.hermder(tgt, m=0)
            assert_equal(trim(res), trim(tgt))
        # check that derivation is the inverse of integration
        for i in range(5) :
            for j in range(2,5) :
                tgt = [1] + [0]*i
                res = herm.hermder(herm.hermint(tgt, m=j), m=j)
                assert_almost_equal(trim(res), trim(tgt))
        # check derivation with scaling
        for i in range(5) :
            for j in range(2,5) :
                tgt = [1] + [0]*i
                res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5)
                assert_almost_equal(trim(res), trim(tgt))
class TestMisc(TestCase) :
def test_hermfromroots(self) :
res = herm.hermfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1,5) :
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = herm.hermfromroots(roots)
res = herm.hermval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(herm.herm2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_hermroots(self) :
assert_almost_equal(herm.hermroots([1]), [])
assert_almost_equal(herm.hermroots([1, 1]), [-.5])
for i in range(2,5) :
tgt = np.linspace(-1, 1, i)
res = herm.hermroots(herm.hermfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_hermvander(self) :
# check for 1d x
x = np.arange(3)
v = herm.hermvander(x, 3)
assert_(v.shape == (3,4))
for i in range(4) :
coef = [0]*i + [1]
assert_almost_equal(v[...,i], herm.hermval(x, coef))
# check for 2d x
x = np.array([[1,2],[3,4],[5,6]])
v = herm.hermvander(x, 3)
assert_(v.shape == (3,2,4))
for i in range(4) :
coef = [0]*i + [1]
assert_almost_equal(v[...,i], herm.hermval(x, coef))
def test_hermfit(self) :
def f(x) :
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, herm.hermfit, [1], [1], -1)
assert_raises(TypeError, herm.hermfit, [[1]], [1], 0)
assert_raises(TypeError, herm.hermfit, [], [1], 0)
assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0)
assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0)
assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0)
assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1,1])
# Test fit
x = np.linspace(0,2)
y = f(x)
#
coef3 = herm.hermfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(herm.hermval(x, coef3), y)
#
coef4 = herm.hermfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(herm.hermval(x, coef4), y)
#
coef2d = herm.hermfit(x, np.array([y,y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3,coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = herm.hermfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = herm.hermfit(x, np.array([yw,yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
def test_hermtrim(self) :
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, herm.hermtrim, coef, -1)
# Test results
assert_equal(herm.hermtrim(coef), coef[:-1])
assert_equal(herm.hermtrim(coef, 1), coef[:-3])
assert_equal(herm.hermtrim(coef, 2), [0])
def test_hermline(self) :
assert_equal(herm.hermline(3,4), [3, 2])
def test_herm2poly(self) :
for i in range(10) :
assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i])
def test_poly2herm(self) :
for i in range(10) :
assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1])
def assert_poly_almost_equal(p1, p2):
    """Assert that two polynomial objects agree in coefficients and domain."""
    assert_almost_equal(p1.coef, p2.coef)
    assert_equal(p1.domain, p2.domain)
class TestHermiteClass(TestCase) :
    """Tests of the object-oriented Hermite class interface."""
    # small fixture polynomials shared by all tests below
    p1 = herm.Hermite([1,2,3])
    p2 = herm.Hermite([1,2,3], [0,1])
    p3 = herm.Hermite([1,2])
    p4 = herm.Hermite([2,2,3])
    p5 = herm.Hermite([3,2,3])
    def test_equal(self) :
        assert_(self.p1 == self.p1)
        assert_(self.p2 == self.p2)
        assert_(not self.p1 == self.p2)
        assert_(not self.p1 == self.p3)
        assert_(not self.p1 == [1,2,3])
    def test_not_equal(self) :
        assert_(not self.p1 != self.p1)
        assert_(not self.p2 != self.p2)
        assert_(self.p1 != self.p2)
        assert_(self.p1 != self.p3)
        assert_(self.p1 != [1,2,3])
    def test_add(self) :
        tgt = herm.Hermite([2,4,6])
        assert_(self.p1 + self.p1 == tgt)
        assert_(self.p1 + [1,2,3] == tgt)
        assert_([1,2,3] + self.p1 == tgt)
    def test_sub(self) :
        tgt = herm.Hermite([1])
        assert_(self.p4 - self.p1 == tgt)
        assert_(self.p4 - [1,2,3] == tgt)
        assert_([2,2,3] - self.p1 == tgt)
    def test_mul(self) :
        tgt = herm.Hermite([ 81., 52., 82., 12., 9.])
        assert_poly_almost_equal(self.p1 * self.p1, tgt)
        assert_poly_almost_equal(self.p1 * [1,2,3], tgt)
        assert_poly_almost_equal([1,2,3] * self.p1, tgt)
    def test_floordiv(self) :
        tgt = herm.Hermite([1])
        assert_(self.p4 // self.p1 == tgt)
        assert_(self.p4 // [1,2,3] == tgt)
        assert_([2,2,3] // self.p1 == tgt)
    def test_mod(self) :
        tgt = herm.Hermite([1])
        assert_((self.p4 % self.p1) == tgt)
        assert_((self.p4 % [1,2,3]) == tgt)
        assert_(([2,2,3] % self.p1) == tgt)
    def test_divmod(self) :
        tquo = herm.Hermite([1])
        trem = herm.Hermite([2])
        quo, rem = divmod(self.p5, self.p1)
        assert_(quo == tquo and rem == trem)
        quo, rem = divmod(self.p5, [1,2,3])
        assert_(quo == tquo and rem == trem)
        quo, rem = divmod([3,2,3], self.p1)
        assert_(quo == tquo and rem == trem)
    def test_pow(self) :
        tgt = herm.Hermite([1])
        for i in range(5) :
            res = self.p1**i
            assert_(res == tgt)
            tgt = tgt*self.p1
    def test_call(self) :
        # domain = [-1, 1]
        x = np.linspace(-1, 1)
        tgt = 3*(4*x**2 - 2) + 2*(2*x) + 1
        assert_almost_equal(self.p1(x), tgt)
        # domain = [0, 1]
        x = np.linspace(0, 1)
        xx = 2*x - 1
        assert_almost_equal(self.p2(x), self.p1(xx))
    def test_degree(self) :
        assert_equal(self.p1.degree(), 2)
    def test_cutdeg(self) :
        assert_raises(ValueError, self.p1.cutdeg, .5)
        assert_raises(ValueError, self.p1.cutdeg, -1)
        assert_equal(len(self.p1.cutdeg(3)), 3)
        assert_equal(len(self.p1.cutdeg(2)), 3)
        assert_equal(len(self.p1.cutdeg(1)), 2)
        assert_equal(len(self.p1.cutdeg(0)), 1)
    def test_convert(self) :
        x = np.linspace(-1,1)
        p = self.p1.convert(domain=[0,1])
        assert_almost_equal(p(x), self.p1(x))
    def test_mapparms(self) :
        parms = self.p2.mapparms()
        assert_almost_equal(parms, [-1, 2])
    def test_trim(self) :
        coef = [1, 1e-6, 1e-12, 0]
        p = herm.Hermite(coef)
        assert_equal(p.trim().coef, coef[:3])
        assert_equal(p.trim(1e-10).coef, coef[:2])
        assert_equal(p.trim(1e-5).coef, coef[:1])
    def test_truncate(self) :
        assert_raises(ValueError, self.p1.truncate, .5)
        assert_raises(ValueError, self.p1.truncate, 0)
        assert_equal(len(self.p1.truncate(4)), 3)
        assert_equal(len(self.p1.truncate(3)), 3)
        assert_equal(len(self.p1.truncate(2)), 2)
        assert_equal(len(self.p1.truncate(1)), 1)
    def test_copy(self) :
        p = self.p1.copy()
        assert_(self.p1 == p)
    def test_integ(self) :
        p = self.p2.integ()
        assert_almost_equal(p.coef, herm.hermint([1,2,3], 1, 0, scl=.5))
        p = self.p2.integ(lbnd=0)
        assert_almost_equal(p(0), 0)
        p = self.p2.integ(1, 1)
        assert_almost_equal(p.coef, herm.hermint([1,2,3], 1, 1, scl=.5))
        p = self.p2.integ(2, [1, 2])
        assert_almost_equal(p.coef, herm.hermint([1,2,3], 2, [1,2], scl=.5))
    def test_deriv(self) :
        p = self.p2.integ(2, [1, 2])
        assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef)
        assert_almost_equal(p.deriv(2).coef, self.p2.coef)
    def test_roots(self) :
        p = herm.Hermite(herm.poly2herm([0, -1, 0, 1]), [0, 1])
        res = p.roots()
        tgt = [0, .5, 1]
        assert_almost_equal(res, tgt)
    def test_linspace(self):
        xdes = np.linspace(0, 1, 20)
        ydes = self.p2(xdes)
        xres, yres = self.p2.linspace(20)
        assert_almost_equal(xres, xdes)
        assert_almost_equal(yres, ydes)
    def test_fromroots(self) :
        roots = [0, .5, 1]
        p = herm.Hermite.fromroots(roots, domain=[0, 1])
        res = p.coef
        tgt = herm.poly2herm([0, -1, 0, 1])
        assert_almost_equal(res, tgt)
    def test_fit(self) :
        def f(x) :
            return x*(x - 1)*(x - 2)
        x = np.linspace(0,3)
        y = f(x)
        # test default value of domain
        p = herm.Hermite.fit(x, y, 3)
        assert_almost_equal(p.domain, [0,3])
        # test that fit works in given domains
        p = herm.Hermite.fit(x, y, 3, None)
        assert_almost_equal(p(x), y)
        assert_almost_equal(p.domain, [0,3])
        p = herm.Hermite.fit(x, y, 3, [])
        assert_almost_equal(p(x), y)
        assert_almost_equal(p.domain, [-1, 1])
        # test that fit accepts weights.
        w = np.zeros_like(x)
        yw = y.copy()
        w[1::2] = 1
        yw[0::2] = 0
        p = herm.Hermite.fit(x, yw, 3, w=w)
        assert_almost_equal(p(x), y)
    def test_identity(self) :
        x = np.linspace(0,3)
        p = herm.Hermite.identity()
        assert_almost_equal(p(x), x)
        p = herm.Hermite.identity([1,3])
        assert_almost_equal(p(x), x)
#
if __name__ == "__main__":
    # NOTE(review): run_module_suite is provided by `from numpy.testing import *`
    # and was removed from recent numpy releases -- confirm before running
    # this file standalone.
    run_module_suite()
| 33.299257 | 78 | 0.51242 |
a68de5e58838d385cbaed0c1b6a363f8cdf83c2e | 18,457 | py | Python | anndata/tests/test_readwrite.py | chris-rands/anndata | aae267645937d849d2a1ea0af4c601d9d0c11122 | [
"BSD-3-Clause"
] | null | null | null | anndata/tests/test_readwrite.py | chris-rands/anndata | aae267645937d849d2a1ea0af4c601d9d0c11122 | [
"BSD-3-Clause"
] | null | null | null | anndata/tests/test_readwrite.py | chris-rands/anndata | aae267645937d849d2a1ea0af4c601d9d0c11122 | [
"BSD-3-Clause"
] | null | null | null | from importlib.util import find_spec
from pathlib import Path
from string import ascii_letters
import tempfile
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical
import pytest
from scipy.sparse import csr_matrix, csc_matrix
import anndata as ad
from anndata.tests.helpers import gen_adata, asarray, assert_equal
HERE = Path(__file__).parent
# ------------------------------------------------------------------------------
# Some test data
# ------------------------------------------------------------------------------
X_sp = csr_matrix([[1, 0, 0], [3, 0, 0], [5, 6, 0], [0, 0, 0], [0, 0, 0]])
X_list = [[1, 0], [3, 0], [5, 6]] # data matrix of shape n_obs x n_vars
obs_dict = dict( # annotation of observations / rows
row_names=["name1", "name2", "name3"], # row annotation
oanno1=["cat1", "cat2", "cat2"], # categorical annotation
oanno1b=["cat1", "cat1", "cat1",], # categorical annotation with one category
oanno1c=["cat1", "cat1", np.nan,], # categorical annotation with a missing value
oanno2=["o1", "o2", "o3"], # string annotation
oanno3=[2.1, 2.2, 2.3], # float annotation
oanno4=[3.3, 1.1, 2.2], # float annotation
)
var_dict = dict( # annotation of variables / columns
vanno1=[3.1, 3.2],
vanno2=["cat1", "cat1"], # categorical annotation
vanno3=[2.1, 2.2], # float annotation
vanno4=[3.3, 1.1], # float annotation
)
uns_dict = dict( # unstructured annotation
oanno1_colors=["#000000", "#FFFFFF"],
uns2=["some annotation"],
uns3="another annotation",
uns4=dict(
a=1,
b=[2, 3],
c="4",
d=["some", "strings"],
e=np.ones(5),
f=np.int32(7),
g=[1, np.float32(2.5)],
),
)
@pytest.fixture(params=[{}, dict(compression="gzip")])
def dataset_kwargs(request):
    """Extra kwargs forwarded to write(): no options, or gzip compression."""
    return request.param
@pytest.fixture(params=["h5ad", "zarr"])
def diskfmt(request):
    """On-disk format under test; used to look up read_/write_ helpers by name."""
    return request.param
@pytest.fixture
def rw(backing_h5ad):
    """Return (reloaded, original) for a generated AnnData written to h5ad."""
    M, N = 100, 101
    orig = gen_adata((M, N))
    orig.write(backing_h5ad)
    curr = ad.read(backing_h5ad)
    return curr, orig
diskfmt2 = diskfmt  # second, independently parametrized copy of the diskfmt fixture
# ------------------------------------------------------------------------------
# The test functions
# ------------------------------------------------------------------------------
@pytest.mark.parametrize("typ", [np.array, csr_matrix])
def test_readwrite_roundtrip(typ, tmp_path, diskfmt, diskfmt2):
    """Round trip an AnnData through every ordered pair of disk formats.

    Writes the object with `diskfmt`, reads it back, writes that copy with
    `diskfmt2`, and checks both on-disk generations match the original.
    """
    tmpdir = Path(tmp_path)
    pth1 = tmpdir / f"first.{diskfmt}"
    pth2 = tmpdir / f"second.{diskfmt2}"

    adata1 = ad.AnnData(typ(X_list), obs=obs_dict, var=var_dict, uns=uns_dict)
    getattr(adata1, f"write_{diskfmt}")(pth1)
    adata2 = getattr(ad, f"read_{diskfmt}")(pth1)
    getattr(adata2, f"write_{diskfmt2}")(pth2)
    adata3 = getattr(ad, f"read_{diskfmt2}")(pth2)

    # Both generations must equal the in-memory original.  (The original
    # code asserted `adata2 == adata1` twice; the duplicate is dropped.)
    assert_equal(adata2, adata1)
    assert_equal(adata3, adata1)
@pytest.mark.parametrize("typ", [np.array, csr_matrix])
def test_readwrite_h5ad(typ, dataset_kwargs, backing_h5ad):
    """Write → read → re-write → re-read h5ad and check everything survives.

    The intermediate file lives in a TemporaryDirectory that is now cleaned
    up by a context manager; the original created the directory without one
    and leaked it until interpreter shutdown.
    """
    X = typ(X_list)
    adata_src = ad.AnnData(X, obs=obs_dict, var=var_dict, uns=uns_dict)
    assert not is_categorical(adata_src.obs["oanno1"])
    adata_src.raw = adata_src

    with tempfile.TemporaryDirectory() as tmpdir:
        mid_pth = Path(tmpdir) / "mid.h5ad"
        adata_src.write(backing_h5ad, **dataset_kwargs)
        adata_mid = ad.read(backing_h5ad)
        adata_mid.write(mid_pth, **dataset_kwargs)
        # read_h5ad loads fully into memory, so the temp dir can be removed after.
        adata = ad.read_h5ad(mid_pth)

    # String columns come back categorical; plain string annotation does not.
    assert is_categorical(adata.obs["oanno1"])
    assert not is_categorical(adata.obs["oanno2"])
    assert adata.obs.index.tolist() == ["name1", "name2", "name3"]
    assert adata.obs["oanno1"].cat.categories.tolist() == ["cat1", "cat2"]
    assert is_categorical(adata.raw.var["vanno2"])
    # obs/var metadata (including index dtype), raw and uns must round trip.
    assert np.all(adata.obs == adata_src.obs)
    assert np.all(adata.var == adata_src.var)
    assert np.all(adata.var.index == adata_src.var.index)
    assert adata.var.index.dtype == adata_src.var.index.dtype
    assert type(adata.raw.X) is type(adata_src.raw.X)
    assert type(adata.raw.varm) is type(adata_src.raw.varm)
    assert np.allclose(asarray(adata.raw.X), asarray(adata_src.raw.X))
    assert np.all(adata.raw.var == adata_src.raw.var)
    assert isinstance(adata.uns["uns4"]["a"], (int, np.integer))
    assert isinstance(adata_src.uns["uns4"]["a"], (int, np.integer))
    assert type(adata.uns["uns4"]["c"]) is type(adata_src.uns["uns4"]["c"])
    assert_equal(adata, adata_src)
@pytest.mark.skipif(not find_spec("zarr"), reason="Zarr is not installed")
@pytest.mark.parametrize("typ", [np.array, csr_matrix])
def test_readwrite_zarr(typ, tmp_path):
    """Write an AnnData (with .raw) to a zarr store and check it round trips."""
    X = typ(X_list)
    adata_src = ad.AnnData(X, obs=obs_dict, var=var_dict, uns=uns_dict)
    adata_src.raw = adata_src
    assert not is_categorical(adata_src.obs["oanno1"])
    adata_src.write_zarr(tmp_path / "test_zarr_dir", chunks=True)
    adata = ad.read_zarr(tmp_path / "test_zarr_dir")
    # String columns come back categorical; plain string annotation does not.
    assert is_categorical(adata.obs["oanno1"])
    assert not is_categorical(adata.obs["oanno2"])
    assert adata.obs.index.tolist() == ["name1", "name2", "name3"]
    assert adata.obs["oanno1"].cat.categories.tolist() == ["cat1", "cat2"]
    assert is_categorical(adata.raw.var["vanno2"])
    # obs/var metadata (including index dtype), raw and uns must round trip.
    assert np.all(adata.obs == adata_src.obs)
    assert np.all(adata.var == adata_src.var)
    assert np.all(adata.var.index == adata_src.var.index)
    assert adata.var.index.dtype == adata_src.var.index.dtype
    assert type(adata.raw.X) is type(adata_src.raw.X)
    assert np.allclose(asarray(adata.raw.X), asarray(adata_src.raw.X))
    assert np.all(adata.raw.var == adata_src.raw.var)
    assert isinstance(adata.uns["uns4"]["a"], (int, np.integer))
    assert isinstance(adata_src.uns["uns4"]["a"], (int, np.integer))
    assert type(adata.uns["uns4"]["c"]) is type(adata_src.uns["uns4"]["c"])
    assert_equal(adata, adata_src)
@pytest.mark.parametrize("typ", [np.array, csr_matrix])
def test_readwrite_maintain_X_dtype(typ, backing_h5ad):
    """The X dtype requested at construction must survive a write/read cycle."""
    original = ad.AnnData(typ(X_list), dtype="int8")
    original.write(backing_h5ad)
    reloaded = ad.read(backing_h5ad)
    assert reloaded.X.dtype == original.X.dtype
def test_read_write_maintain_obsmvarm_dtypes(rw):
    """obsm/varm entries (dense, sparse, DataFrame) must keep type and values."""
    curr, orig = rw
    # dense arrays
    assert type(orig.obsm["array"]) is type(curr.obsm["array"])
    assert np.all(orig.obsm["array"] == curr.obsm["array"])
    assert np.all(orig.varm["array"] == curr.varm["array"])
    # sparse matrices (compare via != so any differing element is detected)
    assert type(orig.obsm["sparse"]) is type(curr.obsm["sparse"])
    assert not np.any((orig.obsm["sparse"] != curr.obsm["sparse"]).toarray())
    assert not np.any((orig.varm["sparse"] != curr.varm["sparse"]).toarray())
    # data frames
    assert type(orig.obsm["df"]) is type(curr.obsm["df"])
    assert np.all(orig.obsm["df"] == curr.obsm["df"])
    assert np.all(orig.varm["df"] == curr.varm["df"])
def test_maintain_layers(rw):
    """Dense and sparse layers must round trip with type and values intact."""
    reloaded, original = rw
    assert type(original.layers["array"]) is type(reloaded.layers["array"])
    assert np.all(original.layers["array"] == reloaded.layers["array"])
    assert type(original.layers["sparse"]) is type(reloaded.layers["sparse"])
    assert not np.any((original.layers["sparse"] != reloaded.layers["sparse"]).toarray())
@pytest.mark.parametrize("typ", [np.array, csr_matrix])
def test_readwrite_h5ad_one_dimension(typ, backing_h5ad):
    """A single-column slice must keep its (n_obs, 1) shape through h5ad."""
    source = ad.AnnData(typ(X_list), obs=obs_dict, var=var_dict, uns=uns_dict)
    single_col = source[:, 0].copy()
    single_col.write(backing_h5ad)
    reloaded = ad.read(backing_h5ad)
    assert reloaded.shape == (3, 1)
    assert_equal(reloaded, single_col)
@pytest.mark.parametrize("typ", [np.array, csr_matrix])
def test_readwrite_backed(typ, backing_h5ad):
    """Writing via backed mode (assign .filename, then write()) must round trip."""
    X = typ(X_list)
    adata_src = ad.AnnData(X, obs=obs_dict, var=var_dict, uns=uns_dict)
    adata_src.filename = backing_h5ad  # change to backed mode
    adata_src.write()
    adata = ad.read(backing_h5ad)
    # String columns come back categorical; plain string annotation does not.
    assert is_categorical(adata.obs["oanno1"])
    assert not is_categorical(adata.obs["oanno2"])
    assert adata.obs.index.tolist() == ["name1", "name2", "name3"]
    assert adata.obs["oanno1"].cat.categories.tolist() == ["cat1", "cat2"]
    assert_equal(adata, adata_src)
@pytest.mark.parametrize("typ", [np.array, csr_matrix, csc_matrix])
def test_readwrite_equivalent_h5ad_zarr(typ):
    """h5ad and zarr round trips of the same object must agree exactly.

    Uses a TemporaryDirectory context manager so the files are removed when
    the test ends; the original created the directory without a context
    manager and leaked it until interpreter shutdown.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdirpth = Path(tmpdir)
        h5ad_pth = tmpdirpth / "adata.h5ad"
        zarr_pth = tmpdirpth / "adata.zarr"

        M, N = 100, 101
        adata = gen_adata((M, N), X_type=typ)
        adata.raw = adata

        adata.write_h5ad(h5ad_pth)
        adata.write_zarr(zarr_pth)
        from_h5ad = ad.read_h5ad(h5ad_pth)
        from_zarr = ad.read_zarr(zarr_pth)
        assert_equal(from_h5ad, from_zarr, exact=True)
def test_changed_obs_var_names(tmp_path, diskfmt):
    """Index names of obs/var must round trip and count towards exact equality."""
    filepth = tmp_path / f"test.{diskfmt}"
    orig = gen_adata((10, 10))
    orig.obs_names.name = "obs"
    orig.var_names.name = "var"
    modified = orig.copy()
    modified.obs_names.name = "cells"
    modified.var_names.name = "genes"
    getattr(orig, f"write_{diskfmt}")(filepth)
    read = getattr(ad, f"read_{diskfmt}")(filepth)
    assert_equal(orig, read, exact=True)
    assert orig.var.index.name == "var"
    assert read.obs.index.name == "obs"
    # Differently named indices must be detected by the exact comparison.
    with pytest.raises(AssertionError):
        assert_equal(orig, modified, exact=True)
    with pytest.raises(AssertionError):
        assert_equal(read, modified, exact=True)
@pytest.mark.skipif(not find_spec("loompy"), reason="Loompy is not installed")
@pytest.mark.parametrize("typ", [np.array, csr_matrix])
@pytest.mark.parametrize("obsm_names", [{}, dict(X_composed=["oanno3", "oanno4"])])
@pytest.mark.parametrize("varm_names", [{}, dict(X_composed2=["vanno3", "vanno4"])])
def test_readwrite_loom(typ, obsm_names, varm_names, tmp_path):
    """Loom round trip: X, obsm/varm, and (with cleanup=True) uns columns."""
    X = typ(X_list)
    adata_src = ad.AnnData(X, obs=obs_dict, var=var_dict, uns=uns_dict)
    adata_src.obsm["X_a"] = np.zeros((adata_src.n_obs, 2))
    adata_src.varm["X_b"] = np.zeros((adata_src.n_vars, 3))
    adata_src.write_loom(tmp_path / "test.loom", write_obsm_varm=True)
    adata = ad.read_loom(
        tmp_path / "test.loom",
        sparse=typ is csr_matrix,
        obsm_names=obsm_names,
        varm_names=varm_names,
        cleanup=True,
    )
    if isinstance(X, np.ndarray):
        assert np.allclose(adata.X, X)
    else:
        # TODO: this should not be necessary
        assert np.allclose(adata.X.toarray(), X.toarray())
    assert "X_a" in adata.obsm_keys() and adata.obsm["X_a"].shape[1] == 2
    assert "X_b" in adata.varm_keys() and adata.varm["X_b"].shape[1] == 3
    # as we called with `cleanup=True` these columns land in uns
    # (oanno1b/vanno2 each carry a single category — presumably why; confirm)
    assert "oanno1b" in adata.uns["loom-obs"]
    assert "vanno2" in adata.uns["loom-var"]
    # composed obsm/varm requested via *_names must exist with the right width
    for k, v in obsm_names.items():
        assert k in adata.obsm_keys() and adata.obsm[k].shape[1] == len(v)
    for k, v in varm_names.items():
        assert k in adata.varm_keys() and adata.varm[k].shape[1] == len(v)
def test_read_csv():
    """read_csv must pick up row names, column names and values."""
    loaded = ad.read_csv(HERE / "adata.csv")
    assert list(loaded.obs_names) == ["r1", "r2", "r3"]
    assert list(loaded.var_names) == ["c1", "c2"]
    assert loaded.X.tolist() == X_list
def test_read_tsv_strpath():
    """read_text must accept a plain string path and a tab delimiter."""
    loaded = ad.read_text(str(HERE / "adata-comments.tsv"), "\t")
    assert list(loaded.obs_names) == ["r1", "r2", "r3"]
    assert list(loaded.var_names) == ["c1", "c2"]
    assert loaded.X.tolist() == X_list
def test_read_tsv_iter():
    """read_text must also accept an already-open file handle."""
    with (HERE / "adata-comments.tsv").open() as handle:
        loaded = ad.read_text(handle, "\t")
    assert list(loaded.obs_names) == ["r1", "r2", "r3"]
    assert list(loaded.var_names) == ["c1", "c2"]
    assert loaded.X.tolist() == X_list
@pytest.mark.parametrize("typ", [np.array, csr_matrix])
def test_write_csv(typ, tmp_path):
    """write_csvs must run without error, including the data matrix itself."""
    source = ad.AnnData(typ(X_list), obs=obs_dict, var=var_dict, uns=uns_dict)
    source.write_csvs(tmp_path / "test_csv_dir", skip_data=False)
@pytest.mark.parametrize(
    ["read", "write", "name"],
    [
        pytest.param(ad.read_h5ad, ad._io.write._write_h5ad, "test_empty.h5ad"),
        pytest.param(
            ad.read_loom,
            ad._io.write_loom,
            "test_empty.loom",
            marks=pytest.mark.xfail(reason="Loom can’t handle 0×0 matrices"),
        ),
        pytest.param(ad.read_zarr, ad._io.write_zarr, "test_empty.zarr"),
        pytest.param(
            ad.read_zarr,
            ad._io.write_zarr,
            "test_empty.zip",
            marks=pytest.mark.xfail(reason="Zarr zip storage doesn’t seem to work…"),
        ),
    ],
)
def test_readwrite_hdf5_empty(read, write, name, tmp_path):
    """A zero-length uns array must survive every disk format (despite the name,
    this covers zarr and loom too via the parametrization above)."""
    if read is ad.read_zarr:
        pytest.importorskip("zarr")
    adata = ad.AnnData(uns=dict(empty=np.array([], dtype=float)))
    write(tmp_path / name, adata)
    ad_read = read(tmp_path / name)
    assert ad_read.uns["empty"].shape == (0,)
def test_read_excel():
    """read_excel must load the named sheet into X with the requested dtype."""
    loaded = ad.read_excel(HERE / "data/excel.xlsx", "Sheet1", dtype=int)
    assert loaded.X.tolist() == X_list
def test_write_categorical(tmp_path, diskfmt):
    """String-with-NaN and Categorical-with-NaN obs columns must round trip."""
    adata_pth = tmp_path / f"adata.{diskfmt}"
    orig = ad.AnnData(
        X=np.ones((5, 5)),
        obs=pd.DataFrame(
            dict(
                cat1=["a", "a", "b", np.nan, np.nan],
                cat2=pd.Categorical(["a", "a", "b", np.nan, np.nan]),
            )
        ),
    )
    getattr(orig, f"write_{diskfmt}")(adata_pth)
    curr = getattr(ad, f"read_{diskfmt}")(adata_pth)
    # Same missingness pattern, and identical non-missing values.
    assert np.all(orig.obs.notna() == curr.obs.notna())
    assert np.all(orig.obs.stack().dropna() == curr.obs.stack().dropna())
def test_write_categorical_index(tmp_path, diskfmt):
    """A categorical DataFrame index stored inside uns must round trip exactly."""
    adata_pth = tmp_path / f"adata.{diskfmt}"
    orig = ad.AnnData(
        X=np.ones((5, 5)),
        uns={"df": pd.DataFrame(index=pd.Categorical(list("aabcd")))},
    )
    getattr(orig, f"write_{diskfmt}")(adata_pth)
    curr = getattr(ad, f"read_{diskfmt}")(adata_pth)
    # Also covered by next assertion, but checking this value specifically
    pd.testing.assert_index_equal(
        orig.uns["df"].index, curr.uns["df"].index, exact=True
    )
    assert_equal(orig, curr, exact=True)
def test_dataframe_reserved_columns(tmp_path, diskfmt):
    """Writing must refuse column names the on-disk format reserves internally."""
    reserved = ("_index", "__categories")
    adata_pth = tmp_path / f"adata.{diskfmt}"
    orig = ad.AnnData(X=np.ones((5, 5)))
    # Reserved names used as obs columns ...
    for colname in reserved:
        to_write = orig.copy()
        to_write.obs[colname] = np.ones(5)
        with pytest.raises(ValueError) as e:
            getattr(to_write, f"write_{diskfmt}")(adata_pth)
        assert colname in str(e.value)
    # ... and inside DataFrames stored in varm; the error must name the column.
    for colname in reserved:
        to_write = orig.copy()
        to_write.varm["df"] = pd.DataFrame(
            {colname: list("aabcd")}, index=to_write.var_names
        )
        with pytest.raises(ValueError) as e:
            getattr(to_write, f"write_{diskfmt}")(adata_pth)
        assert colname in str(e.value)
def test_write_large_categorical(tmp_path, diskfmt):
    """Round trip ~10k distinct categories across 30k rows."""
    M = 30_000
    N = 1000
    ls = np.array(list(ascii_letters))

    def random_cats(n):
        """Return a set of n unique random strings of 5-29 letters each."""
        cats = {
            "".join(np.random.choice(ls, np.random.choice(range(5, 30))))
            for _ in range(n)
        }
        while len(cats) < n:  # For the rare case that there’s duplicates
            cats |= random_cats(n - len(cats))
        return cats

    cats = np.array(sorted(random_cats(10_000)))
    adata_pth = tmp_path / f"adata.{diskfmt}"
    n_cats = len(np.unique(cats))
    orig = ad.AnnData(
        csr_matrix(([1], ([0], [0])), shape=(M, N)),
        obs=dict(
            # cat1 as plain strings, cat2 as an explicit Categorical
            cat1=cats[np.random.choice(n_cats, M)],
            cat2=pd.Categorical.from_codes(np.random.choice(n_cats, M), cats),
        ),
    )
    getattr(orig, f"write_{diskfmt}")(adata_pth)
    curr = getattr(ad, f"read_{diskfmt}")(adata_pth)
    assert_equal(orig, curr)
def test_zarr_chunk_X(tmp_path):
    """An explicit chunks= argument must be honoured in the zarr store."""
    import zarr

    store_path = Path(tmp_path) / "test.zarr"
    source = gen_adata((100, 100), X_type=np.array)
    source.write_zarr(store_path, chunks=(10, 10))
    root = zarr.open(str(store_path))  # As of v2.3.2 zarr won't take a Path
    assert root["X"].chunks == (10, 10)
    assert_equal(ad.read_zarr(store_path), source)
################################
# Round-tripping scanpy datasets
################################
diskfmt2 = diskfmt  # NOTE(review): duplicates the identical alias defined earlier in this file — one of the two can likely be removed
@pytest.mark.skipif(not find_spec("scanpy"), reason="Scanpy is not installed")
def test_scanpy_pbmc68k(tmp_path, diskfmt, diskfmt2):
    """Round trip the scanpy pbmc68k dataset through two formats in sequence."""
    read1 = lambda pth: getattr(ad, f"read_{diskfmt}")(pth)
    write1 = lambda adata, pth: getattr(adata, f"write_{diskfmt}")(pth)
    read2 = lambda pth: getattr(ad, f"read_{diskfmt2}")(pth)
    write2 = lambda adata, pth: getattr(adata, f"write_{diskfmt2}")(pth)
    filepth1 = tmp_path / f"test1.{diskfmt}"
    filepth2 = tmp_path / f"test2.{diskfmt2}"
    import scanpy as sc
    pbmc = sc.datasets.pbmc68k_reduced()
    write1(pbmc, filepth1)
    from_disk1 = read1(filepth1)  # Do we read okay
    write2(from_disk1, filepth2)  # Can we round trip
    from_disk2 = read2(filepth2)
    assert_equal(pbmc, from_disk1)  # Not expected to be exact due to `nan`s
    assert_equal(pbmc, from_disk2)
@pytest.mark.skipif(not find_spec("scanpy"), reason="Scanpy is not installed")
def test_scanpy_krumsiek11(tmp_path, diskfmt):
    """Exact round trip of the scanpy krumsiek11 dataset."""
    filepth = tmp_path / f"test.{diskfmt}"
    import scanpy as sc
    orig = sc.datasets.krumsiek11()
    del orig.uns["highlights"]  # Can't write int keys
    getattr(orig, f"write_{diskfmt}")(filepth)
    read = getattr(ad, f"read_{diskfmt}")(filepth)
    assert_equal(orig, read, exact=True)
# Checking if we can read legacy zarr files
# TODO: Check how I should add this file to the repo
@pytest.mark.skipif(not find_spec("scanpy"), reason="Scanpy is not installed")
@pytest.mark.skipif(
    not Path(HERE / "data/pbmc68k_reduced_legacy.zarr.zip").is_file(),
    reason="File not present.",
)
def test_backwards_compat_zarr():
    """A store written by anndata 0.6.22.post1 must still load correctly."""
    import scanpy as sc
    import zarr
    pbmc_orig = sc.datasets.pbmc68k_reduced()
    # Old zarr writer couldn't do sparse arrays
    pbmc_orig.raw._X = pbmc_orig.raw.X.toarray()
    del pbmc_orig.uns["neighbors"]
    # This was written out with anndata=0.6.22.post1
    zarrpth = HERE / "data/pbmc68k_reduced_legacy.zarr.zip"
    with zarr.ZipStore(zarrpth, mode="r") as z:
        pbmc_zarr = ad.read_zarr(z)
    assert_equal(pbmc_zarr, pbmc_orig)
| 35.089354 | 85 | 0.64252 |
c470ec1036cb50fe758c534d22da547248931019 | 2,621 | py | Python | keras_rl_dqn.py | bcaramiaux/humane-methods | d0ecfea8e348721e91dd36cf2a17e7868efd48ae | [
"MIT"
] | null | null | null | keras_rl_dqn.py | bcaramiaux/humane-methods | d0ecfea8e348721e91dd36cf2a17e7868efd48ae | [
"MIT"
] | null | null | null | keras_rl_dqn.py | bcaramiaux/humane-methods | d0ecfea8e348721e91dd36cf2a17e7868efd48ae | [
"MIT"
] | 1 | 2020-06-02T10:57:54.000Z | 2020-06-02T10:57:54.000Z | import numpy as np
import gym
import gym_target
import os
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
from rl.callbacks import TrainEpisodeLogger, ModelIntervalCheckpoint, FileLogger
ENV_NAME = 'target-v0'
ENV_NAME = 'CartPole-v0'
# ENV_NAME = 'target-v0'
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
# Next, we build a very simple model.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
# model.add(Dense(16))
# model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
dqn = DQNAgent(model=model,
nb_actions=nb_actions,
memory=memory,
nb_steps_warmup=10,
target_model_update=1e-2,
policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
def build_callbacks(env_name):
checkpoint_weights_filename = 'dqn_' + env_name + '_weights_{step}.h5f'
log_filename = 'dqn_{}_log.json'.format(env_name)
callbacks = [ModelIntervalCheckpoint(checkpoint_weights_filename, interval=5000)]
callbacks += [FileLogger(log_filename, interval=100)]
return callbacks
if os.path.isfile('dqn_{}_weights.h5f'.format(ENV_NAME)):
print('loading')
dqn.load_weights('dqn_{}_weights.h5f'.format(ENV_NAME))
callbacks = build_callbacks(ENV_NAME)
dqn.test(env, nb_episodes=2, verbose=1, nb_max_episode_steps=1000, visualize=False)
#print(history.history)
else:
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
callbacks = build_callbacks(ENV_NAME)
dqn.fit(env, nb_steps=1000, visualize=False, verbose=2, callbacks=callbacks)
# After training is done, we save the final weights.
dqn.save_weights('dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
# # Finally, evaluate our algorithm for 5 episodes.
# dqn.test(env, nb_episodes=5, visualize=True) | 33.602564 | 97 | 0.734834 |
0cfc785bf02a26114ece75ffad0513948cd8ab2b | 706 | py | Python | manage.py | abhiabhi94/sample-django-project | 578ce1a9e575138fe2ec2d8b0f4c2b715c6168c4 | [
"Apache-2.0"
] | null | null | null | manage.py | abhiabhi94/sample-django-project | 578ce1a9e575138fe2ec2d8b0f4c2b715c6168c4 | [
"Apache-2.0"
] | 6 | 2021-03-19T00:23:22.000Z | 2021-09-22T18:39:10.000Z | manage.py | abhiabhi94/sample-django-project | 578ce1a9e575138fe2ec2d8b0f4c2b715c6168c4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
from setup.setup import project_name
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'{}.settings'.format(project_name))
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.416667 | 73 | 0.667139 |
07d21499543443be8421c1cd4bc8ae013bb5c82d | 14,422 | py | Python | nf_core/list.py | Fohlen/tools | 4ab80b83cd8b6562a88bb38b6aa0a3d082fd4de6 | [
"MIT"
] | null | null | null | nf_core/list.py | Fohlen/tools | 4ab80b83cd8b6562a88bb38b6aa0a3d082fd4de6 | [
"MIT"
] | null | null | null | nf_core/list.py | Fohlen/tools | 4ab80b83cd8b6562a88bb38b6aa0a3d082fd4de6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Lists available nf-core pipelines and versions."""
from __future__ import print_function

from collections import OrderedDict
import datetime
import errno
import json
import logging
import os
import re
import subprocess
import sys

import click
import git
import requests
import tabulate
# Set up local caching for requests to speed up remote queries
nf_core.utils.setup_requests_cachedir()
def list_workflows(filter_by=None, sort_by='release', as_json=False):
"""Prints out a list of all nf-core workflows.
Args:
filter_by (list): A list of strings that can be used for filtering.
sort_by (str): workflows can be sorted by keywords. Keyword must be one of
`release` (default), `name`, `stars`.
as_json (boolean): Set to true, if the lists should be printed in JSON.
"""
wfs = Workflows(filter_by, sort_by)
wfs.get_remote_workflows()
wfs.get_local_nf_workflows()
wfs.compare_remote_local()
if as_json:
wfs.print_json()
else:
wfs.print_summary()
class Workflows(object):
    """Workflow container class.

    Is used to collect local and remote nf-core pipelines. Pipelines
    can be sorted, filtered and compared.

    Args:
        filter_by (list): A list of strings that can be used for filtering.
        sort_by (str): workflows can be sorted by keywords. Keyword must be one of
            `release` (default), `name`, `stars`.
    """

    def __init__(self, filter_by=None, sort_by='release'):
        self.remote_workflows = list()
        self.local_workflows = list()
        self.local_unmatched = list()
        self.keyword_filters = filter_by if filter_by is not None else []
        self.sort_workflows_by = sort_by

    def get_remote_workflows(self):
        """Retrieves remote workflows from `nf-co.re <http://nf-co.re>`_.

        Remote workflows are stored in :attr:`self.remote_workflows` list.
        """
        # List all repositories at nf-core
        logging.debug("Fetching list of nf-core workflows")
        nfcore_url = 'http://nf-co.re/pipelines.json'
        response = requests.get(nfcore_url, timeout=10)
        if response.status_code == 200:
            repos = response.json()['remote_workflows']
            for repo in repos:
                self.remote_workflows.append(RemoteWorkflow(repo))

    def get_local_nf_workflows(self):
        """Retrieves local Nextflow workflows.

        Local workflows are stored in :attr:`self.local_workflows` list.
        """
        # Try to guess the local cache directory (much faster than calling nextflow)
        if os.environ.get('NXF_ASSETS'):
            nf_wfdir = os.path.join(os.environ.get('NXF_ASSETS'), 'nf-core')
        else:
            nf_wfdir = os.path.join(os.getenv("HOME"), '.nextflow', 'assets', 'nf-core')
        if os.path.isdir(nf_wfdir):
            logging.debug("Guessed nextflow assets directory - pulling nf-core dirnames")
            for wf_name in os.listdir(nf_wfdir):
                self.local_workflows.append(LocalWorkflow('nf-core/{}'.format(wf_name)))
        # Fetch details about local cached pipelines with `nextflow list`
        else:
            logging.debug("Getting list of local nextflow workflows")
            try:
                with open(os.devnull, 'w') as devnull:
                    nflist_raw = subprocess.check_output(['nextflow', 'list'], stderr=devnull)
            except OSError as e:
                # `os.errno` was removed in Python 3.6+; compare against the errno module.
                if e.errno == errno.ENOENT:
                    raise AssertionError("It looks like Nextflow is not installed. It is required for most nf-core functions.")
                raise  # don't silently swallow unexpected OS errors
            except subprocess.CalledProcessError as e:
                # Build the message explicitly - AssertionError does not interpolate extra args.
                raise AssertionError("`nextflow list` returned non-zero error code: {},\n {}".format(e.returncode, e.output))
            else:
                # check_output returns bytes on Python 3; decode before string matching.
                for wf_name in nflist_raw.decode('utf-8').splitlines():
                    if not wf_name.startswith('nf-core/'):
                        self.local_unmatched.append(wf_name)
                    else:
                        self.local_workflows.append(LocalWorkflow(wf_name))

        # Find additional information about each workflow by checking its git history
        logging.debug("Fetching extra info about {} local workflows".format(len(self.local_workflows)))
        for wf in self.local_workflows:
            wf.get_local_nf_workflow_details()

    def compare_remote_local(self):
        """Matches local to remote workflows.

        If a matching remote workflow is found, the local workflow's Git commit hash
        is compared with the latest one from remote.

        A boolean flag in :attr:`RemoteWorkflow.local_is_latest` is set to True,
        if the local workflow is the latest.
        """
        for rwf in self.remote_workflows:
            for lwf in self.local_workflows:
                if rwf.full_name == lwf.full_name:
                    rwf.local_wf = lwf
                    if rwf.releases:
                        rwf.local_is_latest = rwf.releases[-1]['tag_sha'] == lwf.commit_sha

    def filtered_workflows(self):
        """Filters remote workflows for keywords.

        A workflow is kept only if every keyword appears in its name,
        description or topics.

        Returns:
            list: Filtered remote workflows.
        """
        # If no keywords, don't filter
        if not self.keyword_filters:
            return self.remote_workflows
        filtered_workflows = []
        for wf in self.remote_workflows:
            for k in self.keyword_filters:
                in_name = k in wf.name
                in_desc = k in wf.description
                in_topics = any([k in t for t in wf.topics])
                if not in_name and not in_desc and not in_topics:
                    break
            else:
                # We didn't hit a break, so all keywords were found
                filtered_workflows.append(wf)
        return filtered_workflows

    def print_summary(self):
        """Prints a summary table of all (filtered) pipelines to stdout."""
        filtered_workflows = self.filtered_workflows()

        # Sort by released / dev, then alphabetical
        if not self.sort_workflows_by or self.sort_workflows_by == 'release':
            filtered_workflows.sort(
                key=lambda wf: (
                    (wf.releases[-1].get('published_at_timestamp', 0) if len(wf.releases) > 0 else 0) * -1,
                    wf.full_name.lower()
                )
            )
        # Sort by date pulled
        elif self.sort_workflows_by == 'pulled':
            def sort_pulled_date(wf):
                try:
                    return wf.local_wf.last_pull * -1
                except (AttributeError, TypeError):
                    # No local copy (local_wf is None) or never pulled (last_pull is None)
                    return 0
            filtered_workflows.sort(key=sort_pulled_date)
        # Sort by name
        elif self.sort_workflows_by == 'name':
            filtered_workflows.sort(key=lambda wf: wf.full_name.lower())
        # Sort by stars, then name
        elif self.sort_workflows_by == 'stars':
            filtered_workflows.sort(
                key=lambda wf: (
                    wf.stargazers_count * -1,
                    wf.full_name.lower()
                )
            )

        # Build summary list to print
        summary = list()
        for wf in filtered_workflows:
            version = click.style(wf.releases[-1]['tag_name'], fg='blue') if len(wf.releases) > 0 else click.style('dev', fg='yellow')
            published = wf.releases[-1]['published_at_pretty'] if len(wf.releases) > 0 else '-'
            pulled = wf.local_wf.last_pull_pretty if wf.local_wf is not None else '-'
            if wf.local_wf is not None:
                is_latest = click.style('Yes', fg='green') if wf.local_is_latest else click.style('No', fg='red')
            else:
                is_latest = '-'
            rowdata = [wf.full_name, version, published, pulled, is_latest]
            if self.sort_workflows_by == 'stars':
                rowdata.insert(1, wf.stargazers_count)
            summary.append(rowdata)
        t_headers = ['Name', 'Version', 'Released', 'Last Pulled', 'Have latest release?']
        if self.sort_workflows_by == 'stars':
            t_headers.insert(1, 'Stargazers')

        # Print summary table
        print("", file=sys.stderr)
        print(tabulate.tabulate(summary, headers=t_headers))
        print("", file=sys.stderr)

    def print_json(self):
        """Dump JSON of all parsed information."""
        print(json.dumps({
            'local_workflows': self.local_workflows,
            'remote_workflows': self.remote_workflows
        }, default=lambda o: o.__dict__, indent=4))
class RemoteWorkflow(object):
    """A information container for a remote workflow.

    Args:
        data (dict): workflow information as they are retrieved from the
            Github repository REST API request
            (https://developer.github.com/v3/repos/#get).
    """

    def __init__(self, data):
        # Vars from the initial data payload
        self.name = data.get('name')
        self.full_name = data.get('full_name')
        self.description = data.get('description')
        self.topics = data.get('topics', [])
        self.archived = data.get('archived')
        self.stargazers_count = data.get('stargazers_count')
        self.watchers_count = data.get('watchers_count')
        self.forks_count = data.get('forks_count')

        # Release info; default to an empty list so the loop below and
        # callers that iterate releases don't crash when the key is missing
        # (the original `data.get('releases')` could be None).
        self.releases = data.get('releases', [])

        # Placeholder vars for local comparison
        self.local_wf = None
        self.local_is_latest = None

        # Beautify dates. The 'Z' suffix means UTC, so the sort timestamp is
        # computed from an aware UTC datetime; the original strftime("%s") was
        # non-portable (glibc-only) and used the local timezone.
        for release in self.releases:
            published = datetime.datetime.strptime(release.get('published_at'), "%Y-%m-%dT%H:%M:%SZ")
            release['published_at_pretty'] = pretty_date(published)
            release['published_at_timestamp'] = int(
                published.replace(tzinfo=datetime.timezone.utc).timestamp()
            )
class LocalWorkflow(object):
    """Class to handle local workflows pulled by nextflow."""

    def __init__(self, name):
        """Initialise the LocalWorkflow object."""
        self.full_name = name
        self.repository = None
        self.local_path = None
        self.commit_sha = None
        self.remote_url = None
        self.branch = None
        self.last_pull = None
        self.last_pull_date = None
        self.last_pull_pretty = None

    def get_local_nf_workflow_details(self):
        """Get full details about a local cached workflow."""
        if self.local_path is None:
            # Try to guess the local cache directory
            if os.environ.get('NXF_ASSETS'):
                nf_wfdir = os.path.join(os.environ.get('NXF_ASSETS'), self.full_name)
            else:
                nf_wfdir = os.path.join(os.getenv("HOME"), '.nextflow', 'assets', self.full_name)
            if os.path.isdir(nf_wfdir):
                logging.debug("Guessed nextflow assets workflow directory")
                self.local_path = nf_wfdir
            # Use `nextflow info` to get more details about the workflow
            else:
                try:
                    with open(os.devnull, 'w') as devnull:
                        nfinfo_raw = subprocess.check_output(['nextflow', 'info', '-d', self.full_name], stderr=devnull)
                except OSError as e:
                    # `os.errno` was removed in Python 3.6+; compare against the errno module.
                    if e.errno == errno.ENOENT:
                        raise AssertionError("It looks like Nextflow is not installed. It is required for most nf-core functions.")
                    raise  # don't silently swallow unexpected OS errors
                except subprocess.CalledProcessError as e:
                    # Build the message explicitly (AssertionError does not interpolate
                    # extra args) and name the actual command that failed.
                    raise AssertionError("`nextflow info` returned non-zero error code: {},\n {}".format(e.returncode, e.output))
                else:
                    # check_output returns bytes on Python 3; decode before regex matching.
                    nfinfo = nfinfo_raw.decode('utf-8')
                    re_patterns = {
                        'repository': r"repository\s*: (.*)",
                        'local_path': r"local path\s*: (.*)"
                    }
                    for key, pattern in re_patterns.items():
                        m = re.search(pattern, nfinfo)
                        if m:
                            setattr(self, key, m.group(1))

        # Pull information from the local git repository
        if self.local_path is not None:
            repo = git.Repo(self.local_path)
            self.commit_sha = str(repo.head.commit.hexsha)
            self.remote_url = str(repo.remotes.origin.url)
            self.branch = str(repo.active_branch)
            # Use the mtime of FETCH_HEAD as a proxy for the last `nextflow pull`
            self.last_pull = os.stat(os.path.join(self.local_path, '.git', 'FETCH_HEAD')).st_mtime
            self.last_pull_date = datetime.datetime.fromtimestamp(self.last_pull).strftime("%Y-%m-%d %H:%M:%S")
            self.last_pull_pretty = pretty_date(self.last_pull)
def pretty_date(time):
    """Transforms a datetime object or a int() Epoch timestamp into a
    pretty string like 'an hour ago', 'Yesterday', '3 months ago',
    'just now', etc

    Based on https://stackoverflow.com/a/1551394/713980
    Adapted by sven1103
    """
    from datetime import datetime

    now = datetime.now()
    if isinstance(time, datetime):
        diff = now - time
    else:
        diff = now - datetime.fromtimestamp(time)
    second_diff = diff.seconds
    day_diff = diff.days

    if day_diff < 0:
        return 'from the future'

    def _count_ago(count, unit):
        """Format `count` (rounded as '{:.0f}') with a correctly pluralised unit.

        The original pluralised from the unrounded value, producing e.g.
        '1 weeks ago' for 8-13 days and '1 years ago' for ~365-540 days,
        and never pluralised months at all - fixed here.
        """
        n = int('{:.0f}'.format(count))
        return '{} {}{} ago'.format(n, unit, 's' if n != 1 else '')

    if day_diff == 0:
        if second_diff < 10:
            return 'just now'
        if second_diff < 60:
            return '{:.0f} seconds ago'.format(second_diff)
        if second_diff < 120:
            return 'a minute ago'
        if second_diff < 3600:
            return '{:.0f} minutes ago'.format(second_diff / 60)
        if second_diff < 7200:
            return 'an hour ago'
        return '{:.0f} hours ago'.format(second_diff / 3600)
    if day_diff < 2:
        return 'yesterday'
    if day_diff < 7:
        return _count_ago(day_diff, 'day')
    if day_diff < 31:
        return _count_ago(day_diff / 7, 'week')
    if day_diff < 365:
        return _count_ago(day_diff / 30, 'month')
    return _count_ago(day_diff / 365, 'year')
| 40.284916 | 145 | 0.586118 |
f201b8294613e91e5506ab1b505052768b4882c1 | 1,283 | py | Python | backend/server/apps/ml/registry.py | LeonZly90/djangoModel | cef09be8fb34fa5282264d7cbe08f173e8718816 | [
"MIT"
] | null | null | null | backend/server/apps/ml/registry.py | LeonZly90/djangoModel | cef09be8fb34fa5282264d7cbe08f173e8718816 | [
"MIT"
] | null | null | null | backend/server/apps/ml/registry.py | LeonZly90/djangoModel | cef09be8fb34fa5282264d7cbe08f173e8718816 | [
"MIT"
] | null | null | null | # file backend/server/apps/ml/registry.py
from apps.endpoints.models import Endpoint
from apps.endpoints.models import MLAlgorithm
from apps.endpoints.models import MLAlgorithmStatus
class MLRegistry:
    """In-memory lookup of served ML algorithm objects, keyed by the id of
    their MLAlgorithm database record."""

    def __init__(self):
        # algorithm db id -> live algorithm object
        self.endpoints = {}

    def add_algorithm(self, endpoint_name, algorithm_object, algorithm_name,
                      algorithm_status, algorithm_version, owner,
                      algorithm_description, algorithm_code):
        """Register a live algorithm object and persist its metadata."""
        # Fetch (or lazily create) the endpoint this algorithm belongs to.
        endpoint, _ = Endpoint.objects.get_or_create(name=endpoint_name, owner=owner)
        # Fetch (or lazily create) the database record for the algorithm itself.
        algorithm, created = MLAlgorithm.objects.get_or_create(
            name=algorithm_name,
            description=algorithm_description,
            code=algorithm_code,
            version=algorithm_version,
            owner=owner,
            parent_endpoint=endpoint)
        if created:
            # A brand-new algorithm starts out with an active status row.
            MLAlgorithmStatus(status=algorithm_status,
                              created_by=owner,
                              parent_mlalgorithm=algorithm,
                              active=True).save()
        # Expose the live object under the database id of its record.
        self.endpoints[algorithm.id] = algorithm_object
0fb7192d7a014f57e2a669021e1c6fc0ec3aaf6f | 3,135 | py | Python | gpytorch/lazy/added_diag_lazy_tensor.py | konstantinklemmer/gpytorch | f1d947b340a188c398b6c6e610b6a839c61aa298 | [
"MIT"
] | 2 | 2019-03-31T04:36:30.000Z | 2019-05-22T20:09:25.000Z | gpytorch/lazy/added_diag_lazy_tensor.py | konstantinklemmer/gpytorch | f1d947b340a188c398b6c6e610b6a839c61aa298 | [
"MIT"
] | null | null | null | gpytorch/lazy/added_diag_lazy_tensor.py | konstantinklemmer/gpytorch | f1d947b340a188c398b6c6e610b6a839c61aa298 | [
"MIT"
] | 1 | 2019-02-15T17:05:42.000Z | 2019-02-15T17:05:42.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
from .non_lazy_tensor import NonLazyTensor
from .sum_lazy_tensor import SumLazyTensor
from .diag_lazy_tensor import DiagLazyTensor
from ..utils import pivoted_cholesky
from ..utils.cholesky import batch_potrf
from .. import settings
class AddedDiagLazyTensor(SumLazyTensor):
"""
A SumLazyTensor, but of only two lazy tensors, the second of which must be
a DiagLazyTensor.
"""
def __init__(self, *lazy_tensors):
lazy_tensors = list(lazy_tensors)
super(AddedDiagLazyTensor, self).__init__(*lazy_tensors)
if len(lazy_tensors) > 2:
raise RuntimeError("An AddedDiagLazyTensor can only have two components")
if isinstance(lazy_tensors[0], DiagLazyTensor) and isinstance(lazy_tensors[1], DiagLazyTensor):
raise RuntimeError("Trying to lazily add two DiagLazyTensors. " "Create a single DiagLazyTensor instead.")
elif isinstance(lazy_tensors[0], DiagLazyTensor):
self._diag_tensor = lazy_tensors[0]
self._lazy_tensor = lazy_tensors[1]
elif isinstance(lazy_tensors[1], DiagLazyTensor):
self._diag_tensor = lazy_tensors[1]
self._lazy_tensor = lazy_tensors[0]
else:
raise RuntimeError("One of the LazyTensors input to AddedDiagLazyTensor must be a DiagLazyTensor!")
def add_diag(self, added_diag):
return AddedDiagLazyTensor(self._lazy_tensor, self._diag_tensor.add_diag(added_diag))
def _preconditioner(self):
if settings.max_preconditioner_size.value() == 0:
return None, None
if not hasattr(self, "_woodbury_cache"):
max_iter = settings.max_preconditioner_size.value()
self._piv_chol_self = pivoted_cholesky.pivoted_cholesky(self._lazy_tensor, max_iter)
self._woodbury_cache = pivoted_cholesky.woodbury_factor(self._piv_chol_self, self._diag_tensor.diag())
# preconditioner
def precondition_closure(tensor):
return pivoted_cholesky.woodbury_solve(
tensor, self._piv_chol_self, self._woodbury_cache, self._diag_tensor.diag()
)
# log_det correction
if not hasattr(self, "_precond_log_det_cache"):
lr_flipped = self._piv_chol_self.matmul(
self._piv_chol_self.transpose(-2, -1).div(self._diag_tensor.diag().unsqueeze(-1))
)
lr_flipped = lr_flipped + torch.eye(n=lr_flipped.size(-2), dtype=lr_flipped.dtype, device=lr_flipped.device)
if lr_flipped.ndimension() == 3:
ld_one = (NonLazyTensor(batch_potrf(lr_flipped)).diag().log().sum(-1)) * 2
ld_two = self._diag_tensor.diag().log().sum(-1)
else:
ld_one = lr_flipped.potrf().diag().log().sum() * 2
ld_two = self._diag_tensor.diag().log().sum().item()
self._precond_log_det_cache = ld_one + ld_two
return precondition_closure, self._precond_log_det_cache
| 44.15493 | 120 | 0.685167 |
529646ca2bafbe17040ea504ec50fc4fe1f07f1f | 4,934 | py | Python | python/fate_arch/common/base_utils.py | QuantumA/FATE | 89a3dd593252128c1bf86fb1014b25a629bdb31a | [
"Apache-2.0"
] | 715 | 2019-01-24T10:52:03.000Z | 2019-10-31T12:19:22.000Z | python/fate_arch/common/base_utils.py | JavaGreenHands/FATE | ea1e94b6be50c70c354d1861093187e523af32f2 | [
"Apache-2.0"
] | 270 | 2019-02-11T02:57:36.000Z | 2019-08-29T11:22:33.000Z | python/fate_arch/common/base_utils.py | JavaGreenHands/FATE | ea1e94b6be50c70c354d1861093187e523af32f2 | [
"Apache-2.0"
] | 200 | 2019-01-26T14:21:35.000Z | 2019-11-01T01:14:36.000Z | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import datetime
import io
import json
import os
import pickle
import socket
import time
import uuid
from enum import Enum, IntEnum
from fate_arch.common.conf_utils import get_base_config
from fate_arch.common import BaseType
# Feature switch: when on, deserialize_b64() routes through RestrictedUnpickler
# so only whitelisted modules can be resolved during unpickling.
use_deserialize_safe_module = get_base_config('use_deserialize_safe_module', False)
class CustomJSONEncoder(json.JSONEncoder):
    """JSON encoder that also understands dates, enums, sets, FATE BaseType
    objects and bare classes."""

    def __init__(self, **kwargs):
        # `with_type` is our own flag; strip it before handing the remaining
        # keyword arguments to the stock JSONEncoder.
        self._with_type = kwargs.pop("with_type", False)
        super().__init__(**kwargs)

    def default(self, obj):
        # NOTE: datetime must be tested before date (datetime subclasses date).
        if isinstance(obj, datetime.datetime):
            return obj.strftime('%Y-%m-%d %H:%M:%S')
        if isinstance(obj, datetime.date):
            return obj.strftime('%Y-%m-%d')
        if isinstance(obj, datetime.timedelta):
            return str(obj)
        if isinstance(obj, Enum):
            # Covers IntEnum as well (IntEnum subclasses Enum).
            return obj.value
        if isinstance(obj, set):
            return list(obj)
        if issubclass(type(obj), BaseType):
            return obj.to_dict_with_type() if self._with_type else obj.to_dict()
        if isinstance(obj, type):
            return obj.__name__
        return json.JSONEncoder.default(self, obj)
def fate_uuid():
    """Return a 32-character hex identifier derived from a UUID1."""
    generated = uuid.uuid1()
    return generated.hex
def string_to_bytes(string):
    """Encode *string* to UTF-8 bytes; bytes input passes through untouched."""
    if isinstance(string, bytes):
        return string
    return string.encode(encoding="utf-8")
def bytes_to_string(byte):
    """Decode UTF-8 *byte* content into a str."""
    return str(byte, encoding="utf-8")
def json_dumps(src, byte=False, indent=None, with_type=False):
    """Serialize *src* to JSON via CustomJSONEncoder, optionally as UTF-8 bytes."""
    serialized = json.dumps(src, indent=indent, cls=CustomJSONEncoder, with_type=with_type)
    return string_to_bytes(serialized) if byte else serialized
def json_loads(src, object_hook=None, object_pairs_hook=None):
    """Deserialize JSON from a str or UTF-8 bytes payload."""
    text = bytes_to_string(src) if isinstance(src, bytes) else src
    return json.loads(text, object_hook=object_hook, object_pairs_hook=object_pairs_hook)
def current_timestamp():
    """Current wall-clock time as integer milliseconds since the epoch."""
    now_seconds = time.time()
    return int(now_seconds * 1000)
def timestamp_to_date(timestamp, format_string="%Y-%m-%d %H:%M:%S"):
    """Render a millisecond epoch *timestamp* as a local-time string."""
    seconds = int(timestamp) / 1000
    return time.strftime(format_string, time.localtime(seconds))
def date_string_to_timestamp(time_str, format_string="%Y-%m-%d %H:%M:%S"):
    """Parse a local-time string back into a millisecond epoch timestamp."""
    parsed = time.strptime(time_str, format_string)
    return int(time.mktime(parsed) * 1000)
def serialize_b64(src, to_str=False):
    """Pickle *src* and base64-encode it; return a str when *to_str* is set."""
    encoded = base64.b64encode(pickle.dumps(src))
    if to_str:
        return bytes_to_string(encoded)
    return encoded
def deserialize_b64(src):
    """Base64-decode and unpickle *src* (str or bytes).

    When the ``use_deserialize_safe_module`` switch is on, unpickling is
    restricted to the whitelisted modules via restricted_loads().
    """
    raw = base64.b64decode(string_to_bytes(src) if isinstance(src, str) else src)
    if use_deserialize_safe_module:
        return restricted_loads(raw)
    return pickle.loads(raw)
# Top-level packages whose classes RestrictedUnpickler is allowed to resolve
# during safe deserialization (see restricted_loads below).
safe_module = {
    'federatedml',
    'numpy',
    'fate_flow'
}
class RestrictedUnpickler(pickle.Unpickler):
    """Unpickler that only resolves globals from whitelisted top-level packages."""

    def find_class(self, module, name):
        import importlib
        top_level = module.split('.')[0]
        if top_level in safe_module:
            loaded = importlib.import_module(module)
            return getattr(loaded, name)
        # Forbid everything else.
        raise pickle.UnpicklingError("global '%s.%s' is forbidden" %
                                     (module, name))
def restricted_loads(src):
    """Helper function analogous to pickle.loads(), but module-restricted."""
    stream = io.BytesIO(src)
    return RestrictedUnpickler(stream).load()
def get_lan_ip():
    """Best-effort LAN IP lookup.

    Resolves the local hostname first; when that yields a loopback address on
    a POSIX system, probes a fixed list of common interface names for a
    usable IPv4 address. Returns '' when nothing is found.
    """
    if os.name != "nt":
        import fcntl
        import struct
        def get_interface_ip(ifname):
            # SIOCGIFADDR ioctl (0x8915 on Linux): fetch the IPv4 address
            # bound to *ifname*; bytes 20:24 of the reply hold the address.
            # NOTE(review): the socket is never closed - minor fd leak per call.
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            return socket.inet_ntoa(
                fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s', string_to_bytes(ifname[:15])))[20:24])
    ip = socket.gethostbyname(socket.getfqdn())
    if ip.startswith("127.") and os.name != "nt":
        # Hostname resolved to loopback - try common interface names instead.
        interfaces = [
            "bond1",
            "eth0",
            "eth1",
            "eth2",
            "wlan0",
            "wlan1",
            "wifi0",
            "ath0",
            "ath1",
            "ppp0",
        ]
        for ifname in interfaces:
            try:
                ip = get_interface_ip(ifname)
                break
            except IOError as e:
                # Interface missing or has no address; try the next one.
                pass
    return ip or ''
f1b3b2b74b8759671fe5b11e450ba548368bbe8f | 4,881 | py | Python | gnome/gnome2/gedit/plugins.symlink/ViGedit/bindings/ex.py | icebreaker/dotfiles | 5c3dc7f981069a728cc6b34ae39cd4c2da1122aa | [
"MIT"
] | 4 | 2015-03-17T14:36:49.000Z | 2019-06-10T09:34:35.000Z | gnome/gnome2/gedit/plugins.symlink/ViGedit/bindings/ex.py | icebreaker/dotfiles | 5c3dc7f981069a728cc6b34ae39cd4c2da1122aa | [
"MIT"
] | null | null | null | gnome/gnome2/gedit/plugins.symlink/ViGedit/bindings/ex.py | icebreaker/dotfiles | 5c3dc7f981069a728cc6b34ae39cd4c2da1122aa | [
"MIT"
] | 1 | 2019-03-01T13:21:55.000Z | 2019-03-01T13:21:55.000Z | from base import VIG_ModeBase
import os
import glob
import re
class MODE_options(object):
"""Options object for this mode"""
def __init__(self, act, options=None):
self.lastCommand = None
self.history = []
self.index = -1
class Mode(VIG_ModeBase):
def setup(self, act):
self.reg(self.evaluateEx, act.gtk.keysyms.Return, ignoreStack=True)
self.reg(self.evaluateEx, act.gtk.keysyms.KP_Enter, ignoreStack=True)
self.reg(self.cycleCompletions, act.gtk.keysyms.Tab)
self.reg(self.cycleHistoryBackward, act.gtk.keysyms.Up, ignoreStack=True)
self.reg(self.cycleHistoryForward, act.gtk.keysyms.Down, ignoreStack=True)
self.reg(self.cycleHistoryEnd, act.gtk.keysyms.Page_Down, ignoreStack=True)
self.reg(self.cycleHistoryStart, act.gtk.keysyms.Page_Up, ignoreStack=True)
def status(self, act):
if act.vibase.stack:
return ":" + "".join(act.vibase.stack)
else:
return "%s (start typing command)" % VIG_ModeBase.status(self, act)
def intro(self, act, options=None):
VIG_ModeBase.intro(self, act, options)
#I want the history to survive for the entire window
if not hasattr(act.vigtk, "exOptions"):
#we want the options object to only be initialised once
act.vigtk.exOptions = MODE_options(act, options)
def handle(self, act, event):
options = act.vigtk.exOptions
if event.keyval == act.gtk.keysyms.BackSpace:
if act.vibase.stack:
act.vibase.stack.pop()
if event.keyval == act.gtk.keysyms.Escape:
act.bindings.mode = act.modes.command
elif event.keyval not in (act.gtk.keysyms.Return, act.gtk.keysyms.BackSpace):
act.vibase.addToStack(event)
return True
def cycleHistoryBackward(self, act):
options = act.vigtk.exOptions
command = "".join(act.vibase.stack)
if command and command != options.lastCommand :
if options.index == 0:
options.history.insert(0, command)
else:
options.history.insert(options.index+1, command)
options.lastCommand = command
if not act.vibase.stack:
act.vibase.stack = list(options.history[options.index])
elif options.index > 0:
options.index -= 1
options.lastCommand = options.history[options.index]
act.vibase.stack = list(options.history[options.index])
def cycleHistoryForward(self, act):
options = act.vigtk.exOptions
command = "".join(act.vibase.stack)
if command and command != options.lastCommand :
options.history.insert(options.index+1, command)
options.lastCommand = command
options.index += 1
if options.index < (len(options.history)-1):
options.index += 1
options.lastCommand = options.history[options.index]
act.vibase.stack = list(options.history[options.index])
def cycleHistoryStart(self, act):
options = act.vigtk.exOptions
command = "".join(act.vibase.stack)
if command and command != options.lastCommand :
if options.index == 0:
options.history.insert(0, command)
else:
options.history.insert(options.index+1, command)
options.lastCommand = command
if options.index != 0:
options.index = 0
options.lastCommand = options.history[options.index]
act.vibase.stack = list(options.history[options.index])
def cycleHistoryEnd(self, act):
options = act.vigtk.exOptions
command = "".join(act.vibase.stack)
if command and command != options.lastCommand :
options.history.insert(options.index+1, command)
options.lastCommand = command
options.index += 1
if options.index < (len(options.history)-1):
options.index = len(options.history)-1
options.lastCommand = options.history[options.index]
act.vibase.stack = list(options.history[options.index])
def cycleCompletions(self, act, up = True):
act.trace.info(1, "TODO : make tab completion work")
#I didn't like the previous code for this and removed it
#At some point I'll come back and reimplement tab completion
#unless someone else does it for me :p
def evaluateEx(self, act):
command = "".join(act.vibase.stack)
act.trace.info(1, "evaluating expression %s" % command)
act.ex.evaluate(act, command)
| 40.338843 | 88 | 0.597419 |
c9c1e787f892ee23bf90bf87e9d1e60ce7065a7c | 17,940 | py | Python | venv/lib/python3.6/site-packages/django/utils/feedgenerator.py | xiegudong45/typeidea | db6504a232d120d6ffa185730bd35b9b9ecffa6c | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | venv/lib/python3.6/site-packages/django/utils/feedgenerator.py | xiegudong45/typeidea | db6504a232d120d6ffa185730bd35b9b9ecffa6c | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | virtual/lib/python3.6/site-packages/django/utils/feedgenerator.py | ngishjonathan/gallery | dd67f28887316d6277927c667f6641d26317b0b8 | [
"MIT"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
import warnings
from django.utils import datetime_safe, six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text, iri_to_uri
from django.utils.six import StringIO
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.timezone import utc
from django.utils.xmlutils import SimplerXMLGenerator
def rfc2822_date(date):
    """Format *date* per RFC 2822, e.g. 'Tue, 01 Jan 2019 00:00:00 -0000'."""
    # We can't use strftime() because it produces locale-dependent results, so
    # we have to map english month and day names manually
    months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
    days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    # We do this ourselves to be timezone aware, email.Utils is not tz aware.
    day_name = days[date.weekday()]
    month_name = months[date.month - 1]
    time_str = date.strftime('{}, %d {} %Y %H:%M:%S '.format(day_name, month_name))
    if six.PY2:  # strftime returns a byte string in Python 2
        time_str = time_str.decode('utf-8')
    offset = date.utcoffset()
    # Historically, this function assumes that naive datetimes are in UTC.
    if offset is None:
        return time_str + '-0000'
    total_minutes = (offset.days * 24 * 60) + (offset.seconds // 60)
    hour, minute = divmod(total_minutes, 60)
    return time_str + '%+03d%02d' % (hour, minute)
def rfc3339_date(date):
    """Format *date* per RFC 3339, e.g. '2019-01-01T00:00:00Z'."""
    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
    if six.PY2:  # strftime returns a byte string in Python 2
        time_str = time_str.decode('utf-8')
    offset = date.utcoffset()
    # Historically, this function assumes that naive datetimes are in UTC.
    if offset is None:
        return time_str + 'Z'
    total_minutes = (offset.days * 24 * 60) + (offset.seconds // 60)
    hour, minute = divmod(total_minutes, 60)
    return time_str + '%+03d:%02d' % (hour, minute)
def get_tag_uri(url, date):
    """
    Creates a TagURI.

    See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
    """
    bits = urlparse(url)
    date_part = ''
    if date is not None:
        date_part = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
    return 'tag:%s%s:%s/%s' % (bits.hostname, date_part, bits.path, bits.fragment)
class SyndicationFeed(object):
    "Base class for all syndication feeds. Subclasses should provide write()"
    def __init__(self, title, link, description, language=None, author_email=None,
                 author_name=None, author_link=None, subtitle=None, categories=None,
                 feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
        """Store normalized (text/IRI) feed-level metadata; extra kwargs are
        merged into self.feed for subclass-specific fields."""
        def to_unicode(s):
            # Coerce to text but leave non-string values (e.g. None) untouched.
            return force_text(s, strings_only=True)
        if categories:
            categories = [force_text(c) for c in categories]
        if ttl is not None:
            # Force ints to unicode
            ttl = force_text(ttl)
        self.feed = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'language': to_unicode(language),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'subtitle': to_unicode(subtitle),
            'categories': categories or (),
            'feed_url': iri_to_uri(feed_url),
            'feed_copyright': to_unicode(feed_copyright),
            # The feed id falls back to the feed's link when no GUID is given.
            'id': feed_guid or link,
            'ttl': ttl,
        }
        self.feed.update(kwargs)
        self.items = []
    def add_item(self, title, link, description, author_email=None,
                 author_name=None, author_link=None, pubdate=None, comments=None,
                 unique_id=None, unique_id_is_permalink=None, enclosure=None,
                 categories=(), item_copyright=None, ttl=None, updateddate=None,
                 enclosures=None, **kwargs):
        """
        Adds an item to the feed. All args are expected to be Python Unicode
        objects except pubdate and updateddate, which are datetime.datetime
        objects, and enclosures, which is an iterable of instances of the
        Enclosure class.
        """
        def to_unicode(s):
            return force_text(s, strings_only=True)
        if categories:
            categories = [to_unicode(c) for c in categories]
        if ttl is not None:
            # Force ints to unicode
            ttl = force_text(ttl)
        if enclosure is None:
            enclosures = [] if enclosures is None else enclosures
        else:
            # `enclosure` (singular) is the deprecated spelling; warn and
            # normalize it into the `enclosures` list.
            warnings.warn(
                "The enclosure keyword argument is deprecated, "
                "use enclosures instead.",
                RemovedInDjango20Warning,
                stacklevel=2,
            )
            enclosures = [enclosure]
        item = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'pubdate': pubdate,
            'updateddate': updateddate,
            'comments': to_unicode(comments),
            'unique_id': to_unicode(unique_id),
            'unique_id_is_permalink': unique_id_is_permalink,
            'enclosures': enclosures,
            'categories': categories or (),
            'item_copyright': to_unicode(item_copyright),
            'ttl': ttl,
        }
        item.update(kwargs)
        self.items.append(item)
    def num_items(self):
        # Number of items added so far.
        return len(self.items)
    def root_attributes(self):
        """
        Return extra attributes to place on the root (i.e. feed/channel) element.
        Called from write().
        """
        return {}
    def add_root_elements(self, handler):
        """
        Add elements in the root (i.e. feed/channel) element. Called
        from write().
        """
        pass
    def item_attributes(self, item):
        """
        Return extra attributes to place on each item (i.e. item/entry) element.
        """
        return {}
    def add_item_elements(self, handler, item):
        """
        Add elements on each item (i.e. item/entry) element.
        """
        pass
    def write(self, outfile, encoding):
        """
        Outputs the feed in the given encoding to outfile, which is a file-like
        object. Subclasses should override this.
        """
        raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')
    def writeString(self, encoding):
        """
        Returns the feed in the given encoding as a string.
        """
        s = StringIO()
        self.write(s, encoding)
        return s.getvalue()
    def latest_post_date(self):
        """
        Returns the latest item's pubdate or updateddate. If no items
        have either of these attributes this returns the current UTC date/time.
        """
        latest_date = None
        date_keys = ('updateddate', 'pubdate')
        for item in self.items:
            for date_key in date_keys:
                item_date = item.get(date_key)
                if item_date:
                    if latest_date is None or item_date > latest_date:
                        latest_date = item_date
        # datetime.now(tz=utc) is slower, as documented in django.utils.timezone.now
        return latest_date or datetime.datetime.utcnow().replace(tzinfo=utc)
class Enclosure(object):
    "Represents an RSS enclosure"
    def __init__(self, url, length, mime_type):
        "All args are expected to be Python Unicode objects"
        self.length = length
        self.mime_type = mime_type
        self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
    # Shared skeleton for RSS feed classes; subclasses set `_version` and
    # override add_item_elements().
    content_type = 'application/rss+xml; charset=utf-8'
    def write(self, outfile, encoding):
        """Serialize the whole feed as <rss><channel>...</channel></rss>."""
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement("rss", self.rss_attributes())
        handler.startElement("channel", self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        self.endChannelElement(handler)
        handler.endElement("rss")
    def rss_attributes(self):
        # Root <rss> attributes; the atom namespace backs the
        # <atom:link rel="self"> element emitted below.
        return {"version": self._version,
                "xmlns:atom": "http://www.w3.org/2005/Atom"}
    def write_items(self, handler):
        # One <item> element per added item; details come from the subclass.
        for item in self.items:
            handler.startElement('item', self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("item")
    def add_root_elements(self, handler):
        # Channel-level metadata; optional fields are skipped when unset.
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", self.feed['link'])
        handler.addQuickElement("description", self.feed['description'])
        if self.feed['feed_url'] is not None:
            handler.addQuickElement("atom:link", None, {"rel": "self", "href": self.feed['feed_url']})
        if self.feed['language'] is not None:
            handler.addQuickElement("language", self.feed['language'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", cat)
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("copyright", self.feed['feed_copyright'])
        handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
        if self.feed['ttl'] is not None:
            handler.addQuickElement("ttl", self.feed['ttl'])
    def endChannelElement(self, handler):
        handler.endElement("channel")
    @property
    def mime_type(self):
        # Deprecated alias for content_type, kept for backwards compatibility.
        warnings.warn(
            'The mime_type attribute of RssFeed is deprecated. '
            'Use content_type instead.',
            RemovedInDjango20Warning, stacklevel=2
        )
        return self.content_type
class RssUserland091Feed(RssFeed):
    # RSS 0.91 items carry only the three basic elements emitted below.
    _version = "0.91"
    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        if item['description'] is not None:
            handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
    # Spec: http://blogs.law.harvard.edu/tech/rss
    _version = "2.0"
    def add_item_elements(self, handler, item):
        """Emit the full set of RSS 2.0 <item> child elements for one item."""
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        if item['description'] is not None:
            handler.addQuickElement("description", item['description'])
        # Author information.
        if item["author_name"] and item["author_email"]:
            handler.addQuickElement("author", "%s (%s)" % (item['author_email'], item['author_name']))
        elif item["author_email"]:
            handler.addQuickElement("author", item["author_email"])
        elif item["author_name"]:
            # A name-only author goes out as dc:creator rather than <author>.
            handler.addQuickElement(
                "dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"}
            )
        if item['pubdate'] is not None:
            handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
        if item['comments'] is not None:
            handler.addQuickElement("comments", item['comments'])
        if item['unique_id'] is not None:
            guid_attrs = {}
            # isPermaLink is only emitted when the flag was explicitly a bool.
            if isinstance(item.get('unique_id_is_permalink'), bool):
                guid_attrs['isPermaLink'] = str(item['unique_id_is_permalink']).lower()
            handler.addQuickElement("guid", item['unique_id'], guid_attrs)
        if item['ttl'] is not None:
            handler.addQuickElement("ttl", item['ttl'])
        # Enclosure.
        if item['enclosures']:
            enclosures = list(item['enclosures'])
            if len(enclosures) > 1:
                raise ValueError(
                    "RSS feed items may only have one enclosure, see "
                    "http://www.rssboard.org/rss-profile#element-channel-item-enclosure"
                )
            enclosure = enclosures[0]
            handler.addQuickElement('enclosure', '', {
                'url': enclosure.url,
                'length': enclosure.length,
                'type': enclosure.mime_type,
            })
        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
    # Spec: https://tools.ietf.org/html/rfc4287
    content_type = 'application/atom+xml; charset=utf-8'
    ns = "http://www.w3.org/2005/Atom"
    def write(self, outfile, encoding):
        """Serialize the whole feed as an Atom <feed> document."""
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement('feed', self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        handler.endElement("feed")
    def root_attributes(self):
        # xml:lang is only included when the feed declares a language.
        if self.feed['language'] is not None:
            return {"xmlns": self.ns, "xml:lang": self.feed['language']}
        else:
            return {"xmlns": self.ns}
    def add_root_elements(self, handler):
        # Feed-level metadata; optional fields are skipped when unset.
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
        if self.feed['feed_url'] is not None:
            handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
        handler.addQuickElement("id", self.feed['id'])
        handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
        if self.feed['author_name'] is not None:
            handler.startElement("author", {})
            handler.addQuickElement("name", self.feed['author_name'])
            if self.feed['author_email'] is not None:
                handler.addQuickElement("email", self.feed['author_email'])
            if self.feed['author_link'] is not None:
                handler.addQuickElement("uri", self.feed['author_link'])
            handler.endElement("author")
        if self.feed['subtitle'] is not None:
            handler.addQuickElement("subtitle", self.feed['subtitle'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", "", {"term": cat})
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("rights", self.feed['feed_copyright'])
    def write_items(self, handler):
        # One <entry> element per added item.
        for item in self.items:
            handler.startElement("entry", self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("entry")
    def add_item_elements(self, handler, item):
        """Emit the child elements of a single Atom <entry>."""
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
        if item['pubdate'] is not None:
            handler.addQuickElement('published', rfc3339_date(item['pubdate']))
        if item['updateddate'] is not None:
            handler.addQuickElement('updated', rfc3339_date(item['updateddate']))
        # Author information.
        if item['author_name'] is not None:
            handler.startElement("author", {})
            handler.addQuickElement("name", item['author_name'])
            if item['author_email'] is not None:
                handler.addQuickElement("email", item['author_email'])
            if item['author_link'] is not None:
                handler.addQuickElement("uri", item['author_link'])
            handler.endElement("author")
        # Unique ID.
        if item['unique_id'] is not None:
            unique_id = item['unique_id']
        else:
            # Fall back to a tag: URI derived from the link and pubdate.
            unique_id = get_tag_uri(item['link'], item['pubdate'])
        handler.addQuickElement("id", unique_id)
        # Summary.
        if item['description'] is not None:
            handler.addQuickElement("summary", item['description'], {"type": "html"})
        # Enclosures.
        for enclosure in item['enclosures']:
            handler.addQuickElement('link', '', {
                'rel': 'enclosure',
                'href': enclosure.url,
                'length': enclosure.length,
                'type': enclosure.mime_type,
            })
        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", "", {"term": cat})
        # Rights.
        if item['item_copyright'] is not None:
            handler.addQuickElement("rights", item['item_copyright'])
    @property
    def mime_type(self):
        # Deprecated alias for content_type, kept for backwards compatibility.
        warnings.warn(
            'The mime_type attribute of Atom1Feed is deprecated. '
            'Use content_type instead.',
            RemovedInDjango20Warning, stacklevel=2
        )
        return self.content_type
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
| 38.915401 | 107 | 0.609253 |
116fe1554d476c08d020bcd1f4c7658768206adb | 494 | py | Python | mimic/canned_responses/glance.py | ksheedlo/mimic | c84b6a0d336e8a37a685b5d71537aec5e44d9a8f | [
"Apache-2.0"
] | 141 | 2015-01-07T19:28:31.000Z | 2022-02-11T06:04:13.000Z | mimic/canned_responses/glance.py | ksheedlo/mimic | c84b6a0d336e8a37a685b5d71537aec5e44d9a8f | [
"Apache-2.0"
] | 575 | 2015-01-04T20:23:08.000Z | 2019-10-04T08:20:04.000Z | mimic/canned_responses/glance.py | ksheedlo/mimic | c84b6a0d336e8a37a685b5d71537aec5e44d9a8f | [
"Apache-2.0"
] | 63 | 2015-01-09T20:39:41.000Z | 2020-07-06T14:20:56.000Z | """
Cannned responses for glance images
"""
from __future__ import absolute_import, division, unicode_literals
from mimic.canned_responses.json.glance.glance_images_json import (images,
image_schema)
def get_images():
    """
    Canned response for glance images list call
    """
    # `images` is a static fixture imported from glance_images_json.
    return images
def get_image_schema():
    """
    Canned response for GET glance image schema API call
    """
    # `image_schema` is a static fixture imported from glance_images_json.
    return image_schema
| 21.478261 | 80 | 0.62753 |
ff5785a14a6d825bda7afbc2059046172715ad63 | 557 | py | Python | src/sae-svm.py | taoxianpeng/pytorch-AutoEncoders | cb0edd050c763f0fc29f8275c266b151f5d5dca9 | [
"Apache-2.0"
] | null | null | null | src/sae-svm.py | taoxianpeng/pytorch-AutoEncoders | cb0edd050c763f0fc29f8275c266b151f5d5dca9 | [
"Apache-2.0"
] | null | null | null | src/sae-svm.py | taoxianpeng/pytorch-AutoEncoders | cb0edd050c763f0fc29f8275c266b151f5d5dca9 | [
"Apache-2.0"
] | null | null | null | from torchvision import datasets
from torchvision.datasets import ImageFolder
import torch
from sklearn import svm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def load_datasets(path):
    """Build and return an ImageFolder dataset rooted at *path*.

    Bug fix: the original assigned the dataset to a local name (shadowing the
    imported ``datasets`` module) and implicitly returned None, so callers
    never received the dataset.
    """
    return ImageFolder(path)
def load_modal(path=r'D:\GitHub\pytorch-AutoEncoders\src\SEAutoEncoder_modal.pt'):
    """Load and return the trained stacked-autoencoder model.

    The checkpoint location is now a parameter (defaulting to the original
    hard-coded path, so existing no-argument calls keep working) and the raw
    string literal avoids the invalid escape-sequence warnings the original
    literal produced.
    """
    modal = torch.load(path)
    return modal
def clas_svm():
    # TODO: unimplemented stub - presumably classifies encoded features with
    # a plain SVM (the file imports sklearn.svm); confirm intent.
    pass
def clas_pso_svm():
    # TODO: unimplemented stub - name suggests an SVM tuned via particle swarm
    # optimisation; confirm intent.
    pass
def clas_spso_svm():
    # TODO: unimplemented stub - name suggests an SVM tuned via a PSO variant
    # ("spso"); confirm intent.
    pass
def plot():
    # TODO: unimplemented stub - presumably visualises results via matplotlib.
    pass
if __name__ == "__main__":
    # Smoke test: load the saved autoencoder and print its module structure.
    antoEncoder = load_modal()
    print(antoEncoder)
| 19.892857 | 83 | 0.734291 |
247445ca1e9423de1e971fc25b21e90051592b4f | 311 | py | Python | celery_strategy/utils.py | thinmy/celery-strategy | 7785e2092dbc0c4660dd68627fdfa8c009c4566e | [
"MIT"
] | 1 | 2019-06-11T09:49:29.000Z | 2019-06-11T09:49:29.000Z | celery_strategy/utils.py | thinmy/celery-strategy | 7785e2092dbc0c4660dd68627fdfa8c009c4566e | [
"MIT"
] | 1 | 2019-08-20T13:23:46.000Z | 2019-08-20T13:23:46.000Z | celery_strategy/utils.py | thinmy/celery-strategy | 7785e2092dbc0c4660dd68627fdfa8c009c4566e | [
"MIT"
] | null | null | null | from celery.task.control import inspect
def get_celery_worker_status():
    """Inspect the Celery cluster and report worker stats.

    Returns the per-worker stats mapping (with a ``status: True`` marker
    added) when workers respond, otherwise a small failure payload with
    ``status: False``.
    """
    stats = inspect().stats()  # mapping of worker name -> stats, or None
    if stats:
        stats['status'] = True
        return stats
    return {
        'message': 'No running Celery workers were found.',
        'status': False
    }
| 19.4375 | 63 | 0.540193 |
c129df70aacb61e957e17dad754a52d8f57e7618 | 498 | py | Python | tests/test_smt.py | rdaly525/MetaMapper | 7f8278a012435d5d05ff6b388b7c1146061d6f1c | [
"BSD-3-Clause"
] | 3 | 2019-04-09T22:58:21.000Z | 2019-08-16T18:18:12.000Z | tests/test_smt.py | rdaly525/MetaMapper | 7f8278a012435d5d05ff6b388b7c1146061d6f1c | [
"BSD-3-Clause"
] | 22 | 2019-03-13T19:42:35.000Z | 2022-03-29T19:49:33.000Z | tests/test_smt.py | rdaly525/MetaMapper | 7f8278a012435d5d05ff6b388b7c1146061d6f1c | [
"BSD-3-Clause"
] | 1 | 2019-04-09T05:35:52.000Z | 2019-04-09T05:35:52.000Z | from metamapper import CoreIRContext
from metamapper.irs.coreir import gen_CoreIRNodes
import metamapper.coreir_util as cutil
from metamapper.common_passes import SMT, print_dag, prove_equal
def test_dag_to_smt():
    """A dag must be provably equivalent to itself (no SMT counter-example)."""
    CoreIRContext(reset=True)
    nodes = gen_CoreIRNodes(16)
    module = cutil.load_from_json("examples/coreir/add1_const.json")
    module_dag = cutil.coreir_to_dag(nodes, module)
    print_dag(module_dag)
    assert prove_equal(module_dag, module_dag) is None
| 33.2 | 66 | 0.789157 |
55e0ef62d5e894647e653f7ab4fad209bc973bad | 441 | py | Python | football/core/management/commands/calculate_weekly_scores.py | johnshiver/football_tools | 23e5137f176f0cb2f4dc42ee4a5ed56d6de37cf3 | [
"MIT"
] | 2 | 2016-08-26T12:25:42.000Z | 2016-08-26T14:53:15.000Z | football/core/management/commands/calculate_weekly_scores.py | johnshiver/football_tools | 23e5137f176f0cb2f4dc42ee4a5ed56d6de37cf3 | [
"MIT"
] | 2 | 2016-08-26T03:14:01.000Z | 2017-08-08T20:15:27.000Z | football/core/management/commands/calculate_weekly_scores.py | johnshiver/football_tools | 23e5137f176f0cb2f4dc42ee4a5ed56d6de37cf3 | [
"MIT"
] | 1 | 2019-11-17T20:56:10.000Z | 2019-11-17T20:56:10.000Z | from django.core.management.base import BaseCommand
from ...models import WeeklyStats, Player
class Command(BaseCommand):
    """Recompute cached scores: per-week stat totals, then player draft scores."""

    def handle(self, *args, **options):
        weekly_stats = WeeklyStats.objects.all()
        for stat in weekly_stats:
            stat.total_score = stat.calc_total_score()
            stat.save()
            # Fix: ``print stat`` is a SyntaxError under Python 3;
            # ``print(stat)`` behaves identically on Python 2 and 3.
            print(stat)
        for player in Player.objects.all():
            player.calculate_draft_bot_score()
| 29.4 | 54 | 0.655329 |
5c9087131e80dde6ef005a932646dfc1b282feb9 | 8,542 | py | Python | sdk/python/pulumi_azure_native/web/latest/get_web_app_deployment_slot.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/latest/get_web_app_deployment_slot.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/latest/get_web_app_deployment_slot.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
    'GetWebAppDeploymentSlotResult',
    'AwaitableGetWebAppDeploymentSlotResult',
    'get_web_app_deployment_slot',
]

# Import-time deprecation notice: this whole 'latest' namespace is frozen;
# callers should migrate to the top-level azure-native:web function.
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:web:getWebAppDeploymentSlot'.""", DeprecationWarning)
# Auto-generated Pulumi output type (see the SDK-generator warning at the top
# of the file); the docstrings below are copied from the Azure API spec.
# Do not hand-edit — regenerate instead.
@pulumi.output_type
class GetWebAppDeploymentSlotResult:
    """
    User credentials used for publishing activity.
    """
    def __init__(__self__, active=None, author=None, author_email=None, deployer=None, details=None, end_time=None, id=None, kind=None, message=None, name=None, start_time=None, status=None, system_data=None, type=None):
        # Each argument is type-checked and stored via pulumi.set() so the
        # @pulumi.output_type machinery can expose it through the getters below.
        if active and not isinstance(active, bool):
            raise TypeError("Expected argument 'active' to be a bool")
        pulumi.set(__self__, "active", active)
        if author and not isinstance(author, str):
            raise TypeError("Expected argument 'author' to be a str")
        pulumi.set(__self__, "author", author)
        if author_email and not isinstance(author_email, str):
            raise TypeError("Expected argument 'author_email' to be a str")
        pulumi.set(__self__, "author_email", author_email)
        if deployer and not isinstance(deployer, str):
            raise TypeError("Expected argument 'deployer' to be a str")
        pulumi.set(__self__, "deployer", deployer)
        if details and not isinstance(details, str):
            raise TypeError("Expected argument 'details' to be a str")
        pulumi.set(__self__, "details", details)
        if end_time and not isinstance(end_time, str):
            raise TypeError("Expected argument 'end_time' to be a str")
        pulumi.set(__self__, "end_time", end_time)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if message and not isinstance(message, str):
            raise TypeError("Expected argument 'message' to be a str")
        pulumi.set(__self__, "message", message)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if start_time and not isinstance(start_time, str):
            raise TypeError("Expected argument 'start_time' to be a str")
        pulumi.set(__self__, "start_time", start_time)
        if status and not isinstance(status, int):
            raise TypeError("Expected argument 'status' to be a int")
        pulumi.set(__self__, "status", status)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def active(self) -> Optional[bool]:
        """
        True if deployment is currently active, false if completed and null if not started.
        """
        return pulumi.get(self, "active")
    @property
    @pulumi.getter
    def author(self) -> Optional[str]:
        """
        Who authored the deployment.
        """
        return pulumi.get(self, "author")
    @property
    @pulumi.getter(name="authorEmail")
    def author_email(self) -> Optional[str]:
        """
        Author email.
        """
        return pulumi.get(self, "author_email")
    @property
    @pulumi.getter
    def deployer(self) -> Optional[str]:
        """
        Who performed the deployment.
        """
        return pulumi.get(self, "deployer")
    @property
    @pulumi.getter
    def details(self) -> Optional[str]:
        """
        Details on deployment.
        """
        return pulumi.get(self, "details")
    @property
    @pulumi.getter(name="endTime")
    def end_time(self) -> Optional[str]:
        """
        End time.
        """
        return pulumi.get(self, "end_time")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def message(self) -> Optional[str]:
        """
        Details about deployment status.
        """
        return pulumi.get(self, "message")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> Optional[str]:
        """
        Start time.
        """
        return pulumi.get(self, "start_time")
    @property
    @pulumi.getter
    def status(self) -> Optional[int]:
        """
        Deployment status.
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        The system metadata relating to this resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetWebAppDeploymentSlotResult(GetWebAppDeploymentSlotResult):
    # Awaitable wrapper so the result can be ``await``-ed inside Pulumi
    # programs; it resolves immediately to a plain result object (the
    # unreachable ``yield`` merely makes __await__ a generator).
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetWebAppDeploymentSlotResult(
            active=self.active,
            author=self.author,
            author_email=self.author_email,
            deployer=self.deployer,
            details=self.details,
            end_time=self.end_time,
            id=self.id,
            kind=self.kind,
            message=self.message,
            name=self.name,
            start_time=self.start_time,
            status=self.status,
            system_data=self.system_data,
            type=self.type)
def get_web_app_deployment_slot(id: Optional[str] = None,
                                name: Optional[str] = None,
                                resource_group_name: Optional[str] = None,
                                slot: Optional[str] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppDeploymentSlotResult:
    """
    User credentials used for publishing activity.
    Latest API Version: 2020-10-01.
    :param str id: Deployment ID.
    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :param str slot: Name of the deployment slot. If a slot is not specified, the API gets a deployment for the production slot.
    """
    # Deprecated entry point: warn on every call, then delegate to the
    # provider invoke machinery with the REST-style argument names.
    pulumi.log.warn("""get_web_app_deployment_slot is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:web:getWebAppDeploymentSlot'.""")
    __args__ = dict()
    __args__['id'] = id
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    __args__['slot'] = slot
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:web/latest:getWebAppDeploymentSlot', __args__, opts=opts, typ=GetWebAppDeploymentSlotResult).value
    return AwaitableGetWebAppDeploymentSlotResult(
        active=__ret__.active,
        author=__ret__.author,
        author_email=__ret__.author_email,
        deployer=__ret__.deployer,
        details=__ret__.details,
        end_time=__ret__.end_time,
        id=__ret__.id,
        kind=__ret__.kind,
        message=__ret__.message,
        name=__ret__.name,
        start_time=__ret__.start_time,
        status=__ret__.status,
        system_data=__ret__.system_data,
        type=__ret__.type)
| 34.723577 | 220 | 0.621868 |
4e2e2ae0ba38f141a290902438f61772d48903ed | 329 | py | Python | Back/ns_portal/resources/root/security/oauth2/v1/logout/logout_resource.py | anthonyHenryNS/NsPortal | dcb4b4e0a70c1c3431d5438d97e80f5d05c8e10e | [
"MIT"
] | 1 | 2019-01-22T15:16:43.000Z | 2019-01-22T15:16:43.000Z | Back/ns_portal/resources/root/security/oauth2/v1/logout/logout_resource.py | NaturalSolutions/NsPortal | bcd07fdf015948a82f4d0c3c9a02f513b2d99f5d | [
"MIT"
] | 16 | 2015-09-28T14:46:13.000Z | 2020-04-20T10:34:25.000Z | Back/ns_portal/resources/root/security/oauth2/v1/logout/logout_resource.py | anthonyHenryNS/NsPortal | dcb4b4e0a70c1c3431d5438d97e80f5d05c8e10e | [
"MIT"
] | 10 | 2015-05-06T08:05:09.000Z | 2020-01-27T13:39:47.000Z | from ns_portal.core.resources import (
MetaEndPointResource
)
from pyramid.security import (
Allow,
Everyone,
forget
)
class LogoutResource(MetaEndPointResource):
    """Endpoint that logs the current user out by dropping their auth cookies."""

    # Anyone (including unauthenticated users) may call logout.
    __acl__ = [
        (Allow, Everyone, 'read')
    ]

    def GET(self):
        # ``forget`` only *returns* the forget headers; they must be attached
        # to the response or the client keeps its authentication cookie.
        # The original discarded the return value, so logout had no effect.
        self.request.response.headerlist.extend(forget(self.request))
        return self.request.response
| 15.666667 | 43 | 0.641337 |
fb54e510df320040213e5f5032673d7a7ba42c68 | 6,969 | py | Python | libmultilabel/nn/networks/bigru_2.py | Abhinav43/LibMultiLabel | 805a66c13bf44e547f4b6976cae923172b96800f | [
"MIT"
] | null | null | null | libmultilabel/nn/networks/bigru_2.py | Abhinav43/LibMultiLabel | 805a66c13bf44e547f4b6976cae923172b96800f | [
"MIT"
] | null | null | null | libmultilabel/nn/networks/bigru_2.py | Abhinav43/LibMultiLabel | 805a66c13bf44e547f4b6976cae923172b96800f | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from ..networks.base import BaseModel
import torch.nn.functional as F
import torch_geometric.transforms as T
from scipy import sparse
from torch_geometric.nn import GCNConv
import numpy as np
import pickle as pk
def get_gcn_data(file_name):
    """Load node embeddings and an adjacency matrix for the label GCN.

    :param file_name: pickle whose 'emd' entry holds the embedding matrix.
    :return: ``(embeddings, adjacency)`` tuple.
    """
    with open(file_name, 'rb') as f:
        data = pk.load(f)
    # NOTE(review): the adjacency always comes from this hard-coded file,
    # regardless of ``file_name`` — confirm this is intentional.
    with open('use_use_m_None_2.pk', 'rb') as f:
        data_2 = pk.load(f)
    edm = data['emd']      # node embedding matrix
    adj = data_2['edge']   # edge / adjacency matrix
    return edm, adj
from torch_sparse import SparseTensor
class GCN(torch.nn.Module):
    """Two-layer graph convolutional encoder (dim_sim -> 1024 -> out_dim)."""

    def __init__(self, dim_sim, out_dim):
        super(GCN, self).__init__()
        self.conv1 = GCNConv(dim_sim, 1024)
        self.conv2 = GCNConv(1024, out_dim)

    def forward(self, x, edge_index):
        hidden = F.relu(self.conv1(x, edge_index))
        return self.conv2(hidden, edge_index)
class BiGRU(BaseModel):
    """BiGRU (Bidirectional Gated Recurrent Unit)
    Args:
        embed_vecs (FloatTensor): The pre-trained word vectors of shape (vocab_size, embed_dim).
        num_classes (int): Total number of classes.
        gcn_file (str): Path to the pickle consumed by ``get_gcn_data``.
        model_mode (str): Reduction mode forwarded to ``Normal_gcn``.
        model_dim_gcn: Stored but otherwise unused here — TODO confirm purpose.
        soft_max / line / conca: Flags forwarded to ``Normal_gcn``.
        rnn_dim (int): The size of bidirectional hidden layers. The hidden size of the GRU network
            is set to rnn_dim//2. Defaults to 512.
        rnn_layers (int): Number of recurrent layers. Defaults to 1.
        dropout (float): The dropout rate of the word embedding. Defaults to 0.2.
        activation (str): Activation function to be used. Defaults to 'tanh'.

    NOTE(review): ``.cuda()`` is called at construction time, so a GPU is
    required to even instantiate this model.
    """
    def __init__(
        self,
        embed_vecs,
        num_classes,
        gcn_file,
        model_mode,
        model_dim_gcn,
        soft_max,
        line,
        conca,
        rnn_dim=512,
        rnn_layers=1,
        dropout=0.2,
        activation='tanh',
        **kwargs
    ):
        super(BiGRU, self).__init__(embed_vecs, dropout, activation, **kwargs)
        assert rnn_dim%2 == 0, """`rnn_dim` should be even."""
        # BiGRU
        emb_dim = embed_vecs.shape[1]
        self.rnn = nn.GRU(emb_dim, rnn_dim//2, rnn_layers,
                          bidirectional=True, batch_first=True)
        self.model_dim_gcn = model_dim_gcn
        self.att = Normal_gcn(rnn_dim, num_classes, model_mode, soft_max, line, conca)
        # Label-graph inputs: node embeddings and a dense adjacency matrix.
        x_da, adj_da = get_gcn_data(gcn_file)
        self.x_da_f = torch.nn.Parameter(
            torch.Tensor(x_da).cuda(), requires_grad=True)
        self.A = torch.Tensor(adj_da).cuda()
        # Non-zero entries of A define the edge list; their values become
        # learnable edge weights.
        self.edge_index = self.A.nonzero(as_tuple=False).t()
        self.edge_weight = torch.nn.Parameter(self.A[self.edge_index[0], self.edge_index[1]],requires_grad=True)
        # NOTE(review): sparse_sizes is hard-coded to (50, 50) — presumably
        # num_classes == 50; verify against the training config.
        self.adj = SparseTensor(row=self.edge_index[0], col=self.edge_index[1], value=self.edge_weight,
                                sparse_sizes=(50,50))
        self.gcn = GCN(self.x_da_f.shape[-1], 1024).cuda()
    def forward(self, input):
        # Sort by length so pack_padded_sequence can run deterministically.
        text, length, indices = self.sort_data_by_length(input['text'], input['length'])
        x = self.embedding(text) # (batch_size, length, rnn_dim)
        x = self.embed_drop(x) # (batch_size, length, rnn_dim)
        packed_inputs = pack_padded_sequence(x, length, batch_first=True)
        x, _ = self.rnn(packed_inputs)
        x = pad_packed_sequence(x)[0]
        x = x.permute(1, 0, 2)
        x = torch.tanh(x)
        # Refresh label embeddings from the GCN, then apply label attention.
        x_gcn = self.gcn(self.x_da_f, self.adj)
        x = self.att(x, x_gcn)
        # ``indices`` restores the original (pre-sort) batch order.
        return {'logits': x[indices]}
    def sort_data_by_length(self, data, length):
        """Sort data by lengths. If data is not sorted before calling `pack_padded_sequence`,
        under `enforce_sorted=False`, the setting of `use_deterministic_algorithms` must be False.
        To keep `use_deterministic_algorithms` True, we sort the input here and return indices for
        restoring the original order.
        Args:
            data (torch.Tensor): Batch of sequences with shape (batch_size, length)
            length (list): List of text lengths before padding.
        Returns:
            data (torch.Tensor): Sequences sorted by lengths in descending order.
            length (torch.Tensor): Lengths sorted in descending order.
            indices (torch.Tensor): The indexes of the elements in the original data.
        """
        length = torch.as_tensor(length, dtype=torch.int64)
        length, sorted_indices = torch.sort(length, descending=True)
        sorted_indices = sorted_indices.to(data.device)
        data = data.index_select(0, sorted_indices)
        # Inverse permutation: indices[i] gives the sorted position of row i.
        data_size = sorted_indices.size(-1)
        indices = torch.empty(data_size, dtype=torch.long)
        indices[sorted_indices] = torch.arange(data_size)
        return data, length, indices
def compress_output(d, method, kernel_size,
                    out_channels):
    """Collapse the last axis of a (batch, classes, features) tensor.

    ``method`` selects a reduction ('amax', 'amin', 'mean', 'norm', 'std',
    'sum') applied along axis 2, or, for 'both', a CUDA Conv1d + max-pool
    pipeline. An unrecognised method returns None.
    """
    axis = 2
    if method == 'amax':
        return d.amax(axis)
    elif method == 'amin':
        return d.amin(axis)
    elif method == 'mean':
        return d.mean(axis)
    elif method == 'norm':
        return d.norm(dim=axis)
    elif method == 'std':
        return d.std(axis)
    elif method == 'sum':
        return d.sum(axis)
    elif method == 'both':
        conv = torch.nn.Conv1d(in_channels=d.shape[1],
                               out_channels=out_channels,
                               kernel_size=kernel_size,
                               stride=1).cuda()
        conv_out = conv(d)
        pool = torch.nn.MaxPool1d(kernel_size,
                                  conv_out.shape[-1]).cuda()
        return pool(conv_out).squeeze()
class Normal_gcn(torch.nn.Module):
    """Label-wise attention head that fuses GCN label embeddings with
    per-label attention over the encoder output.

    :param rnn_dim: feature size of the encoder output (last axis of ``x``).
    :param num_classes: number of labels.
    :param mode: reduction strategy name passed to ``compress_output``.
    :param soft_max: 1 -> softmax-normalize ``gcn_out`` before fusing.
    :param line: 1 -> apply the extra 50x50 linear layer to the logits.
    :param conca: 1 -> concatenate the flattened GCN output and project
        back to the logit size.
    """

    def __init__(self, rnn_dim, num_classes,
                 mode, soft_max, line, conca):
        super(Normal_gcn, self).__init__()
        # Per-label attention weights.
        self.U = nn.Linear(rnn_dim, num_classes)
        xavier_uniform_(self.U.weight)
        # linear output
        self.final = nn.Linear(rnn_dim, num_classes)
        xavier_uniform_(self.final.weight)
        self.mode = mode
        self.soft_max = int(soft_max)
        self.line = int(line)
        self.conca = int(conca)
        if self.line:
            # NOTE(review): hard-coded 50x50 assumes num_classes == 50.
            self.lin_2 = nn.Linear(50, 50)

    def forward(self, x, gcn_out):
        """
        :param x: encoder output of shape (batch, seq_len, rnn_dim).
        :param gcn_out: GCN label embeddings broadcastable to (num_classes, rnn_dim).
        :return: logits of shape (batch, num_classes).
        """
        if self.soft_max == 1:
            gcn_out = torch.softmax(gcn_out, dim=1)
        # One attention distribution over the sequence per label.
        alpha = torch.softmax(self.U.weight.matmul(x.transpose(1, 2)), dim=2)
        m = alpha.matmul(x)
        x = gcn_out.mul(m)
        # Bug fix: ``compress_output`` takes four parameters, but the
        # original call passed a fifth value, ``self.cuda_r`` — an attribute
        # that is never defined anywhere — so every forward pass raised
        # (AttributeError, then TypeError).
        x = compress_output(x, self.mode, 3, 50)
        x = x.add(self.final.bias)
        if self.line == 1:
            x = self.lin_2(x)
        if self.conca == 1:
            new_g = gcn_out.view(x.shape[0], -1)
            coat_out = torch.cat((x, new_g), 1)
            # NOTE(review): a fresh, randomly-initialised CPU Linear is built
            # on every forward pass (untrainable; breaks on CUDA inputs) —
            # flagged but left as-is to preserve behaviour.
            lin_2 = torch.nn.Linear(coat_out.shape[-1], x.shape[-1])
            x = lin_2(coat_out)
        return x
| 29.405063 | 117 | 0.599512 |
f78ac0bdafabbc58d8cba5091d25c11f51823246 | 444 | py | Python | globo/aplications/core/migrations/0007_alter_sumasysaldos_cierre_anterior.py | orlandor55/globo | bb236e2b0a9c803794416478470e71081116bd56 | [
"MIT"
] | null | null | null | globo/aplications/core/migrations/0007_alter_sumasysaldos_cierre_anterior.py | orlandor55/globo | bb236e2b0a9c803794416478470e71081116bd56 | [
"MIT"
] | null | null | null | globo/aplications/core/migrations/0007_alter_sumasysaldos_cierre_anterior.py | orlandor55/globo | bb236e2b0a9c803794416478470e71081116bd56 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-11-03 18:28
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 3.2.9): redefines SumasYSaldos.cierre_anterior
    # as a FloatField defaulting to 0. Do not edit by hand.
    dependencies = [
        ('core', '0006_sumasysaldos_axi_ejercicio_anterior'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sumasysaldos',
            name='cierre_anterior',
            field=models.FloatField(default=0, verbose_name='Cierre Anterior'),
        ),
    ]
| 23.368421 | 79 | 0.637387 |
9e2442bf84fad94dd52b08de246915b1ac0b1a48 | 1,193 | py | Python | cobra/tests/testdcode.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cobra/tests/testdcode.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cobra/tests/testdcode.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import unittest
import contextlib
import cobra
import cobra.dcode
import cobra.remoteapp
class Foo:
    """Tiny arithmetic service object shared over cobra in the dcode test."""

    def __init__(self, x, y):
        self.x, self.y = x, y

    def add(self):
        """Sum of the two operands."""
        return sum((self.x, self.y))

    def mult(self):
        """Product of the two operands."""
        return self.x * self.y

    def pow(self):
        """x raised to the power y."""
        return pow(self.x, self.y)
@contextlib.contextmanager
def getDcodeDmon():
    """Yield ``(share_name, daemon)`` for a throwaway cobra daemon.

    The daemon listens on port 41923 with msgpack serialization, enables
    dcode serving and shares a ``Foo(3, 4)`` instance; the share is removed
    when the context exits normally.
    """
    dmon = cobra.CobraDaemon(host='0.0.0.0', port=41923, msgpack=True)
    dmon.fireThread()
    cobra.dcode.enableDcodeServer(daemon=dmon)
    name = cobra.remoteapp.shareRemoteApp('cobra.testclient', appsrv=Foo(3, 4), daemon=dmon)
    yield (name, dmon)
    # NOTE(review): not wrapped in try/finally, so the share leaks if the
    # with-body raises — acceptable for a test, but worth confirming.
    dmon.unshareObject(name)
def buildCobra(host, port, name):
    """Return a msgpack CobraProxy for *name* on host:port (5s socket timeout)."""
    uri = 'cobra://%s:%s/%s?msgpack=1' % (host, port, name)
    sock_builder = cobra.initSocketBuilder(host, port)
    sock_builder.setTimeout(5)
    return cobra.CobraProxy(uri)
class CobraDcodeTest(unittest.TestCase):
    """End-to-end check that a remotely shared object answers method calls."""
    def test_cobra_dcode(self):
        with getDcodeDmon() as (name, dmon):
            # dmon.fireThread()
            srv = buildCobra(dmon.host, dmon.port, name)
            # The proxy forwards each call to the shared Foo(3, 4) instance.
            self.assertEqual(srv.add(), 7)
            self.assertEqual(srv.mult(), 12)
            self.assertEqual(srv.pow(), 81)
| 24.346939 | 92 | 0.638726 |
ffd7e38df2c39f6135f57d812165fe53c3e7310a | 7,334 | py | Python | client.py | Nedoko-maki/Internet-Voicechat | c5c764350039147890288ed3373d091f39c94fc0 | [
"MIT"
] | null | null | null | client.py | Nedoko-maki/Internet-Voicechat | c5c764350039147890288ed3373d091f39c94fc0 | [
"MIT"
] | null | null | null | client.py | Nedoko-maki/Internet-Voicechat | c5c764350039147890288ed3373d091f39c94fc0 | [
"MIT"
] | null | null | null | import logging
import queue
import socket
import threading
import traceback
import numpy
import pyflac
import select
import sounddevice as sd
import config
# Module-wide logging: "HH:MM:SS.mmm LEVEL:<tab>message" at INFO level.
logging.basicConfig(
    format='%(asctime)s.%(msecs)03d %(levelname)s:\t%(message)s',
    level=logging.INFO,
    datefmt='%H:%M:%S')
class _AudioHandler:
    """Owns the sounddevice duplex stream plus the FLAC encoder/decoder.

    No user threads are required: the sound card drives ``_audio_callback``,
    which encodes microphone input into ``outgoing_buffer`` and plays back
    whatever ``incoming_buffer`` holds.
    TO-DO:
    - switch to UDP
    - add headers to packets (problem since the send and recv funcs decide
      what they like and send packets of varying sizes.)
    """

    def __init__(self, outgoing_buffer, incoming_buffer, audio_devices=None):
        """
        :param outgoing_buffer: queue that receives encoded FLAC frames.
        :param incoming_buffer: queue of decoded audio awaiting playback.
        :param audio_devices: optional sounddevice device query (name/index).
        """
        if audio_devices:
            sd.default.device = audio_devices
        # Fix: the original read ``self._stream = self._stream = sd.RawStream(...)``
        # — a harmless but confusing duplicated assignment.
        self._stream = sd.RawStream(samplerate=config.SAMPLE_RATE,
                                    blocksize=config.PACKET_SIZE,
                                    channels=config.CHANNELS,
                                    dtype=numpy.int16,
                                    callback=self._audio_callback)
        self._encoder = pyflac.StreamEncoder(write_callback=self._encoder_callback,
                                             sample_rate=config.SAMPLE_RATE,
                                             blocksize=config.PACKET_SIZE)
        self._decoder = pyflac.StreamDecoder(write_callback=self._decoder_callback)
        self._outgoing_buffer = outgoing_buffer
        self._incoming_buffer = incoming_buffer
        self._is_muted = False

    def _audio_callback(self, in_data, out_data, *_) -> None:
        """Duplex callback: encode the mic block, play the next decoded block."""
        if not self._is_muted:
            self._encoder.process(numpy.frombuffer(in_data, dtype=numpy.int16))
        if self._incoming_buffer.qsize() > 0:
            data = self._incoming_buffer.get(block=False)
            out_data[:] = data.tobytes()
        elif self._incoming_buffer.qsize() == 0:
            # Nothing buffered: emit silence (PACKET_SIZE int16 samples).
            out_data[:] = bytes(config.PACKET_SIZE * 2)

    def _encoder_callback(self, buffer, *_):
        self._outgoing_buffer.put(buffer)  # buffer is a built-in bytes object.

    def _decoder_callback(self, data, *_):
        self._incoming_buffer.put(data)

    def _toggle_mute(self):
        # Drop frames still in flight when muting; without timestamped (UDP)
        # packets they would otherwise play back after unmuting.
        self._outgoing_buffer.queue.clear()
        self._is_muted = False if self._is_muted else True

    def start(self):
        """Flush stale audio, then start the duplex stream."""
        self._incoming_buffer.queue.clear()
        self._outgoing_buffer.queue.clear()
        self._stream.start()

    def stop(self):
        """Stop the duplex stream (buffers are left intact)."""
        self._stream.stop()
class Client:
    """Network side of the voice-chat client.

    Owns the TCP socket and a daemon thread that shuttles FLAC frames
    between the socket and the queues shared with :class:`_AudioHandler`.
    """
    def __init__(self, default_audio_devices=None):
        # Client class handles the internet IO, and passes the audio data to the AudioHandler class.
        """
        :param default_audio_devices: Query by name or index, the audio devices to be used. Use get_sound_devices() method to list all audio devices.
        Client class that handles audio and internet IO.
        """
        # NOTE(review): this Event appears unused — mute state actually lives
        # in _AudioHandler._is_muted; confirm before removing.
        self._is_muted = threading.Event()
        self._outgoing_buffer = queue.Queue()   # encoded FLAC frames -> socket
        self._incoming_buffer = queue.Queue()   # decoded audio <- socket
        self._audio_handler = _AudioHandler(self._outgoing_buffer, self._incoming_buffer,
                                            audio_devices=default_audio_devices)
        self._socket = None
        self._is_connected = False
        self._internet_io_flag = threading.Event()  # set -> stop the IO thread
        self._internet_thread = None
    def _internet_io(self, ):
        # Thread body: pump data between socket and audio buffers until the
        # stop flag is set or the connection drops.
        while not self._internet_io_flag.is_set():
            try:
                readable, writable, exceptional = select.select([self._socket], [self._socket], [self._socket])
            except ValueError:
                # select() on a closed/invalid socket raises ValueError.
                logging.info("Disconnect!")
                break
            if readable:
                try:
                    data = self._socket.recv(config.PACKET_SIZE)
                    # data, header = self._read_header(packed_data)
                    self._audio_handler._decoder.process(data)  # messy but since the callback audio func only runs
                    # whenever it has enough samples of audio to send, the audio needs to be processed by the time it
                    # does a callback.
                except ConnectionResetError:
                    logging.error("Disconnected!")
                    break
                except TimeoutError:
                    logging.error(f"Timed out! {traceback.format_exc()}")
            if writable and self._outgoing_buffer.qsize() > 0:
                data = self._outgoing_buffer.get()
                try:
                    self._socket.send(data)
                except ConnectionResetError:
                    logging.info("Disconnected!")
                    break
            if exceptional:
                logging.info("Disconnected!")
                break
    @staticmethod
    def _add_header(data, metadata):
        # Prefix ``data`` with a fixed-width UTF-8 header field.
        # NOTE(review): currently unused — kept for the planned UDP protocol.
        return bytearray(f"{metadata:<{config.HEADER_SIZE}}", "utf-8") + bytearray(data)
    @staticmethod
    def _read_header(data):
        # Split a packet into (payload, decoded header); inverse of _add_header.
        # NOTE(review): only referenced from commented-out code above.
        return bytes(data[config.HEADER_SIZE:]), data[:config.HEADER_SIZE].decode("utf-8", errors="ignore")
    @staticmethod
    def get_sound_devices(*args):
        # Pass-through to sounddevice's device listing/query helper.
        return sd.query_devices(*args)
    def connect(self, ip: str, port: int) -> bool:
        """
        :param ip: IP/Hostname of the server.
        :param port: Port of the server.
        :return: Boolean if the client is successfully connected.
        """
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # Putting the socket here so if the
        # socket closes, a new socket instance can be made on method call.
        self._internet_io_flag.clear()
        self._internet_thread = threading.Thread(target=Client._internet_io, args=(self,), daemon=True)
        self._internet_thread.start()
        self._is_connected = False
        logging.info(f"Connecting to {ip}: {port}...")
        try:
            self._socket.connect((ip, int(port)))
            self._socket.setblocking(False)
            self._socket.settimeout(config.SOCKET_TIMEOUT)
            self._is_connected = True
            logging.info("Connected!")
        except ConnectionRefusedError as error:
            logging.info(error)
        return self._is_connected
    def disconnect(self):
        # Stop audio first, then signal the IO thread and close the socket.
        if self._is_connected:
            self._audio_handler.stop()
            self._internet_io_flag.set()
            self._socket.close()
            self._is_connected = False
        else:
            logging.info("Not connected to a socket!")
    def start_talking(self):
        # Begin capturing/playing audio; requires an established connection.
        if self._is_connected:
            self._audio_handler.start()
        else:
            logging.info("Not connected to a socket!")
    def stop_talking(self):
        self._audio_handler.stop()
    def toggle_mute(self):
        self._audio_handler._toggle_mute()
| 35.77561 | 150 | 0.594082 |
643459f79244909ac8734abcb45f29d2e1bfa925 | 1,951 | py | Python | tryalgo/fenwick.py | siebenbrunner/tryalgo | b519f45b6babe0cdac8bbec267941c7ec29df004 | [
"MIT"
] | null | null | null | tryalgo/fenwick.py | siebenbrunner/tryalgo | b519f45b6babe0cdac8bbec267941c7ec29df004 | [
"MIT"
] | null | null | null | tryalgo/fenwick.py | siebenbrunner/tryalgo | b519f45b6babe0cdac8bbec267941c7ec29df004 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""\
Fenwick tree
jill-jenn vie et christoph durr - 2014-2018
"""
# snip{
class Fenwick:
    """Binary indexed (Fenwick) tree.

    Supports point updates with prefix-sum queries — or, via the interval
    variants, range updates with point queries — in O(log n) each.
    """
    def __init__(self, t):
        """stores a table t and allows updates and queries
        of prefix sums in logarithmic time.
        :param array t: with numerical values
        """
        self.s = [0] * (len(t) + 1)  # internal storage, 1-indexed
        for a in range(len(t)):
            self.add(a, t[a])        # initialize in O(n log n)
    # pylint: disable=redefined-builtin
    def prefixSum(self, a):
        """
        :param int a: index in t, negative a will return 0
        :returns: t[0] + ... + t[a]
        """
        i = a + 1                    # internal index starts at 1
        total = 0
        while i > 0:                 # loops over neighbors
            total += self.s[i]       # cumulative sum
            i -= (i & -i)            # drop lowest set bit -> left neighbor
        return total
    def intervalSum(self, a, b):
        """
        :param int a b: with 0 <= a <= b
        :returns: t[a] + ... + t[b]
        """
        return self.prefixSum(b) - self.prefixSum(a-1)
    def add(self, a, val):
        """
        :param int a: index in t
        :modifies: adds val to t[a]
        """
        i = a + 1                    # internal index starts at 1
        while i < len(self.s):       # loops over parents
            self.s[i] += val         # update node
            i += (i & -i)            # parent
    # variante:
    # pylint: disable=bad-whitespace
    def intervalAdd(self, a, b, val):
        """Variant, adds val to t[a], to t[a + 1] ... and to t[b].
        Use only together with :meth:`get`; mixing the interval variants
        with prefixSum/intervalSum gives meaningless results.
        :param int a b: with 0 <= a <= b < len(t)
        """
        self.add(a, +val)
        self.add(b + 1, -val)
    def get(self, a):
        """Variant, reads t[a] after interval additions.
        :param int a: negative a will return 0
        """
        # Fix: the docstring previously documented a nonexistent param ``i``.
        return self.prefixSum(a)
# snip}
| 27.871429 | 65 | 0.479754 |
893a5c0279f48b67ef25e0ab35319e3d91e228d3 | 94 | py | Python | factory/source/handlers/__init__.py | icYFTL/RTULAB_Service | a16d0fc2ac9ac103f0a14e90824caded7156bf11 | [
"Apache-2.0"
] | null | null | null | factory/source/handlers/__init__.py | icYFTL/RTULAB_Service | a16d0fc2ac9ac103f0a14e90824caded7156bf11 | [
"Apache-2.0"
] | null | null | null | factory/source/handlers/__init__.py | icYFTL/RTULAB_Service | a16d0fc2ac9ac103f0a14e90824caded7156bf11 | [
"Apache-2.0"
] | null | null | null | from .factory import Factory
from .provider import Provider
__all__ = ['Factory', 'Provider'] | 23.5 | 33 | 0.765957 |
53b5ac41a1232af10a1813e2644134a14b15ec0d | 1,388 | py | Python | amy/workshops/migrations/0163_auto_20181106_1416.py | code-review-doctor/amy | 268c1a199510457891459f3ddd73fcce7fe2b974 | [
"MIT"
] | 53 | 2015-01-10T17:39:19.000Z | 2019-06-12T17:36:34.000Z | amy/workshops/migrations/0163_auto_20181106_1416.py | code-review-doctor/amy | 268c1a199510457891459f3ddd73fcce7fe2b974 | [
"MIT"
] | 1,176 | 2015-01-02T06:32:47.000Z | 2019-06-18T11:57:47.000Z | amy/workshops/migrations/0163_auto_20181106_1416.py | code-review-doctor/amy | 268c1a199510457891459f3ddd73fcce7fe2b974 | [
"MIT"
] | 44 | 2015-01-03T15:08:56.000Z | 2019-06-09T05:33:08.000Z | # Generated by Django 2.1.2 on 2018-11-06 19:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.1.2): updates the help_text / limit_choices_to
    # of WorkshopRequest.requested_workshop_types. Do not edit by hand.
    dependencies = [
        ('workshops', '0162_auto_20181028_0754'),
    ]
    operations = [
        migrations.AlterField(
            model_name='workshoprequest',
            name='requested_workshop_types',
            field=models.ManyToManyField(help_text="If your learners are new to programming and primarily interested in working with data, Data Carpentry is likely the best choice. If your learners are interested in learning more about programming, including version control and automation, Software Carpentry is likely the best match. If your learners are people working in library and information related roles interested in learning data and software skills, Library Carpentry is the best choice. Please visit the <a href='https://software-carpentry.org/lessons/'>Software Carpentry lessons page</a>, <a href='http://www.datacarpentry.org/lessons/'>Data Carpentry lessons page</a>, or the <a href='https://librarycarpentry.org/'>Library Carpentry</a> for more information about any of our lessons. If you’re not sure and would like to discuss with us, please select the 'Not sure' option below.", limit_choices_to={'active': True}, to='workshops.Curriculum', verbose_name='Which Carpentry workshop are you requesting?'),
        ),
    ]
| 73.052632 | 1,023 | 0.746398 |
0d494adc81ceaa6210738844f46e4342d1258ed0 | 191 | py | Python | tests/func/regressions/test_issue_54_collector_unicode.py | boriel/parglare | 74a6d98b6e510ae3c814c517924796c5dccefae0 | [
"MIT"
] | 102 | 2017-01-27T17:16:07.000Z | 2022-03-18T17:44:27.000Z | tests/func/regressions/test_issue_54_collector_unicode.py | boriel/parglare | 74a6d98b6e510ae3c814c517924796c5dccefae0 | [
"MIT"
] | 134 | 2017-08-28T14:30:51.000Z | 2022-01-08T11:45:07.000Z | tests/func/regressions/test_issue_54_collector_unicode.py | boriel/parglare | 74a6d98b6e510ae3c814c517924796c5dccefae0 | [
"MIT"
] | 28 | 2016-12-31T15:03:24.000Z | 2022-03-21T19:49:19.000Z | from parglare import get_collector
def test_collector_can_use_unicode_in_python_2():
    # Regression test (issue #54): registering a collector action must not
    # fail under Python 2 when the action name is a unicode string.
    action = get_collector()
    def f(context, node):
        return node
    action('f_action')(f)
| 15.916667 | 49 | 0.706806 |
286ba73405b9159da5d794643c37604b56b7dc50 | 144 | py | Python | 20160822.py | JaeGyu/PythonEx_1 | e67053db6ca7431c3dd66351c190c53229e3f141 | [
"MIT"
] | null | null | null | 20160822.py | JaeGyu/PythonEx_1 | e67053db6ca7431c3dd66351c190c53229e3f141 | [
"MIT"
] | null | null | null | 20160822.py | JaeGyu/PythonEx_1 | e67053db6ca7431c3dd66351c190c53229e3f141 | [
"MIT"
] | null | null | null | #_*_ coding: utf-8 _*_
import sys
from PyQt4.QtGui import *
# Minimal PyQt4 demo: build the application and show a "Hello PyQt" label.
app = QApplication(sys.argv)
label = QLabel("Hello PyQt")
label.show()
app.exec_() | 16 | 28 | 0.708333 |
f90230701fca4588e01aa0f39587978a7611f34f | 17,810 | py | Python | log_mito_bcl2/model_216.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_mito_bcl2/model_216.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_mito_bcl2/model_216.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
# Instantiate the implicit, self-registering PySB model.  Every Monomer /
# Parameter / Observable / Rule / Initial declared below adds itself to this
# model, so statement order matters: Model() must come first.
Model()
# Monomers: molecular species and their named binding sites.  The names
# suggest an EARM-style extrinsic apoptosis model (e.g. C8A presumably
# active caspase-8, BaxA activated Bax -- TODO confirm against the source
# model this file was exported from).
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
# Kinetic rate constants.  Suffix convention: 2kf = bimolecular forward rate,
# 1kf/1kr = unimolecular forward/reverse rate, 1kc = catalytic rate.  All are
# initialized to 1.0 -- placeholder values, presumably meant to be replaced
# by fitted/sampled values (TODO confirm; this file appears to be input to a
# Bayesian inference workflow).
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
# Initial species amounts, one `<species>_0` parameter per monomer (units
# not stated in this file; presumably molecule counts -- TODO confirm).
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 54000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 328000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
# Observables: one per monomer.  Each pattern has no site conditions, so it
# matches the species in every binding state, i.e. each observable tracks
# the total amount of that monomer.
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
# Reaction rules.  PySB notation: `|` declares a reversible rule (forward
# rate, then reverse rate), `>>` an irreversible rule, `%` joins monomers
# into a complex, and matching integers on sites are bond identifiers.
# Do not reorder or hand-edit: this block is machine-exported and the
# rules reference the Parameter objects declared above by name.
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
# Mitochondrial section: Bid translocation, Bax activation, Bcl2 inhibition,
# pore assembly (up to a 4-mer), and cargo (Smac / cytochrome c) transport.
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
| 92.279793 | 798 | 0.801011 |
7d77f279b7c24f093a61c6ed28fa0d0e76b18c16 | 8,042 | py | Python | kubernetes/client/models/v1beta2_flow_schema_spec.py | philipp-sontag-by/python | 51c481692ab0d9c71b9dd96342bfa93b721b029d | [
"Apache-2.0"
] | 1 | 2022-02-22T23:10:55.000Z | 2022-02-22T23:10:55.000Z | kubernetes/client/models/v1beta2_flow_schema_spec.py | philipp-sontag-by/python | 51c481692ab0d9c71b9dd96342bfa93b721b029d | [
"Apache-2.0"
] | 6 | 2021-09-13T19:03:02.000Z | 2022-03-16T18:56:42.000Z | kubernetes/client/models/v1beta2_flow_schema_spec.py | philipp-sontag-by/python | 51c481692ab0d9c71b9dd96342bfa93b721b029d | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta2FlowSchemaSpec(object):
    """V1beta2FlowSchemaSpec model (auto generated by OpenAPI Generator).

    Ref: https://openapi-generator.tech -- do not edit the class manually.

    Class attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'distinguisher_method': 'V1beta2FlowDistinguisherMethod',
        'matching_precedence': 'int',
        'priority_level_configuration': 'V1beta2PriorityLevelConfigurationReference',
        'rules': 'list[V1beta2PolicyRulesWithSubjects]'
    }

    attribute_map = {
        'distinguisher_method': 'distinguisherMethod',
        'matching_precedence': 'matchingPrecedence',
        'priority_level_configuration': 'priorityLevelConfiguration',
        'rules': 'rules'
    }

    def __init__(self, distinguisher_method=None, matching_precedence=None, priority_level_configuration=None, rules=None, local_vars_configuration=None):  # noqa: E501
        """Create a V1beta2FlowSchemaSpec.

        Only `priority_level_configuration` is required; its setter raises
        ValueError for None when client-side validation is enabled.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Private storage behind the public properties.
        self._distinguisher_method = None
        self._matching_precedence = None
        self._priority_level_configuration = None
        self._rules = None
        self.discriminator = None

        if distinguisher_method is not None:
            self.distinguisher_method = distinguisher_method
        if matching_precedence is not None:
            self.matching_precedence = matching_precedence
        self.priority_level_configuration = priority_level_configuration
        if rules is not None:
            self.rules = rules

    @property
    def distinguisher_method(self):
        """The distinguisher_method of this V1beta2FlowSchemaSpec.

        :rtype: V1beta2FlowDistinguisherMethod
        """
        return self._distinguisher_method

    @distinguisher_method.setter
    def distinguisher_method(self, distinguisher_method):
        """Set the distinguisher_method of this V1beta2FlowSchemaSpec."""
        self._distinguisher_method = distinguisher_method

    @property
    def matching_precedence(self):
        """The matching_precedence of this V1beta2FlowSchemaSpec.

        `matchingPrecedence` is used to choose among the FlowSchemas that
        match a given request; the chosen FlowSchema is among those with the
        numerically lowest (logically highest) MatchingPrecedence.  Values
        must lie in [1, 10000]; an unspecified precedence defaults to 1000.

        :rtype: int
        """
        return self._matching_precedence

    @matching_precedence.setter
    def matching_precedence(self, matching_precedence):
        """Set the matching_precedence of this V1beta2FlowSchemaSpec."""
        self._matching_precedence = matching_precedence

    @property
    def priority_level_configuration(self):
        """The priority_level_configuration of this V1beta2FlowSchemaSpec.

        :rtype: V1beta2PriorityLevelConfigurationReference
        """
        return self._priority_level_configuration

    @priority_level_configuration.setter
    def priority_level_configuration(self, priority_level_configuration):
        """Set the priority_level_configuration (required attribute)."""
        if (priority_level_configuration is None and
                self.local_vars_configuration.client_side_validation):
            raise ValueError("Invalid value for `priority_level_configuration`, must not be `None`")  # noqa: E501
        self._priority_level_configuration = priority_level_configuration

    @property
    def rules(self):
        """The rules of this V1beta2FlowSchemaSpec.

        `rules` describes which requests will match this flow schema; a
        request matches if and only if at least one member of rules matches
        it.  An empty slice matches no requests.

        :rtype: list[V1beta2PolicyRulesWithSubjects]
        """
        return self._rules

    @rules.setter
    def rules(self, rules):
        """Set the rules of this V1beta2FlowSchemaSpec."""
        self._rules = rules

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _plain(value):
            # Recursively convert nested models (anything exposing to_dict).
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return dict(
                    (k, v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items())
            return value

        return dict((attr, _plain(getattr(self, attr)))
                    for attr in self.openapi_types)

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two specs are equal when their dict representations match."""
        if not isinstance(other, V1beta2FlowSchemaSpec):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of __eq__."""
        if not isinstance(other, V1beta2FlowSchemaSpec):
            return True
        return self.to_dict() != other.to_dict()
| 39.421569 | 376 | 0.678687 |
8afc163b4a246a30840e0de542e2b8787b8776ea | 10,386 | py | Python | TEST_PROJET-1.0-pc/renpy/bootstrap.py | Dune0lyn/otome | e365b474e7df3f76ccc0853fd1665f6529a59304 | [
"CC0-1.0"
] | null | null | null | TEST_PROJET-1.0-pc/renpy/bootstrap.py | Dune0lyn/otome | e365b474e7df3f76ccc0853fd1665f6529a59304 | [
"CC0-1.0"
] | null | null | null | TEST_PROJET-1.0-pc/renpy/bootstrap.py | Dune0lyn/otome | e365b474e7df3f76ccc0853fd1665f6529a59304 | [
"CC0-1.0"
] | null | null | null | # Copyright 2004-2019 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import os.path
import sys
import subprocess
import io
# Encoding used for filenames on this platform (fall back to utf-8 when the
# platform does not report one).
FSENCODING = sys.getfilesystemencoding() or "utf-8"
# Set the Python 2 default string encoding to the filesystem encoding.
# reload(sys) re-exposes sys.setdefaultencoding (site.py normally deletes
# it), but it also resets stdout/stderr, so they are saved and restored
# around the call.
old_stdout = sys.stdout
old_stderr = sys.stderr
reload(sys)
sys.setdefaultencoding(FSENCODING)  # @UndefinedVariable
sys.stdout = old_stdout
sys.stderr = old_stderr
import renpy.error
# Extra things used for distribution.
def extra_imports():
    """Import everything that distributed games may need at runtime.

    This function appears to exist solely so that the distribution tools'
    static analysis sees these modules as dependencies and bundles them
    (TODO confirm it is never called at runtime).  The bare `; module`
    reference after each import marks the import as intentionally "used"
    for lint tools.  Several names (compiler, urllib2, Queue, Cookie,
    SimpleHTTPServer) are Python 2 only.
    """
    import datetime; datetime
    import encodings.ascii; encodings.ascii
    import encodings.utf_8; encodings.utf_8
    import encodings.zlib_codec; encodings.zlib_codec
    import encodings.unicode_escape; encodings.unicode_escape
    import encodings.string_escape; encodings.string_escape
    import encodings.raw_unicode_escape; encodings.raw_unicode_escape
    import encodings.mbcs; encodings.mbcs
    import encodings.utf_16; encodings.utf_16
    import encodings.utf_16_be; encodings.utf_16_be
    import encodings.utf_16_le; encodings.utf_16_le
    import encodings.utf_32_be; encodings.utf_32_be
    import encodings.latin_1; encodings.latin_1
    import encodings.hex_codec; encodings.hex_codec
    import encodings.base64_codec; encodings.base64_codec
    import encodings.idna; encodings.idna
    import math; math
    import glob; glob
    import pickle; pickle
    import difflib; difflib
    import shutil; shutil
    import tarfile; tarfile
    import bz2; bz2 # @UnresolvedImport
    import webbrowser; webbrowser
    import posixpath; posixpath
    import ctypes; ctypes
    import ctypes.wintypes; ctypes.wintypes
    import argparse; argparse
    import compiler; compiler
    import textwrap; textwrap
    import copy; copy
    import urllib; urllib
    import urllib2; urllib2
    import codecs; codecs
    import rsa; rsa
    import decimal; decimal
    import plistlib; plistlib
    import _renpysteam; _renpysteam
    import compileall; compileall
    import cProfile; cProfile
    import pstats; pstats
    import _ssl; _ssl
    import SimpleHTTPServer; SimpleHTTPServer
    import wave; wave
    import sunau; sunau
    # Used by requests.
    import cgi; cgi
    import Cookie; Cookie
    import hmac; hmac
    import Queue; Queue
    import uuid; uuid
class NullFile(io.IOBase):
    """
    A write-only sink installed in place of stdout/stderr when those streams
    have no usable file descriptor: writes are silently discarded, reads
    raise IOError.
    """

    def write(self, s):
        # Discard the data, but report the full length as written, as the
        # io protocol requires (the original returned None here).
        return len(s)

    def read(self, length=None):
        raise IOError("Not implemented.")
def null_files():
    """
    Replace sys.stderr / sys.stdout with NullFile sinks when they are not
    backed by a usable file descriptor (fileno() < 0), so that later writes
    do not raise.  Best-effort: any error while probing the streams (e.g.
    fileno() missing or unsupported on a replacement stream) leaves them
    untouched.
    """

    try:
        if sys.stderr.fileno() < 0:
            sys.stderr = NullFile()

        if sys.stdout.fileno() < 0:
            sys.stdout = NullFile()

    except Exception:
        # Was a bare `except:`; narrowed so that KeyboardInterrupt and
        # SystemExit still propagate.
        pass


null_files()
# Tracing state, set up by enable_trace(): `trace_file` is the file trace
# output is written to, and `trace_local` is the callback returned for
# line-level events (trace_function itself, or None to disable line events).
trace_file = None
trace_local = None

def trace_function(frame, event, arg):
    # sys.settrace callback: log "<basename> <lineno> <function> <event>"
    # for each trace event, then return the line-level callback (or None).
    fn = os.path.basename(frame.f_code.co_filename)
    print(fn, frame.f_lineno, frame.f_code.co_name, event, file=trace_file)
    return trace_local
def enable_trace(level):
    """
    Turn on execution tracing to trace.txt.  At level 1 only function-scope
    events are logged; above level 1, line-level events are logged as well.
    """

    global trace_file
    global trace_local

    # Line-buffered (buffer size 1) so output is not lost on a crash.
    trace_file = file("trace.txt", "w", 1)

    trace_local = trace_function if level > 1 else None

    sys.settrace(trace_function)
def mac_start(fn):
    """
    Replacement for os.startfile on macOS: open `fn` with its default
    application via the `open` utility.

    Uses subprocess with an argument list (no shell) so that paths
    containing spaces or shell metacharacters work; the previous
    os.system("open " + fn) form broke on such paths and allowed shell
    injection through the filename.
    """

    subprocess.call([ "open", fn ])
# This code fixes a bug in subprocess.Popen.__del__
def popen_del(self, *args, **kwargs):
    """Do-nothing stand-in used to patch out subprocess.Popen.__del__
    (see the comment above: works around a bug in that finalizer)."""
    return
def bootstrap(renpy_base):
global renpy # W0602
import renpy.log # @UnusedImport
# Remove a legacy environment setting.
if os.environ.get(b"SDL_VIDEODRIVER", "") == "windib":
del os.environ[b"SDL_VIDEODRIVER"]
renpy_base = unicode(renpy_base, FSENCODING, "replace")
# If environment.txt exists, load it into the os.environ dictionary.
if os.path.exists(renpy_base + "/environment.txt"):
evars = { }
execfile(renpy_base + "/environment.txt", evars)
for k, v in evars.iteritems():
if k not in os.environ:
os.environ[k] = str(v)
# Also look for it in an alternate path (the path that contains the
# .app file.), if on a mac.
alt_path = os.path.abspath("renpy_base")
if ".app" in alt_path:
alt_path = alt_path[:alt_path.find(".app")+4]
if os.path.exists(alt_path + "/environment.txt"):
evars = { }
execfile(alt_path + "/environment.txt", evars)
for k, v in evars.iteritems():
if k not in os.environ:
os.environ[k] = str(v)
# Get a working name for the game.
name = os.path.basename(sys.argv[0])
if name.find(".") != -1:
name = name[:name.find(".")]
# Parse the arguments.
import renpy.arguments
args = renpy.arguments.bootstrap()
if args.trace:
enable_trace(args.trace)
if args.basedir:
basedir = os.path.abspath(args.basedir).decode(FSENCODING)
else:
basedir = renpy_base
if not os.path.exists(basedir):
sys.stderr.write("Base directory %r does not exist. Giving up.\n" % (basedir,))
sys.exit(1)
gamedirs = [ name ]
game_name = name
while game_name:
prefix = game_name[0]
game_name = game_name[1:]
if prefix == ' ' or prefix == '_':
gamedirs.append(game_name)
gamedirs.extend([ 'game', 'data', 'launcher/game' ])
for i in gamedirs:
if i == "renpy":
continue
gamedir = basedir + "/" + i
if os.path.isdir(gamedir):
break
else:
gamedir = basedir
sys.path.insert(0, basedir)
if renpy.macintosh:
# If we're on a mac, install our own os.start.
os.startfile = mac_start
# Are we starting from inside a mac app resources directory?
if basedir.endswith("Contents/Resources/autorun"):
renpy.macapp = True
# Check that we have installed pygame properly. This also deals with
# weird cases on Windows and Linux where we can't import modules. (On
# windows ";" is a directory separator in PATH, so if it's in a parent
# directory, we won't get the libraries in the PATH, and hence pygame
# won't import.)
try:
import pygame_sdl2
if not ("pygame" in sys.modules):
pygame_sdl2.import_as_pygame()
except:
print("""\
Could not import pygame_sdl2. Please ensure that this program has been built
and unpacked properly. Also, make sure that the directories containing
this program do not contain : or ; in their names.
You may be using a system install of python. Please run {0}.sh,
{0}.exe, or {0}.app instead.
""".format(name), file=sys.stderr)
raise
# If we're not given a command, show the presplash.
if args.command == "run" and not renpy.mobile:
import renpy.display.presplash # @Reimport
renpy.display.presplash.start(basedir, gamedir)
# Ditto for the Ren'Py module.
try:
import _renpy; _renpy
except:
print("""\
Could not import _renpy. Please ensure that this program has been built
and unpacked properly.
You may be using a system install of python. Please run {0}.sh,
{0}.exe, or {0}.app instead.
""".format(name), file=sys.stderr)
raise
# Load up all of Ren'Py, in the right order.
import renpy # @Reimport
renpy.import_all()
renpy.loader.init_importer()
exit_status = None
try:
while exit_status is None:
exit_status = 1
try:
renpy.game.args = args
renpy.config.renpy_base = renpy_base
renpy.config.basedir = basedir
renpy.config.gamedir = gamedir
renpy.config.args = [ ]
if renpy.android:
renpy.config.logdir = os.environ['ANDROID_PUBLIC']
else:
renpy.config.logdir = basedir
if not os.path.exists(renpy.config.logdir):
os.makedirs(renpy.config.logdir, 0o777)
renpy.main.main()
exit_status = 0
except KeyboardInterrupt:
raise
except renpy.game.UtterRestartException:
# On an UtterRestart, reload Ren'Py.
renpy.reload_all()
exit_status = None
except renpy.game.QuitException as e:
exit_status = e.status
if e.relaunch:
if hasattr(sys, "renpy_executable"):
subprocess.Popen([sys.renpy_executable] + sys.argv[1:])
else:
subprocess.Popen([sys.executable, "-EO"] + sys.argv)
except renpy.game.ParseErrorException:
pass
except Exception as e:
renpy.error.report_exception(e)
pass
sys.exit(exit_status)
finally:
if "RENPY_SHUTDOWN_TRACE" in os.environ:
enable_trace(int(os.environ["RENPY_SHUTDOWN_TRACE"]))
renpy.display.im.cache.quit()
if renpy.display.draw:
renpy.display.draw.quit()
renpy.audio.audio.quit()
# Prevent subprocess from throwing errors while trying to run its
# __del__ method during shutdown.
if not renpy.emscripten:
subprocess.Popen.__del__ = popen_del
| 28.532967 | 87 | 0.646062 |
6b328ff8411aedcb2e14900d00ab1c687296e18a | 3,070 | py | Python | tests/cache/test_cache_save_evaluation.py | EddLabs/eddington-static | cdd1d9514c4eea1bd06c24894b3922e6cc3fb1f5 | [
"Apache-2.0"
] | null | null | null | tests/cache/test_cache_save_evaluation.py | EddLabs/eddington-static | cdd1d9514c4eea1bd06c24894b3922e6cc3fb1f5 | [
"Apache-2.0"
] | null | null | null | tests/cache/test_cache_save_evaluation.py | EddLabs/eddington-static | cdd1d9514c4eea1bd06c24894b3922e6cc3fb1f5 | [
"Apache-2.0"
] | null | null | null | import datetime
import random
from unittest import mock
import pytest
from statue.cache import Cache
from statue.exceptions import CacheError
from tests.util import dummy_time_stamps, successful_evaluation_mock
def test_save_evaluation(tmp_path, mock_evaluation_load_from_file):
timestamp = 12300566
cache_dir = tmp_path / "cache"
evaluations_dir = cache_dir / "evaluations"
evaluations_dir.mkdir(parents=True)
evaluation_path = evaluations_dir / f"evaluation-{timestamp}.json"
evaluation = mock.Mock()
evaluation.timestamp = datetime.datetime.fromtimestamp(timestamp)
evaluation.save_as_json.side_effect = lambda path: path.touch()
size = random.randint(1, 100)
cache = Cache(size=size, cache_root_directory=cache_dir)
assert not evaluation_path.exists()
cache.save_evaluation(evaluation)
evaluation.save_as_json.assert_called_with(evaluation_path)
mock_evaluation_load_from_file.assert_not_called()
assert evaluation_path.exists()
@pytest.mark.parametrize("size", [random.randint(2, 100), 1])
def test_save_evaluation_deletes_old_evaluations( # pylint: disable=too-many-locals
tmp_path, mock_evaluation_load_from_file, size
):
cache_dir = tmp_path / "cache"
evaluations_dir = cache_dir / "evaluations"
evaluations_dir.mkdir(parents=True)
time_stamps = dummy_time_stamps(size + 1)
old_time_stamps, recent_time_stamp = time_stamps[:-1], time_stamps[-1]
old_evaluation_paths = [
evaluations_dir / f"evaluation-{int(time_stamp.timestamp())}.json"
for time_stamp in old_time_stamps
]
for old_evaluation_file in old_evaluation_paths:
old_evaluation_file.touch()
recent_evaluation_path = (
evaluations_dir / f"evaluation-{int(recent_time_stamp.timestamp())}.json"
)
recent_evaluation = successful_evaluation_mock(timestamp=recent_time_stamp)
old_evaluations = [
successful_evaluation_mock(timestamp=timestamp) for timestamp in old_time_stamps
]
recent_evaluation.save_as_json.side_effect = lambda path: path.touch()
evaluation_paths_dict = dict(zip(old_evaluation_paths, old_evaluations))
evaluation_paths_dict[recent_evaluation_path] = recent_evaluation
mock_evaluation_load_from_file.side_effect = evaluation_paths_dict.get
cache = Cache(size=size, cache_root_directory=cache_dir)
assert not recent_evaluation_path.exists()
cache.save_evaluation(recent_evaluation)
assert recent_evaluation_path.exists()
recent_evaluation.save_as_json.assert_called_with(recent_evaluation_path)
for i, old_evaluation_file in enumerate(old_evaluation_paths[1:]):
assert old_evaluation_file.exists(), f"The {i}th old file does not exist."
assert not old_evaluation_paths[0].exists()
def test_cache_save_evaluation_fails_when_no_root_dir_was_set():
size = random.randint(1, 100)
cache = Cache(size=size)
evaluation = mock.Mock()
with pytest.raises(CacheError, match="^Cache directory was not specified$"):
cache.save_evaluation(evaluation)
| 36.547619 | 88 | 0.767752 |
7cc2a7a2868e111ef2e528b445d67bcd9db8d422 | 5,708 | py | Python | conf.py | ubclaunchpad/rocket2 | 7a4f05f46229d1c9a900aac1694b3d822f9d6b0f | [
"MIT"
] | 14 | 2019-01-20T21:54:36.000Z | 2021-10-09T21:06:23.000Z | conf.py | ubclaunchpad/rocket2 | 7a4f05f46229d1c9a900aac1694b3d822f9d6b0f | [
"MIT"
] | 510 | 2018-11-18T20:07:51.000Z | 2022-02-01T15:34:03.000Z | conf.py | ubclaunchpad/rocket2.0 | 7a4f05f46229d1c9a900aac1694b3d822f9d6b0f | [
"MIT"
] | 9 | 2019-08-20T16:57:21.000Z | 2021-05-04T12:51:47.000Z | """
Configuration file for the Sphinx documentation builder.
This file does only contain a selection of the most common options. For a
full list see the documentation:
http://www.sphinx-doc.org/en/master/config
-- Path setup --------------------------------------------------------------
If extensions (or modules to document with autodoc) are in another directory,
add these directories to sys.path here. If the directory is relative to the
documentation root, use os.path.abspath to make it absolute, like shown here.
"""
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Rocket 2'
copyright = '2018, UBC Launch Pad'
author = 'UBC Launch Pad'
# The short X.Y version
version = '2.0'
# The full version, including alpha/beta/rc tags
release = '2.0.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx_autodoc_typehints',
]
# Config options for the extensions
typehints_fully_qualified = True
always_document_param_types = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['docs/_templates']
# The suffix(es) of source filenames.
source_suffix = ['.rst']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.md',
'.github/*', '*/README.md']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'emacs'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['docs/_static']
html_context = {
'css_files': [
'_static/theme_overrides.css', # override wide tables in RTD theme
],
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Rocket2doc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Rocket2.tex', 'Rocket 2 Documentation',
'UBC Launch Pad', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'rocket2', 'Rocket 2 Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Rocket2', 'Rocket 2 Documentation',
author, 'Rocket2', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
def skip(app, what, name, obj, autoskip, options):
    """autodoc-skip-member hook: always document ``__init__`` methods.

    Returns ``False`` (i.e. do not skip) for members named ``__init__``;
    for every other member, defers to Sphinx's default decision, which is
    passed in as ``autoskip``.
    """
    if name == '__init__':
        return False
    return autoskip
def setup(app):
    """Sphinx extension entry point: register our event handlers.

    Hooks ``skip`` into autodoc's member-skipping decision so that
    ``__init__`` methods are always documented.
    """
    app.connect('autodoc-skip-member', skip)
| 29.42268 | 78 | 0.649264 |
93a36c6c285bb1ff607c0185a9fffc9e8ce1d35f | 4,546 | bzl | Python | test/packetimpact/runner/defs.bzl | adamliyi/gvisor | f20e63e31b56784c596897e86f03441f9d05f567 | [
"Apache-2.0"
] | 1 | 2020-08-14T14:14:34.000Z | 2020-08-14T14:14:34.000Z | test/packetimpact/runner/defs.bzl | eilhan/gvisor | 190634e0fcf4cf25a449e1bd39533ca2ddad66e6 | [
"Apache-2.0"
] | null | null | null | test/packetimpact/runner/defs.bzl | eilhan/gvisor | 190634e0fcf4cf25a449e1bd39533ca2ddad66e6 | [
"Apache-2.0"
] | null | null | null | """Defines rules for packetimpact test targets."""
load("//tools:defs.bzl", "go_test")
def _packetimpact_test_impl(ctx):
    """Rule implementation: wrap the packetimpact runner in a bash script.

    The generated script first makes every runfile world-readable and then
    invokes the test runner with the configured flags and the testbench
    binary, forwarding any extra command-line arguments.
    """
    runner = ctx.executable._test_runner
    script = ctx.actions.declare_file("%s-bench" % ctx.label.name)
    script_lines = [
        "#!/bin/bash",
        # This test will run part in a distinct user namespace. This can cause
        # permission problems, because all runfiles may not be owned by the
        # current user, and no other users will be mapped in that namespace.
        # Make sure that everything is readable here.
        "find . -type f -or -type d -exec chmod a+rx {} \\;",
        "%s %s --testbench_binary %s $@\n" % (
            runner.short_path,
            " ".join(ctx.attr.flags),
            ctx.files.testbench_binary[0].short_path,
        ),
    ]
    ctx.actions.write(script, "\n".join(script_lines), is_executable = True)

    # Propagate the runner's own runfiles, if it declares any.
    extra_runfiles = []
    if hasattr(ctx.attr._test_runner, "data_runfiles"):
        extra_runfiles.append(ctx.attr._test_runner.data_runfiles.files)
    runfiles = ctx.runfiles(
        files = [runner] + ctx.files.testbench_binary + ctx.files._posix_server_binary,
        transitive_files = depset(transitive = extra_runfiles),
        collect_default = True,
        collect_data = True,
    )
    return [DefaultInfo(executable = script, runfiles = runfiles)]
_packetimpact_test = rule(
attrs = {
"_test_runner": attr.label(
executable = True,
cfg = "target",
default = ":packetimpact_test",
),
"_posix_server_binary": attr.label(
cfg = "target",
default = "//test/packetimpact/dut:posix_server",
),
"testbench_binary": attr.label(
cfg = "target",
mandatory = True,
),
"flags": attr.string_list(
mandatory = False,
default = [],
),
},
test = True,
implementation = _packetimpact_test_impl,
)
PACKETIMPACT_TAGS = [
"local",
"manual",
"packetimpact",
]
def packetimpact_native_test(
        name,
        testbench_binary,
        expect_failure = False,
        **kwargs):
    """Add a native packetimpact test.

    Args:
      name: name of the test
      testbench_binary: the testbench binary
      expect_failure: the test must fail
      **kwargs: all the other args, forwarded to _packetimpact_test
    """
    flags = ["--native"]
    if expect_failure:
        flags.append("--expect_failure")
    _packetimpact_test(
        name = name + "_native_test",
        testbench_binary = testbench_binary,
        flags = flags,
        tags = PACKETIMPACT_TAGS,
        **kwargs
    )
def packetimpact_netstack_test(
        name,
        testbench_binary,
        expect_failure = False,
        **kwargs):
    """Add a packetimpact test on netstack.

    Args:
      name: name of the test
      testbench_binary: the testbench binary
      expect_failure: the test must fail
      **kwargs: all the other args, forwarded to _packetimpact_test
    """
    # Use the same conditional-list idiom as packetimpact_native_test for
    # consistency between the two sibling macros.
    expect_failure_flag = ["--expect_failure"] if expect_failure else []
    _packetimpact_test(
        name = name + "_netstack_test",
        testbench_binary = testbench_binary,
        # Note that a distinct runtime must be provided in the form
        # --test_arg=--runtime=other when invoking bazel.
        flags = expect_failure_flag,
        tags = PACKETIMPACT_TAGS,
        **kwargs
    )
def packetimpact_go_test(name, size = "small", pure = True, expect_native_failure = False, expect_netstack_failure = False, **kwargs):
    """Add packetimpact tests written in go.

    Builds the Go testbench binary and registers both the native and the
    netstack variants of the packetimpact test around it.

    Args:
      name: name of the test
      size: size of the test
      pure: make a static go binary
      expect_native_failure: the test must fail natively
      expect_netstack_failure: the test must fail for Netstack
      **kwargs: all the other args, forwarded to go_test
    """
    testbench_binary = name + "_test"

    # The testbench itself is never run directly by bazel (local + manual).
    go_test(
        name = testbench_binary,
        size = size,
        pure = pure,
        tags = [
            "local",
            "manual",
        ],
        **kwargs
    )

    packetimpact_native_test(
        name = name,
        testbench_binary = testbench_binary,
        expect_failure = expect_native_failure,
    )
    packetimpact_netstack_test(
        name = name,
        testbench_binary = testbench_binary,
        expect_failure = expect_netstack_failure,
    )
2181cee1be162d73ad62070828e01d52edc2c593 | 5,960 | py | Python | authentication/cryptosign/tls/client_ssh_agent.py | pawankaushal/crossbar-examples | b6e0cc321bad020045c4fafec091f78abd938618 | [
"Apache-2.0"
] | null | null | null | authentication/cryptosign/tls/client_ssh_agent.py | pawankaushal/crossbar-examples | b6e0cc321bad020045c4fafec091f78abd938618 | [
"Apache-2.0"
] | null | null | null | authentication/cryptosign/tls/client_ssh_agent.py | pawankaushal/crossbar-examples | b6e0cc321bad020045c4fafec091f78abd938618 | [
"Apache-2.0"
] | null | null | null | ###############################################################################
##
## Copyright (C) Tavendo GmbH and/or collaborators. All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
##
## 2. Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
##
###############################################################################
import os
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn import util
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.cryptosign import SSHAgentSigningKey
class ClientSession(ApplicationSession):
"""
A WAMP client component authenticating using WAMP-cryptosign using
a private (Ed25519) key held in SSH agent.
"""
@inlineCallbacks
def onConnect(self):
print("onConnect()")
print('Using public key {}'.format(self.config.extra['pubkey']))
# create a proxy signing key with the private key being held in SSH agent
self._key = yield SSHAgentSigningKey.new(self.config.extra['pubkey'])
# authentication extra information for wamp-cryptosign
extra = {
# forward the client pubkey: this allows us to omit authid as
# the router can identify us with the pubkey already
'pubkey': self._key.public_key(),
# request channel binding
'channel_binding': 'tls-unique'
}
# join and authenticate using WAMP-cryptosign
self.join(self.config.realm,
authmethods=['cryptosign'],
authid=self.config.extra['authid'],
authextra=extra)
def onChallenge(self, challenge):
print("onChallenge(challenge={})".format(challenge))
# router has sent us a challenge .. sign it and return the signature
# the actual signing is done within SSH agent! that means: the private key
# is actually _never_ touched (other than by SSH agent itself)
return self._key.sign_challenge(self, challenge)
def onJoin(self, details):
print("onJoin(details={})".format(details))
print("\nHooray! We've been successfully authenticated with WAMP-cryptosign using Ed25519!\n")
self.leave()
def onLeave(self, details):
print("onLeave(details={})".format(details))
self.disconnect()
def onDisconnect(self):
print("onDisconnect()")
reactor.stop()
if __name__ == '__main__':
import argparse
from autobahn.twisted.wamp import ApplicationRunner
# parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--authid', dest='authid', type=str, default=None, help='The authid to connect under. If not provided, let the router auto-choose the authid (based on client public key).')
parser.add_argument('--realm', dest='realm', type=str, default=None, help='The realm to join. If not provided, let the router auto-choose the realm.')
parser.add_argument('--pubkey', dest='pubkey', type=str, default=None, help='Filename of the client SSH Ed25519 public key.')
parser.add_argument('--trustroot', dest='trustroot', type=str, default=None, help='Filename of the router SSH Ed25519 public key (for server verification).')
parser.add_argument('--url', dest='url', type=str, default='ws://localhost:8080/ws', help='The router URL (default: ws://localhost:8080/ws).')
parser.add_argument('--agent', dest='agent', type=str, default=None, help='Path to Unix domain socket of SSH agent to use.')
parser.add_argument('--trace', dest='trace', action='store_true', default=False, help='Trace traffic: log WAMP messages sent and received')
options = parser.parse_args()
print("Connecting to {}: realm={}, authid={}, pubkey={}, trustroot={}".format(options.url, options.realm, options.authid, options.pubkey, options.trustroot))
if options.pubkey is None:
options.pubkey = os.path.expanduser('~/.ssh/id_ed25519.pub')
# load client public key
with open(options.pubkey, 'r') as f:
pubkey = f.read()
if type(pubkey) == bytes:
pubkey = pubkey.decode('ascii')
# load router public key (optional, if avail., router will be authenticated too)
trustroot = None
if options.trustroot:
with open(options.trustroot, 'r') as f:
trustroot = f.read()
if type(trustroot) == bytes:
trustroot = trustroot.decode('ascii')
# forward stuff to our session
extra = {
'authid': options.authid,
'pubkey': pubkey,
'trustroot': trustroot
}
runner = ApplicationRunner(url=options.url, realm=options.realm, extra=extra)
runner.run(ClientSession)
| 43.823529 | 196 | 0.670805 |
a26426c33c86ee930b2714bc9fef0b8eb2cfce57 | 3,735 | py | Python | tests/st/pynative/data_parallel/test_pynative_hccl.py | PowerOlive/mindspore | bda20724a94113cedd12c3ed9083141012da1f15 | [
"Apache-2.0"
] | 3,200 | 2020-02-17T12:45:41.000Z | 2022-03-31T20:21:16.000Z | tests/st/pynative/data_parallel/test_pynative_hccl.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 176 | 2020-02-12T02:52:11.000Z | 2022-03-28T22:15:55.000Z | tests/st/pynative/data_parallel/test_pynative_hccl.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 621 | 2020-03-09T01:31:41.000Z | 2022-03-30T03:43:19.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test bert thor performance with 8p on mlperf dataset"""
import os
from multiprocessing import Process, Queue
import pytest
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import dtype as mstype
from mindspore.ops import operations as P
import mindspore.communication.management as D
from mindspore import context
from mindspore.context import ParallelMode
MINDSPORE_HCCL_CONFIG_PATH = "/home/workspace/mindspore_config/hccl/rank_table_8p.json"
np.random.seed(1)
os.environ['GLOG_v'] = str(2)
class AllReduceNet(nn.Cell):
    """Minimal network wrapping a single AllReduce collective operator."""

    def __init__(self):
        super(AllReduceNet, self).__init__()
        # One AllReduce primitive; sums the input across all devices.
        self.all_reduce = P.AllReduce()

    def construct(self, x):
        """Run AllReduce on ``x`` and return the reduced tensor."""
        reduced = self.all_reduce(x)
        return reduced
def train_allreduce_8p(q, device_id, device_num):
os.system("mkdir " + str(device_id))
os.chdir(str(device_id))
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend", device_id=device_id)
os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH
os.environ['RANK_ID'] = str(device_id)
os.environ['RANK_SIZE'] = str(device_num)
D.init()
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
device_num=device_num)
net = AllReduceNet()
input_x = np.ones([32, 255, 255, 3]).astype(np.float32)
except_output = input_x * 8
output = net(Tensor(input_x, mstype.float32))
q.put(np.allclose(output.asnumpy(), except_output))
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
def test_pynative_hccl_8p():
device_num = 8
process = []
q = Queue()
for i in range(device_num):
device_id = i
process.append(Process(target=train_allreduce_8p, args=(q, device_id, device_num)))
for i in range(device_num):
process[i].start()
print("Waiting for all subprocesses done...")
for i in range(device_num):
process[i].join()
# check result
for i in range(device_num):
assert not q.empty()
assert q.get()
for i in range(device_num):
os.system("rm -rf " + str(i))
print("End training...")
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
def test_pynative_hccl_8pv2():
os.environ['GRAPH_OP_RUN'] = str(1)
device_num = 8
process = []
q = Queue()
for i in range(device_num):
device_id = i
process.append(Process(target=train_allreduce_8p, args=(q, device_id, device_num)))
for i in range(device_num):
process[i].start()
print("Waiting for all subprocesses done...")
for i in range(device_num):
process[i].join()
# check result
for i in range(device_num):
assert not q.empty()
assert q.get()
for i in range(device_num):
os.system("rm -rf " + str(i))
print("End training...")
| 30.867769 | 100 | 0.687818 |
7ac77dc1d37556db490dc370263b56cada06ef9d | 3,543 | py | Python | venv/Lib/site-packages/scipy/special/tests/test_hypergeometric.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 353 | 2020-12-10T10:47:17.000Z | 2022-03-31T23:08:29.000Z | venv/Lib/site-packages/scipy/special/tests/test_hypergeometric.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 80 | 2020-12-10T09:54:22.000Z | 2022-03-30T22:08:45.000Z | venv/Lib/site-packages/scipy/special/tests/test_hypergeometric.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 63 | 2020-12-10T17:10:34.000Z | 2022-03-28T16:27:07.000Z | import pytest
import numpy as np
from numpy.testing import assert_allclose
from numpy.testing import assert_equal
import scipy.special as sc
class TestHyperu(object):
    """Tests for the confluent hypergeometric function ``scipy.special.hyperu``."""

    def test_negative_x(self):
        # hyperu returns NaN for every negative real x, for any a and b.
        grid_a, grid_b, grid_x = np.meshgrid(
            [-1, -0.5, 0, 0.5, 1],
            [-1, -0.5, 0, 0.5, 1],
            np.linspace(-100, -1, 10),
        )
        assert np.isnan(sc.hyperu(grid_a, grid_b, grid_x)).all()

    def test_special_cases(self):
        # U(0, b, x) is identically 1.
        assert sc.hyperu(0, 1, 1) == 1.0

    @pytest.mark.parametrize('a', [0.5, 1, np.nan])
    @pytest.mark.parametrize('b', [1, 2, np.nan])
    @pytest.mark.parametrize('x', [0.25, 3, np.nan])
    def test_nan_inputs(self, a, b, x):
        # A NaN result occurs exactly when at least one argument is NaN.
        has_nan_input = np.any(np.isnan([a, b, x]))
        assert np.isnan(sc.hyperu(a, b, x)) == has_nan_input
class TestHyp1f1(object):
@pytest.mark.parametrize('a, b, x', [
(np.nan, 1, 1),
(1, np.nan, 1),
(1, 1, np.nan)
])
def test_nan_inputs(self, a, b, x):
assert np.isnan(sc.hyp1f1(a, b, x))
def test_poles(self):
assert_equal(sc.hyp1f1(1, [0, -1, -2, -3, -4], 0.5), np.infty)
@pytest.mark.parametrize('a, b, x, result', [
(-1, 1, 0.5, 0.5),
(1, 1, 0.5, 1.6487212707001281468),
(2, 1, 0.5, 2.4730819060501922203),
(1, 2, 0.5, 1.2974425414002562937),
(-10, 1, 0.5, -0.38937441413785204475)
])
def test_special_cases(self, a, b, x, result):
# Hit all the special case branches at the beginning of the
# function. Desired answers computed using Mpmath.
assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)
@pytest.mark.parametrize('a, b, x, result', [
(1, 1, 0.44, 1.5527072185113360455),
(-1, 1, 0.44, 0.55999999999999999778),
(100, 100, 0.89, 2.4351296512898745592),
(-100, 100, 0.89, 0.40739062490768104667),
(1.5, 100, 59.99, 3.8073513625965598107),
(-1.5, 100, 59.99, 0.25099240047125826943)
])
def test_geometric_convergence(self, a, b, x, result):
# Test the region where we are relying on the ratio of
#
# (|a| + 1) * |x| / |b|
#
# being small. Desired answers computed using Mpmath
assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)
@pytest.mark.parametrize('a, b, x, result', [
(-1, 1, 1.5, -0.5),
(-10, 1, 1.5, 0.41801777430943080357),
(-25, 1, 1.5, 0.25114491646037839809),
(-50, 1, 1.5, -0.25683643975194756115),
(-51, 1, 1.5, -0.19843162753845452972)
])
def test_a_negative_integer(self, a, b, x, result):
# Desired answers computed using Mpmath. After -51 the
# relative error becomes unsatisfactory and we start returning
# NaN.
assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-9)
def test_gh_3492(self):
desired = 0.99973683897677527773 # Computed using Mpmath
assert_allclose(
sc.hyp1f1(0.01, 150, -4),
desired,
atol=0,
rtol=1e-15
)
def test_gh_3593(self):
desired = 1.0020033381011970966 # Computed using Mpmath
assert_allclose(
sc.hyp1f1(1, 5, 0.01),
desired,
atol=0,
rtol=1e-15
)
@pytest.mark.parametrize('a, b, x, desired', [
(-1, -2, 2, 2),
(-1, -4, 10, 3.5),
(-2, -2, 1, 2.5)
])
def test_gh_11099(self, a, b, x, desired):
# All desired results computed using Mpmath
assert sc.hyp1f1(a, b, x) == desired
| 32.805556 | 74 | 0.55405 |
e436465537683cf7c45e2fb56eb2824c98c7d896 | 57,461 | py | Python | Simulations/DDQL_AV/SumoSupervisor.py | mattwfranchi/Webots.HPC | 137270ad3796e5bb4b93dfc4b93c805e59a2e828 | [
"MIT"
] | 3 | 2021-09-24T14:52:18.000Z | 2021-12-07T13:24:52.000Z | Simulations/HighwayMerge/SumoSupervisor.py | mattwfranchi/Webots.HPC | 137270ad3796e5bb4b93dfc4b93c805e59a2e828 | [
"MIT"
] | null | null | null | Simulations/HighwayMerge/SumoSupervisor.py | mattwfranchi/Webots.HPC | 137270ad3796e5bb4b93dfc4b93c805e59a2e828 | [
"MIT"
] | null | null | null | # Copyright 1996-2020 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SumoSupervisor class inheriting from Supervisor."""
from controller import Supervisor, Node
from Objects import Vehicle, TrafficLight
from WebotsVehicle import WebotsVehicle
#from __future__ import absolute_import
#from __future__ import print_function
import os
import sys
import math
import json
import random
import numpy as np
import traceback
from scipy.stats import lognorm
from scipy.stats import uniform
from scipy.stats import weibull_min
from scipy.stats import gamma
hiddenPosition = 10000
################################
# Risk-based Predictive Model developed using NGSIM data
################################
#lag crash risk prediction
def risk_lag_prior(Speed_Merge, Acce_Merge, Remaining_Distance, Acce_lag, Gap_lag, Speed_dif):
    """Prior probability of a lag (rear) conflict for a merging vehicle.

    Logistic regression; the coefficient vector is ordered as
    [intercept, Speed_Merge, Acce_Merge, Remaining_Distance, Acce_lag,
    Gap_lag, Speed_dif].  Coefficients were presumably calibrated on
    NGSIM data (see the module header) -- TODO confirm.
    """
    coeffs = [-0.648166, 0.291651, -2.67226, 0.010325, 2.736206, -1.9484, 3.314949]
    features = [1, Speed_Merge, Acce_Merge, Remaining_Distance, Acce_lag, Gap_lag, Speed_dif]
    logit = np.dot(coeffs, features)
    return 1 / (1 + np.exp(-logit))
def lag_conflict_likelihood(Gap_lag):
    """Likelihood P(gap | conflict) for the lag gap.

    Uses a fitted Weibull(c=1.177935, loc=0, scale=15.060868) and returns
    the probability mass over the one-unit interval centred on ``Gap_lag``.
    """
    c, loc, scale = 1.177935, 0, 15.060868
    upper = weibull_min.cdf(Gap_lag + 0.5, c, loc, scale)
    lower = weibull_min.cdf(Gap_lag - 0.5, c, loc, scale)
    return upper - lower
def lag_nonconflict_likelihood(Gap_lag):
shape = 3.82145718
rate = 0.06710455
#gamma cdf
loc=0
scale=1/rate
x_upper=Gap_lag+0.5
x_lower=Gap_lag-0.5
Pr_gap_lag_nonconflict=gamma.cdf(x_upper,shape,loc,scale)-gamma.cdf(x_lower,shape,loc,scale)
return Pr_gap_lag_nonconflict
def risk_lag_posterior(Gap_lag,Pr_lag_prior,Pr_gap_lag_nonconflict,Pr_gap_lag_conflict):
#print ('1-Pr_lag_prior = ', 1-Pr_lag_prior)
#print ('Pr_gap_lag_conflict * Pr_lag_prior = ', Pr_gap_lag_conflict * Pr_lag_prior)
#print ('((Pr_gap_lag_conflict * Pr_lag_prior) + Pr_gap_lag_nonconflict *(1-Pr_lag_prior)) = ', ((Pr_gap_lag_conflict * Pr_lag_prior) + Pr_gap_lag_nonconflict *(1-Pr_lag_prior)))
#print ('Pr_lag_prior * Pr_gap_lag_conflict = ', Pr_lag_prior * Pr_gap_lag_conflict)
denom = ((Pr_gap_lag_conflict * Pr_lag_prior) + Pr_gap_lag_nonconflict *(1-Pr_lag_prior))
if(denom == 0):
return 1.0
Pr_lag_posterior= (Pr_lag_prior * Pr_gap_lag_conflict)/denom
#print ('Pr_lag_posterior: ', Pr_lag_posterior)
#print ('')
return Pr_lag_posterior
def risk_lead_prior(Acce_Merge,Remaining_Distance,Speed_lead,Acce_lead,Gap_lead):
beta=[-0.871,-1.257,0.029,-0.034,0.451,-0.301]
X=[1,Acce_Merge,Remaining_Distance,Speed_lead,Acce_lead,Gap_lead+2.5]
Pr_lead_prior=1/(1+np.exp(-np.dot(beta,X)))
return Pr_lead_prior
def lead_conflict_likelihood(Gap_lead):
#uniform cdf
loc= -4.996666
scale=52.099515+4.996666
x_upper=Gap_lead+0.5
x_lower=Gap_lead-0.5
Pr_gap_lead_conflict=uniform.cdf(x_upper,loc,scale)-uniform.cdf(x_lower,loc,scale)
return Pr_gap_lead_conflict
def lead_nonconflict_likelihood(Gap_lead):
#weibull cdf
c= 1.609829
loc=0
scale=49.765264
x_upper=Gap_lead+0.5
x_lower=Gap_lead-0.5
Pr_gap_lead_conflict=weibull_min.cdf(x_upper,c,loc,scale)-weibull_min.cdf(x_lower, c,loc,scale)
return Pr_gap_lead_conflict
def risk_lead_posterior(Gap_lead,Pr_lead_prior,Pr_gap_lead_nonconflict,Pr_gap_lead_conflict):
denom = (Pr_gap_lead_conflict*Pr_lead_prior+Pr_gap_lead_nonconflict*(1-Pr_lead_prior))
if(denom == 0):
return 1.0
Pr_lead_posterior=Pr_lead_prior*Pr_gap_lead_conflict/(Pr_gap_lead_conflict*Pr_lead_prior+Pr_gap_lead_nonconflict*(1-Pr_lead_prior))
return Pr_lead_posterior
def safety_distance_min(Speed_Merge,Speed_lead):
b_av=4.6
b_lead=4.2
tau_av=0.9
S_min=Speed_Merge*tau_av+1/2*np.square(Speed_Merge)/b_av-1/2*np.square(Speed_lead)/b_lead
return S_min
#compute TIT, referred to as the entity of the TTC lower than the TTC threshold.
def TIT(traci,step,segment_vehicleIDs):
TIT_step=0
conflict_indicator=0
for vehicleID in segment_vehicleIDs:
front_vehicle=traci.vehicle.getLeader(vehicleID)
if front_vehicle is not None:
front_vehicleID=front_vehicle[0]
if front_vehicleID in segment_vehicleIDs:
speed_subject = traci.vehicle.getSpeed(vehicleID)
acce_subject = traci.vehicle.getAcceleration(vehicleID)
speed_front = traci.vehicle.getSpeed(front_vehicleID)
acce_front = traci.vehicle.getAcceleration(front_vehicleID)
delta_acce=acce_subject-acce_front
delta_speed = speed_subject-speed_front
coefs = [1/2*delta_acce,delta_speed,-front_vehicle[1]]
r= np.roots(coefs)
r= r[np.isreal(r)]
if len(r)>=2:
if r[0] <0 and r[1]<0:
TTC_lead=float("inf")
elif r[0]<0 or r[1]<0:
TTC_lead=np.max(r)
elif r[0]>0 and r[1]>0:
TTC_lead=np.min(r)
else:
TTC_lead=float("inf")
if TTC_lead<=2 and TTC_lead>0:
conflict_indicator=(1/TTC_lead-1/2)
else:
conflict_indicator=0
TIT_step=TIT_step+conflict_indicator*0.2
return TIT_step
#compute TET
def TET(traci,step,segment_vehicleIDs,output_TTC_Evaluate):
TET_step=0
conflict_indicator=0
for vehicleID in segment_vehicleIDs:
front_vehicle=traci.vehicle.getLeader(vehicleID)
if front_vehicle is not None:
front_vehicleID=front_vehicle[0]
if front_vehicleID in segment_vehicleIDs:
speed_subject = traci.vehicle.getSpeed(vehicleID)
acce_subject = traci.vehicle.getAcceleration(vehicleID)
speed_front = traci.vehicle.getSpeed(front_vehicleID)
acce_front = traci.vehicle.getAcceleration(front_vehicleID)
delta_acce=acce_subject-acce_front
delta_speed = speed_subject-speed_front
coefs = [1/2*delta_acce,delta_speed,-front_vehicle[1]]
r= np.roots(coefs)
r= r[np.isreal(r)]
if len(r)>=2:
if r[0] <0 and r[1]<0:
TTC_lead=float("inf")
elif r[0]<0 or r[1]<0:
TTC_lead=np.max(r)
elif r[0]>0 and r[1]>0:
TTC_lead=np.min(r)
print(step,vehicleID,TTC_lead,file=output_TTC_Evaluate)
else:
TTC_lead=float("inf")
print(step,vehicleID,TTC_lead,file=output_TTC_Evaluate)
if TTC_lead<=2:
conflict_indicator=1
else:
conflict_indicator=0
TET_step=TET_step+conflict_indicator*0.2
return TET_step
def TTC_merge(traci,step,av_ID,output_lead_veh,output_lag_veh,MergingVehicleID):
Speed_Merge = traci.vehicle.getSpeed(av_ID)
Acce_Merge = traci.vehicle.getAcceleration(av_ID)
Remaining_Distance = 99.49-traci.vehicle.getLanePosition(av_ID)
leftLeaders = traci.vehicle.getLeftLeaders(av_ID)
leftFollowers = traci.vehicle.getLeftFollowers(av_ID)
if len(leftLeaders)>=1:
leftLeadersVehID = leftLeaders[0][0]
Speed_lead = traci.vehicle.getSpeed(leftLeadersVehID)
Acce_lead = traci.vehicle.getAcceleration(leftLeadersVehID)
Gap_lead=leftLeaders[0][1]
is_lead=True
delta_acce=Acce_Merge-Acce_lead
delta_speed = Speed_Merge-Speed_lead
coefs = [1/2*delta_acce,delta_speed,-leftLeaders[0][1]]
r= np.roots(coefs)
r= r[np.isreal(r)]
if len(r)>=2:
if r[0] <0 and r[1]<0:
TTC_lead=float("inf")
elif r[0]<0 or r[1]<0:
TTC_lead=np.max(r)
elif r[0]>0 and r[1]>0:
TTC_lead=np.min(r)
if av_ID in MergingVehicleID:
print(step,av_ID,TTC_lead,is_lead,file=output_lead_veh)
else:
TTC_lead=float("inf")
if av_ID in MergingVehicleID:
print(step,av_ID,TTC_lead,is_lead,file=output_lead_veh)
else:
is_lead =False
TTC_lead=float("inf")
print(step,av_ID,TTC_lead,is_lead,file=output_lead_veh)
if len(leftFollowers)>=1:
leftFollowersVehID = leftFollowers[0][0]
Speed_lag =traci.vehicle.getSpeed(leftFollowersVehID)
Acce_lag =traci.vehicle.getAcceleration(leftFollowersVehID)
Gap_lag=leftFollowers[0][1]
is_lag=True
delta_acce=Acce_lag-Acce_Merge
delta_speed = Speed_lag-Speed_Merge
coefs = [1/2*delta_acce,delta_speed,-leftFollowers[0][1]]
r= np.roots(coefs)
r= r[np.isreal(r)]
if len(r)>=2:
if r[0] <0 and r[1]<0:
TTC_lag=float("inf")
elif r[0]<0 or r[1]<0:
TTC_lag=np.max(r)
elif r[0]>0 and r[1]>0:
TTC_lag=np.min(r)
if av_ID in MergingVehicleID:
print(step,av_ID,TTC_lag,is_lag,file=output_lag_veh)
else:
TTC_lag=float("inf")
print(step,av_ID,TTC_lag,is_lag,file=output_lag_veh)
else:
is_lag =False
TTC_lag=float("inf")
if av_ID in MergingVehicleID:
print(step,av_ID,TTC_lag,is_lag,file=output_lag_veh)
################################
def rotation_from_yaw_pitch_roll(yaw, pitch, roll):
"""Compute the rotation from the roll pitch yaw angles."""
rotation = [0, 1, 0, 0]
# construct rotation matrix
# a b c
# d e f
# g h i
a = math.cos(roll) * math.cos(yaw)
b = -math.sin(roll)
c = math.cos(roll) * math.sin(yaw)
d = math.sin(roll) * math.cos(yaw) * math.cos(pitch) + math.sin(yaw) * math.sin(pitch)
e = math.cos(roll) * math.cos(pitch)
f = math.sin(roll) * math.sin(yaw) * math.cos(pitch) - math.cos(yaw) * math.sin(pitch)
g = math.sin(roll) * math.cos(yaw) * math.sin(pitch) - math.sin(yaw) * math.cos(pitch)
h = math.cos(roll) * math.sin(pitch)
i = math.sin(roll) * math.sin(yaw) * math.sin(pitch) + math.cos(yaw) * math.cos(pitch)
# convert it to rotation vector
cosAngle = 0.5 * (a + e + i - 1.0)
if math.fabs(cosAngle) > 1:
return rotation
else:
rotation[0] = f - h
rotation[1] = g - c
rotation[2] = b - d
rotation[3] = math.acos(cosAngle)
# normalize vector
length = math.sqrt(rotation[0] * rotation[0] + rotation[1] * rotation[1] + rotation[2] * rotation[2])
if length != 0:
rotation[0] = rotation[0] / length
rotation[1] = rotation[1] / length
rotation[2] = rotation[2] / length
if rotation[0] == 0 and rotation[1] == 0 and rotation[2] == 0:
return [0, 1, 0, 0]
else:
return rotation
class SumoSupervisor (Supervisor):
"""This is the main class that implements the actual interface."""
def get_viewpoint_position_field(self):
"""Look for the 'position' field of the Viewpoint node."""
children = self.getRoot().getField("children")
number = children.getCount()
for i in range(0, number):
node = children.getMFNode(i)
if node.getType() == Node.VIEWPOINT:
return node.getField("position")
return None
def get_initial_vehicles(self):
"""Get all the vehicles (both controlled by SUMO and Webots) already present in the world."""
for i in range(0, self.vehiclesLimit):
defName = "SUMO_VEHICLE%d" % self.vehicleNumber
node = self.getFromDef(defName)
if node:
self.vehicles[i] = Vehicle(node)
self.vehicles[i].name.setSFString("SUMO vehicle %i" % self.vehicleNumber)
self.vehicleNumber += 1
else:
break
for i in range(0, self.vehiclesLimit):
defName = "WEBOTS_VEHICLE%d" % self.webotsVehicleNumber
node = self.getFromDef(defName)
if node:
self.webotsVehicles[i] = WebotsVehicle(node, self.webotsVehicleNumber)
self.webotsVehicleNumber += 1
else:
break
def generate_new_vehicle(self, vehicleClass):
"""Generate and import a new vehicle that will be controlled by SUMO."""
# load the new vehicle
vehicleString, defName = Vehicle.generate_vehicle_string(self.vehicleNumber, vehicleClass)
self.rootChildren.importMFNodeFromString(-1, vehicleString)
self.vehicles[self.vehicleNumber] = Vehicle(self.getFromDef(defName))
self.vehicleNumber += 1
def get_vehicle_index(self, id, generateIfneeded=True):
"""Look for the vehicle index corresponding to this id (and optionnaly create it if required)."""
for i in range(0, self.vehicleNumber):
if self.vehicles[i].currentID == id:
# the vehicle was already here at last step
return i
if not generateIfneeded:
return -1
# the vehicle was not present last step
# check if a corresponding vehicle is already in the simulation
node = self.getFromDef(id)
if node and (node.getTypeName() in Vehicle.get_car_models_list() or
node.getTypeName() in Vehicle.get_bus_models_list() or
node.getTypeName() in Vehicle.get_truck_models_list() or
node.getTypeName() in Vehicle.get_motorcycle_models_list()):
self.vehicles[self.vehicleNumber] = Vehicle(node)
self.vehicles[self.vehicleNumber].currentID = id
self.vehicleNumber += 1
return self.vehicleNumber - 1
# check if a vehicle is available
vehicleClass = self.get_vehicle_class(id)
for i in range(0, self.vehicleNumber):
if not self.vehicles[i].inUse and self.vehicles[i].vehicleClass == vehicleClass:
# if a vehicle is available assign it to this id
self.vehicles[i].currentID = id
self.vehicles[i].name.setSFString(id)
return i
# no vehicle available => generate a new one if limit is not reached
if self.vehicleNumber < self.vehiclesLimit:
vehicleClass = self.get_vehicle_class(id)
self.generate_new_vehicle(vehicleClass)
return self.vehicleNumber - 1
return -1
def get_vehicle_class(self, id):
"""Get the class of the vehicle associated to this id."""
if id in self.vehiclesClass:
return self.vehiclesClass[id]
vehicleClass = Vehicle.get_corresponding_vehicle_class(self.traci.vehicle.getVehicleClass(id))
self.vehiclesClass[id] = vehicleClass
return vehicleClass
def disable_unused_vehicles(self, IdList):
"""Check for all the vehicles currently used if they need to be disabled."""
for i in range(0, self.vehicleNumber):
if self.vehicles[i].inUse and self.vehicles[i].currentID not in IdList:
self.vehicles[i].inUse = False
self.vehicles[i].name.setSFString("SUMO vehicle %i" % i)
self.vehicles[i].currentLane = None
self.vehicles[i].currentRoad = None
self.vehicles[i].laneChangeStartTime = None
self.vehicles[i].laneChangeDistance = 0
def hide_unused_vehicles(self):
"""Hide all the newly unused vehicles."""
for i in range(0, self.vehicleNumber):
if not self.vehicles[i].inUse:
if self.vehicles[i].targetPos[0] != hiddenPosition:
self.vehicles[i].targetPos = [hiddenPosition, 0.5, i * 10]
self.vehicles[i].currentPos = [hiddenPosition, 0.5, i * 10]
self.vehicles[i].currentRot = [0, 1, 0, 0]
self.vehicles[i].targetRot = [0, 1, 0, 0]
self.vehicles[i].currentAngles = [0, 0, 0]
self.vehicles[i].targetAngles = [0, 0, 0]
self.vehicles[i].translation.setSFVec3f([hiddenPosition, 0.5, i * 10])
self.vehicles[i].node.setVelocity([0, 0, 0, 0, 0, 0])
for wheelAngularVelocity in self.vehicles[i].wheelsAngularVelocity:
wheelAngularVelocity.setSFVec3f([0, 0, 0])
def stop_all_vehicles(self):
"""Stop all the vehicles (to be called when controller exits)."""
for i in range(0, self.vehicleNumber):
self.vehicles[i].node.setVelocity([0, 0, 0, 0, 0, 0])
for wheelAngularVelocity in self.vehicles[i].wheelsAngularVelocity:
wheelAngularVelocity.setSFVec3f([0, 0, 0])
def get_vehicles_position(self, id, subscriptionResult, step, xOffset, yOffset,
maximumLateralSpeed, maximumAngularSpeed, laneChangeDelay):
"""Compute the new desired position and orientation for all the vehicles controlled by SUMO."""
if subscriptionResult is None:
return
height = 0.4
roll = 0.0
pitch = 0.0
sumoPos = subscriptionResult[self.traci.constants.VAR_POSITION]
sumoAngle = subscriptionResult[self.traci.constants.VAR_ANGLE]
pos = [-sumoPos[0] + xOffset, height, sumoPos[1] - yOffset]
angle = math.pi * sumoAngle / 180
dx = -math.cos(angle)
dy = -math.sin(angle)
yaw = -math.atan2(dy, -dx)
# correct position (origin of the car is not the same in Webots / sumo)
vehicleLength = subscriptionResult[self.traci.constants.VAR_LENGTH]
pos[0] += 0.5 * vehicleLength * math.sin(angle)
pos[2] -= 0.5 * vehicleLength * math.cos(angle)
# if needed check the vehicle is in the visibility radius
if self.radius > 0:
viewpointPosition = self.viewpointPosition.getSFVec3f()
xDiff = viewpointPosition[0] - pos[0]
yDiff = viewpointPosition[1]
zDiff = viewpointPosition[2] - pos[2]
distance = math.sqrt(xDiff * xDiff + yDiff * yDiff + zDiff * zDiff)
if distance > self.radius:
index = self.get_vehicle_index(id, generateIfneeded=False)
if index >= 0:
self.vehicles[index].inUse = False
self.vehicles[index].currentID = ""
self.vehicles[index].name.setSFString("SUMO vehicle %i" % index)
return
index = self.get_vehicle_index(id)
if index >= 0:
vehicle = self.vehicles[index]
height = vehicle.wheelRadius
if self.enableHeight:
roadID = subscriptionResult[self.traci.constants.VAR_ROAD_ID]
roadPos = subscriptionResult[self.traci.constants.VAR_LANEPOSITION]
if roadID.startswith(":"):
# this is a lane change it does not contains edge information
# in that case, use previous height, roll and pitch
height = vehicle.currentPos[1]
roll = vehicle.roll
pitch = vehicle.pitch
else:
tags = roadID.split('_')
del tags[0] # remove the first one which is the 'id' of the road
for tag in tags:
if tag.startswith('height'):
height = height + float(tag.split('height', 1)[1])
elif tag.startswith('roll'):
roll = float(tag.split('roll', 1)[1])
elif tag.startswith('pitch'):
pitch = float(tag.split('pitch', 1)[1])
vehicle.pitch = pitch
vehicle.roll = roll
# ajust height according to the pitch
if pitch != 0:
height += (roadPos - 0.5 * vehicleLength) * math.sin(pitch)
# ajust height according to the roll and lateral position of the vehicle
if roll != 0.0:
laneIndex = subscriptionResult[self.traci.constants.VAR_LANE_INDEX]
laneID = subscriptionResult[self.traci.constants.VAR_LANE_ID]
laneWidth = self.traci.lane.getWidth(laneID)
edge = self.net.getEdge(roadID)
numberOfLane = edge.getLaneNumber()
# compute lateral distance from the center of the lane
distance = math.fabs((laneIndex - numberOfLane / 2) + 0.5) * laneWidth
if laneIndex >= (numberOfLane / 2):
height = height - distance * math.sin(roll)
else:
height = height + distance * math.sin(roll)
pos[1] = height
if vehicle.inUse:
# TODO: once the lane change model of SUMO has been improved
# (sub-lane model currently in development phase) we will be able to remove this corrections
# compute lateral (x) and longitudinal (z) displacement
diffX = pos[0] - vehicle.targetPos[0]
diffZ = pos[2] - vehicle.targetPos[2]
x1 = math.cos(-angle) * diffX - math.sin(-angle) * diffZ
z1 = math.sin(-angle) * diffX + math.cos(-angle) * diffZ
# check for lane change
if (vehicle.currentRoad is not None and
vehicle.currentRoad == subscriptionResult[self.traci.constants.VAR_ROAD_ID] and
vehicle.currentLane is not None and
vehicle.currentLane != subscriptionResult[self.traci.constants.VAR_LANE_INDEX]):
vehicle.laneChangeStartTime = self.getTime()
vehicle.laneChangeDistance = x1
x2 = x1
# artificially add an angle depending on the lateral speed
artificialAngle = 0
if z1 > 0.0001: # don't add the angle if speed is very small as atan2(0.0, 0.0) is unstable
# the '0.15' factor was found empirically and should not depend on the simulation
artificialAngle = 0.15 * math.atan2(x1, z1)
if (vehicle.laneChangeStartTime is not None and
vehicle.laneChangeStartTime > self.getTime() - laneChangeDelay and
abs(vehicle.laneChangeDistance) >= abs(x1)): # lane change case
ratio = (self.getTime() - vehicle.laneChangeStartTime) / laneChangeDelay
ratio = (0.5 + 0.5 * math.sin((ratio - 0.5) * math.pi))
p = vehicle.laneChangeDistance * ratio
x2 = x1 - (vehicle.laneChangeDistance - p)
artificialAngle = math.atan2(x2, z1)
# limit lateral speed
threshold = 0.001 * step * maximumLateralSpeed
x2 = min(max(x2, -threshold), threshold)
x3 = math.cos(angle) * x2 - math.sin(angle) * z1
z3 = math.sin(angle) * x2 + math.cos(angle) * z1
pos = [x3 + vehicle.targetPos[0], pos[1], z3 + vehicle.targetPos[2]]
diffYaw = yaw - vehicle.targetAngles[1] - artificialAngle
# limit angular speed
while diffYaw > math.pi:
diffYaw -= 2 * math.pi
while diffYaw < -math.pi:
diffYaw += 2 * math.pi
threshold = 0.001 * step * maximumAngularSpeed
diffYaw = min(max(diffYaw, -threshold), threshold)
yaw = diffYaw + vehicle.targetAngles[1]
# tilt motorcycle depending on the angluar speed
if vehicle.type in Vehicle.get_motorcycle_models_list():
threshold = 0.001 * step * maximumLateralSpeed
roll -= min(max(diffYaw / (0.001 * step), -0.2), 0.2)
rot = rotation_from_yaw_pitch_roll(yaw, pitch, roll)
if not vehicle.inUse:
# this vehicle was previously not used, move it directly to the correct initial location
vehicle.inUse = True
vehicle.currentPos = pos
vehicle.currentRot = rot
vehicle.currentAngles = [roll, yaw, pitch]
else:
vehicle.currentPos = vehicle.targetPos
vehicle.currentRot = vehicle.targetRot
vehicle.currentAngles = vehicle.targetAngles
# update target and wheels speed
vehicle.targetPos = pos
vehicle.targetRot = rot
vehicle.targetAngles = [roll, yaw, pitch]
if self.traci.constants.VAR_SPEED in subscriptionResult:
vehicle.speed = subscriptionResult[self.traci.constants.VAR_SPEED]
vehicle.currentRoad = subscriptionResult[self.traci.constants.VAR_ROAD_ID]
vehicle.currentLane = subscriptionResult[self.traci.constants.VAR_LANE_INDEX]
def update_vehicles_position_and_velocity(self, step, rotateWheels):
"""Update the actual position (using angular and linear velocities) of all the vehicles in Webots."""
for i in range(0, self.vehicleNumber):
if self.vehicles[i].inUse:
self.vehicles[i].translation.setSFVec3f(self.vehicles[i].currentPos)
self.vehicles[i].rotation.setSFRotation(self.vehicles[i].currentRot)
velocity = []
velocity.append(self.vehicles[i].targetPos[0] - self.vehicles[i].currentPos[0])
velocity.append(self.vehicles[i].targetPos[1] - self.vehicles[i].currentPos[1])
velocity.append(self.vehicles[i].targetPos[2] - self.vehicles[i].currentPos[2])
for j in range(0, 3):
diffAngle = self.vehicles[i].currentAngles[j] - self.vehicles[i].targetAngles[j]
if diffAngle > math.pi:
diffAngle = diffAngle - 2 * math.pi
elif diffAngle < -math.pi:
diffAngle = diffAngle + 2 * math.pi
velocity.append(diffAngle)
velocity[:] = [1000 * x / step for x in velocity]
self.vehicles[i].node.setVelocity(velocity)
if rotateWheels:
angularVelocity = [self.vehicles[i].speed / self.vehicles[i].wheelRadius, 0, 0]
for wheelAngularVelocity in self.vehicles[i].wheelsAngularVelocity:
wheelAngularVelocity.setSFVec3f(angularVelocity)
def update_webots_vehicles(self, xOffset, yOffset):
"""Update the position of all the vehicles controlled by Webots in SUMO."""
for i in range(0, self.webotsVehicleNumber):
if self.webotsVehicles[i].is_on_road(xOffset, yOffset, self.maxWebotsVehicleDistanceToLane, self.net):
self.webotsVehicles[i].update_position(self.getTime(), self.net, self.traci, self.sumolib, xOffset, yOffset)
else:
# the controlled vehicle is not on any road
# => we remove it from sumo network
if self.webotsVehicles[i].name in self.traci.vehicle.getIDList():
self.traci.vehicle.remove(self.webotsVehicles[i].name)
def get_traffic_light(self, IDlist):
"""Get the state of all the traffic lights controlled by SUMO."""
self.trafficLightNumber = len(IDlist)
self.trafficLights = {}
LEDNames = []
for i in range(0, self.getNumberOfDevices()):
device = self.getDeviceByIndex(i)
if device.getNodeType() == Node.LED:
LEDNames.append(device.getName())
for i in range(0, self.trafficLightNumber):
id = IDlist[i]
self.trafficLights[id] = TrafficLight()
self.trafficLights[id].lightNumber = len(self.traci.trafficlight.getRedYellowGreenState(id))
for j in range(0, self.trafficLights[id].lightNumber):
trafficLightNode = self.getFromDef("TLS_" + id + "_" + str(j))
if trafficLightNode is not None:
self.trafficLights[id].trafficLightRecognitionColors[j] = trafficLightNode.getField("recognitionColors")
ledName = id + "_" + str(j) + "_"
if (ledName + "r") in LEDNames:
self.trafficLights[id].LED[3 * j + 0] = self.getLED(ledName + "r")
else:
self.trafficLights[id].LED[3 * j + 0] = None
if (ledName + "y") in LEDNames:
self.trafficLights[id].LED[3 * j + 1] = self.getLED(ledName + "y")
else:
self.trafficLights[id].LED[3 * j + 1] = None
if (ledName + "g") in LEDNames:
self.trafficLights[id].LED[3 * j + 2] = self.getLED(ledName + "g")
else:
self.trafficLights[id].LED[3 * j + 2] = None
def update_traffic_light_state(self, id, states):
"""Update the traffic lights state in Webots."""
# update light LED state if traffic light state has changed
currentState = states[self.traci.constants.TL_RED_YELLOW_GREEN_STATE]
if self.trafficLights[id].previousState != currentState:
self.trafficLights[id].previousState = currentState
for j in range(0, self.trafficLights[id].lightNumber):
# Update red LED if it exists
if self.trafficLights[id].LED[3 * j + 0]:
if currentState[j] == 'r' or currentState[j] == 'R':
self.trafficLights[id].LED[3 * j + 0].set(1)
# update recognition colors
if j in self.trafficLights[id].trafficLightRecognitionColors:
self.trafficLights[id].trafficLightRecognitionColors[j].setMFColor(1, [1, 0, 0])
else:
self.trafficLights[id].LED[3 * j + 0].set(0)
# Update yellow LED if it exists
if self.trafficLights[id].LED[3 * j + 1]:
if currentState[j] == 'y' or currentState[j] == 'Y':
self.trafficLights[id].LED[3 * j + 1].set(1)
# update recognition colors
if j in self.trafficLights[id].trafficLightRecognitionColors:
self.trafficLights[id].trafficLightRecognitionColors[j].setMFColor(1, [1, 0.5, 0])
else:
self.trafficLights[id].LED[3 * j + 1].set(0)
# Update green LED if it exists
if self.trafficLights[id].LED[3 * j + 2]:
if currentState[j] == 'g' or currentState[j] == 'G':
self.trafficLights[id].LED[3 * j + 2].set(1)
# update recognition colors
if j in self.trafficLights[id].trafficLightRecognitionColors:
self.trafficLights[id].trafficLightRecognitionColors[j].setMFColor(1, [0, 1, 0])
else:
self.trafficLights[id].LED[3 * j + 2].set(0)
def run(self, port, disableTrafficLight, directory, step, rotateWheels,
maxVehicles, radius, enableHeight, useDisplay, displayRefreshRate,
displayZoom, displayFitSize, maximumLateralSpeed, maximumAngularSpeed,
laneChangeDelay, traci, sumolib):
"""Main loop function."""
try:
print('Connect to SUMO... This operation may take a few seconds.')
self.step(step)
traci.init(port, numRetries=20)
except:
sys.exit('Unable to connect to SUMO, please make sure any previous instance of SUMO is closed.\n You can try'
' changing SUMO port using the "--port" argument.')
self.traci = traci
self.sumolib = sumolib
self.radius = radius
self.enableHeight = enableHeight
self.sumoClosed = False
self.temporaryDirectory = directory
self.rootChildren = self.getRoot().getField("children")
self.viewpointPosition = self.get_viewpoint_position_field()
self.maxWebotsVehicleDistanceToLane = 5
self.webotsVehicleNumber = 0
self.webotsVehicles = {}
self.vehicleNumber = 0
self.vehicles = {}
self.vehiclesLimit = maxVehicles
self.vehiclesClass = {}
self.count_steps = 0
# for backward compatibility
if self.traci.constants.TRACI_VERSION <= 15:
self.traci.trafficlight = self.traci.trafficlights
# get sumo vehicles already present in the world
self.get_initial_vehicles()
# parse the net and get the offsets
self.net = sumolib.net.readNet((directory + "/sumo.net.xml").replace('/', os.sep))
xOffset = self.net.getLocationOffset()[0]
yOffset = self.net.getLocationOffset()[1]
# Load plugin to the generic SUMO Supervisor (if any)
self.usePlugin = False
if os.path.exists((directory + "/plugin.py").replace('/', os.sep)):
self.usePlugin = True
sys.path.append(directory)
import plugin
sumoSupervisorPlugin = plugin.SumoSupervisorPlugin(self, self.traci, self.net)
# Get all the LEDs of the traffic lights
if not disableTrafficLight:
trafficLightsList = self.traci.trafficlight.getIDList()
self.get_traffic_light(trafficLightsList)
for id in trafficLightsList:
# subscribe to traffic lights state
self.traci.trafficlight.subscribe(id, [self.traci.constants.TL_RED_YELLOW_GREEN_STATE])
# Subscribe to new vehicles entering the simulation
self.traci.simulation.subscribe([
self.traci.constants.VAR_DEPARTED_VEHICLES_IDS,
self.traci.constants.VAR_MIN_EXPECTED_VEHICLES
])
# Create the vehicle variable subscription list
self.vehicleVariableList = [
self.traci.constants.VAR_POSITION,
self.traci.constants.VAR_ANGLE,
self.traci.constants.VAR_LENGTH,
self.traci.constants.VAR_ROAD_ID,
self.traci.constants.VAR_LANE_INDEX
]
if rotateWheels:
self.vehicleVariableList.append(self.traci.constants.VAR_SPEED)
if enableHeight:
self.vehicleVariableList.extend([
self.traci.constants.VAR_ROAD_ID,
self.traci.constants.VAR_LANEPOSITION,
self.traci.constants.VAR_LANE_ID
])
# create the SUMO display
self.sumoDisplay = None
if useDisplay:
view = self.traci.gui.getIDList()[0]
display = self.getDisplay('sumo')
if display is not None:
from SumoDisplay import SumoDisplay
self.sumoDisplay = SumoDisplay(display, displayZoom, view, directory, displayRefreshRate, displayFitSize,
self.traci)
########################################
risk_based = 1 #switch to gap/risk models
#print ('Driver Behavior : ',driver_behavior, av_arrival_time, traffic)
################################
out_path = "C:\\Users\\weiminj\\Desktop\\webotoutput\\risk_based"
if(risk_based==1):
out_path = "C:\\Users\\weiminj\\Desktop\\webotoutput\\risk_based"
print ('Risk Based Model Activated!')
else :
out_path = "C:\\Users\\weiminj\\Desktop\\webotoutput\\basemodel"
print ('Gap Based Model Activated!')
output = open(out_path + "\\output.txt", "w")
output_TTC_Evaluate = open(out_path + "\output_TTC_Evaluate.txt", "w")
print ("TimeStep","vehicleID","TTC ",file=output_TTC_Evaluate)
output_TET_Evaluate = open(out_path+ "\output_TET_Evaluate.txt", "w")
print ("TimeStep","vehicleID","TET ",file=output_TET_Evaluate)
output_TIT_Evaluate = open(out_path+ "\output_TIT_Evaluate.txt", "w")
print ("TimeStep","vehicleID","TIT ",file=output_TIT_Evaluate)
output_lead_veh = open(out_path + "\output_lead_veh.txt", "w")
print ("TimeStep","MergingVehicleID","TTC",file=output_lead_veh)
output_lag_veh = open(out_path + "\output_lag_veh.txt", "w")
print ("TimeStep","MergingVehicleID","TTC",file=output_lag_veh)
output_prob = open(out_path + "\output_prob.txt", "w")
#output_SUMO_trajectory_profile = open(out_path + "\output_SUMO_trajectory_profile.txt", "w")
#print("TimeStep","lane_ID_AV","Gap_AV_front","Pr_lead_prior","Pr_lag_prior","Speed_Merge","safety_distance","Acce_lag","avg_speed_target_lane","leftLeadersVehID","AV_front_vehicleID","leftFollowersVehID","Gap_lead","Gap_lag",file=output_SUMO_trajectory_profile)
#output_lead_acc = open(out_path + "\\acc.txt","w")
# Main simulation loop
while self.step(step) >= 0:
if self.usePlugin:
sumoSupervisorPlugin.run(step)
if self.sumoDisplay is not None:
self.sumoDisplay.step(step)
# try to perform a SUMO step, if it fails it means SUMO has been closed by the user
try:
self.traci.simulationStep()
except self.traci.exceptions.FatalTraCIError:
print("Sumo closed")
self.sumoClosed = True
break
result = self.traci.simulation.getSubscriptionResults()
#############################
# Mahfuz Code Starts
#############################
#print ('SUMO Time:', self.getTime())
# This file are for evalaution for both baselien and risk-based model.
av_ID="webotsVehicle0"
segment_vehicleIDs=traci.edge.getLastStepVehicleIDs("37434591_4")
MergingVehicleID = traci.lane.getLastStepVehicleIDs("37434591_4_0")
print (self.count_steps*0.2,segment_vehicleIDs,TET(self.traci, self.count_steps,segment_vehicleIDs,output_TTC_Evaluate),
file=output_TET_Evaluate)
print (self.count_steps*0.2,segment_vehicleIDs,TIT(self.traci, self.count_steps,segment_vehicleIDs),
file=output_TIT_Evaluate)
try:
TTC_merge(self.traci,self.count_steps*0.2,av_ID,output_lead_veh,output_lag_veh,MergingVehicleID)
except Exception as err:
pass
#######################################
# RISK BASED SOLUTION
#######################################
if(risk_based==1):
#This is Risk-based model
#print ('Risk Based Model Activated!')
output_merge_command = open(out_path + "\\output_merge_command.txt", "w")
output_SUMO_trajectory_profile = open(out_path + "\output_SUMO_trajectory_profile.txt", "w")
output_SUMO_lag_veh_profile=open(out_path + "\output_SUMO_lag_veh_profile.txt", "w")
#av_ID="webotsVehicle0"
##################################################
# This will tell the vehicle to stop after the merging is done.
##################################################
MainRoad_1_VehicleID = self.traci.lane.getLastStepVehicleIDs("37434591_3_0")
MergingVehicleID = self.traci.lane.getLastStepVehicleIDs("37434591_4_1")
output = open(out_path + "\\output.txt", "w")
##################################
# webots vehicle
##################################
try:
speed = self.traci.vehicle.getPosition(av_ID)
pos =self.traci.vehicle.getPosition(av_ID)
acc = self.traci.vehicle.getAcceleration(av_ID)
data = {'eid': 3, 'vid': av_ID,
'spd': speed,
'acc': acc,
'x': pos[0], 'y':pos[1]
}
jdata = json.dumps(data)
#print (jdata, file=output)
except Exception as ex:
print ('could not find webots vehicle')
##################################
# other vehicles
##################################
for vehicle_id in MergingVehicleID:
speed =self.traci.vehicle.getSpeed(vehicle_id)
acc = self.traci.vehicle.getAcceleration(vehicle_id)
pos =self.traci.vehicle.getPosition(vehicle_id)
leader_vehicle = self.traci.vehicle.getLeader(vehicle_id)
#print (leader_vehicle)
l_spd = 12.5
l_dist = 1000.0
if(leader_vehicle is not None):
l_spd = leader_vehicle[0]
l_dist = leader_vehicle[1]
data = {'eid': 1, 'vid': vehicle_id,
'spd': speed,
'acc': acc,
'x': pos[0], 'y':pos[1],
'l_spd':l_spd,
'l_dist':l_dist}
jdata = json.dumps(data)
#print (jdata, file=output)
for vehicle_id in MainRoad_1_VehicleID:
speed =self.traci.vehicle.getSpeed(vehicle_id)
acc =self.traci.vehicle.getAcceleration(vehicle_id)
pos =self.traci.vehicle.getPosition(vehicle_id)
leader_vehicle = self.traci.vehicle.getLeader(vehicle_id)
#print (leader_vehicle)
l_spd = 12.5
l_dist = 10000.0
if(leader_vehicle is not None):
l_spd = leader_vehicle[0]
l_dist = leader_vehicle[1]
data = {'eid': 1, 'vid': vehicle_id,
'spd': speed,
'acc': acc,
'x': pos[0], 'y':pos[1],
'l_spd':l_spd,
'l_dist':l_dist}
jdata = json.dumps(data)
#print (jdata, file=output)
output.close()
############################################
try:
Speed_Merge = self.traci.vehicle.getSpeed(av_ID)
Acce_Merge = self.traci.vehicle.getAcceleration(av_ID)
Remaining_Distance = 99.49-self.traci.vehicle.getLanePosition(av_ID)
leftLeaders = self.traci.vehicle.getLeftLeaders(av_ID)
leftFollowers = self.traci.vehicle.getLeftFollowers(av_ID)
lane_ID_AV=self.traci.vehicle.getLaneID(av_ID) #lane ID of the front vehicle
Pr_lead_posterior=1
Pr_lag_posterior=1
#print("leftFollowers",leftFollowers)
Gap_AV_front=float("nan")
global AV_front_vehicleID
AV_front_vehicleID=""
## vehicle-level evaluation
AV_front_vehicle=self.traci.vehicle.getLeader(av_ID)
if AV_front_vehicle is not None: #ensure that we have value of each variable
AV_front_vehicleID=AV_front_vehicle[0]
lane_ID_AV_front=self.traci.vehicle.getLaneID(AV_front_vehicleID) #lane ID of the front vehicle
Gap_AV_front=AV_front_vehicle[1]
speed_AV_front_veh=self.traci.vehicle.getSpeed(AV_front_vehicleID) # speed
## system-level evaluation
target_lane_ID ="37434591_4_1"
avg_speed_target_lane=self.traci.lane.getLastStepMeanSpeed(target_lane_ID) # average speed of the target lane
global leftLeadersVehID_global
global leftFollowersVehID_global
leftLeadersVehID_global=""
leftFollowersVehID_global=""
print(lane_ID_AV,avg_speed_target_lane)
Speed_lead= float("nan")
Acce_lead=float("nan")
Gap_lead=float("nan")
Speed_lag= float("nan")
Acce_lag=float("nan")
Gap_lag=float("nan")
safety_distance=float("nan")
Acce_lead = 1000
if len(leftLeaders)>=1:
is_lead=True
leftLeadersVehID_global = leftLeaders[0][0]
Speed_lead = self.traci.vehicle.getSpeed(leftLeadersVehID_global)
Acce_lead = self.traci.vehicle.getAcceleration(leftLeadersVehID_global)
Gap_lead=leftLeaders[0][1]
if Gap_lead >0:
Pr_lead_prior=risk_lead_prior(Acce_Merge,Remaining_Distance,Speed_lead,Acce_lead,Gap_lead)
Pr_gap_lead_nonconflict=lead_nonconflict_likelihood(Gap_lead)
Pr_gap_lead_conflict=lead_conflict_likelihood(Gap_lead)
Pr_lead_posterior=risk_lead_posterior(Gap_lead,Pr_lead_prior,Pr_gap_lead_nonconflict,Pr_gap_lead_conflict)
else:
Pr_lead_posterior=1
else:
is_lead =False
Pr_lead_posterior=0
Acce_lag = 1000
if len(leftFollowers)>=1:
is_lag=True
leftFollowersVehID_global = leftFollowers[0][0]
Speed_lag =self.traci.vehicle.getSpeed(leftFollowersVehID_global)
Acce_lag =self.traci.vehicle.getAcceleration(leftFollowersVehID_global)
Gap_lag=leftFollowers[0][1]
print("time","lag gap","speed_lag_veh",self.count_steps*0.2,Gap_lag+2.5,Speed_lag)
print(self.count_steps*0.2,Gap_lag+2.5,Speed_lag,file=output_SUMO_lag_veh_profile)
output_SUMO_lag_veh_profile.close()
if Gap_lag>0:
Pr_lag_prior=risk_lag_prior(Speed_Merge,Acce_Merge,Remaining_Distance,Acce_lag,Gap_lag,Speed_lag-Speed_Merge)
Pr_gap_lag_nonconflict=lag_nonconflict_likelihood(Gap_lag)
Pr_gap_lag_conflict=lag_conflict_likelihood(Gap_lag)
Pr_lag_posterior=risk_lag_posterior(Gap_lag,Pr_lag_prior,Pr_gap_lag_nonconflict,Pr_gap_lag_conflict)
else:
Pr_lag_posterior=1
else:
is_lag =False
Pr_lag_posterior=0
output_lead_acc = open(out_path + "\\acc.txt","w")
print (str(Acce_lead)+','+str(Acce_lag),file=output_lead_acc)
output_lead_acc.close()
merge_command=0
# logic of risk-based merging
posterior_limit = 0.5
if (is_lead == False and is_lag ==False):
merge_command=1
elif (is_lead == True and is_lag ==False):
if (Pr_lead_posterior<posterior_limit):
merge_command=1
else:
merge_command=0
elif (is_lead == False and is_lag ==True):
if (Pr_lag_posterior<posterior_limit):
merge_command=1
else:
merge_command=0
elif (is_lead == True and is_lag ==True):
if (Pr_lead_posterior<posterior_limit and Pr_lag_posterior<posterior_limit):
merge_command=1
else:
merge_command=0
print(merge_command, Pr_lead_posterior, Pr_lag_posterior, file=output_merge_command)
print(self.count_steps*0.2,Pr_lag_prior,Pr_lag_posterior,merge_command,file=output_prob)
if AV_front_vehicle is not None: #ensure that we have value of each variable
safety_distance=safety_distance_min(Speed_Merge,speed_AV_front_veh)
print(self.count_steps*0.2,lane_ID_AV,Gap_AV_front,Pr_lead_prior,Pr_lag_prior,Speed_Merge,safety_distance,Acce_lag,avg_speed_target_lane,leftLeadersVehID_global,AV_front_vehicleID,leftFollowersVehID_global,Gap_lead,Gap_lag,file=output_SUMO_trajectory_profile)
output_SUMO_trajectory_profile.close()
#print(step,av_ID,"merge",merge_command)
except Exception as err:
#print ('Exception in sumo supervisior:', sys.exc_info()[0])
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(err).__name__, err.args)
#print (message)
#print (traceback.format_exc())
pass
output_merge_command.close()
else:
#This is the baseline model
MainRoad_1_VehicleID = self.traci.lane.getLastStepVehicleIDs("37434591_3_0")
MergingVehicleID = self.traci.lane.getLastStepVehicleIDs("37434591_4_1")
output = open(out_path + "\\output.txt", "w")
##################################
# webots vehicle
##################################
try:
speed = self.traci.vehicle.getPosition(av_ID)
pos =self.traci.vehicle.getPosition(av_ID)
acc = self.traci.vehicle.getAcceleration(av_ID)
data = {'eid': 3, 'vid': av_ID,
'spd': speed,
'acc': acc,
'x': pos[0], 'y':pos[1]
}
jdata = json.dumps(data)
print (jdata, file=output)
except Exception as ex:
print ('could not find webots vehicle')
for vehicle_id in MergingVehicleID:
speed =self.traci.vehicle.getSpeed(vehicle_id)
acc =self.traci.vehicle.getAcceleration(vehicle_id)
pos =self.traci.vehicle.getPosition(vehicle_id)
data = {'eid': 1, 'vid': vehicle_id, 'spd': speed, 'acc': acc, 'x': pos[0], 'y':pos[1]}
jdata = json.dumps(data)
print (jdata, file=output)
for vehicle_id in MainRoad_1_VehicleID:
speed =self.traci.vehicle.getSpeed(vehicle_id)
acc =self.traci.vehicle.getAcceleration(vehicle_id)
pos =self.traci.vehicle.getPosition(vehicle_id)
data = {'eid': 1, 'vid': vehicle_id, 'spd': speed, 'acc': acc, 'x': pos[0], 'y':pos[1]}
jdata = json.dumps(data)
print (jdata, file=output)
# for baseline model
av_MergingVehicleID = self.traci.lane.getLastStepVehicleIDs("37434591_4_0")
print (av_MergingVehicleID)
webots_vehicle_id = 'webotsVehicle0'
if(webots_vehicle_id in av_MergingVehicleID):
print (av_MergingVehicleID)
try:
leftLeaders =self.traci.vehicle.getLeftLeaders(webots_vehicle_id)
leftFollowers =self.traci.vehicle.getLeftFollowers(webots_vehicle_id)
av_data = {'eid':2}
print ('Left Leaders: ', leftLeaders)
print ('Left Folllowers:', leftFollowers)
if(len(leftLeaders)>0):
av_data['left_leader_dist'] = leftLeaders[0][1]
if(len(leftFollowers)>0):
av_data['left_follower_dist'] = leftFollowers[0][1]
jdata = json.dumps(av_data)
print ('Main Lane status: ', jdata)
print (jdata, file=output)
except Exception as err :
pass #print (err)
output.close()
####################################
#############################
# Mahfuz Code Ends
#############################
# SUMO simulation over (no more vehicle are expected)
if result[self.traci.constants.VAR_MIN_EXPECTED_VEHICLES] == 0:
break
# subscribe to new vehicle
for id in result[self.traci.constants.VAR_DEPARTED_VEHICLES_IDS]:
if not id.startswith("webotsVehicle"):
self.traci.vehicle.subscribe(id, self.vehicleVariableList)
elif self.sumoDisplay is not None and len(self.webotsVehicles) == 1:
# Only one vehicle controlled by Webots => center the view on it
self.traci.gui.trackVehicle(view, 'webotsVehicle0')
# get result from the vehicle subscription and apply it
idList = self.traci.vehicle.getIDList()
for id in idList:
self.get_vehicles_position(id, self.traci.vehicle.getSubscriptionResults(id),
step, xOffset, yOffset, maximumLateralSpeed, maximumAngularSpeed,
laneChangeDelay)
self.disable_unused_vehicles(idList)
# hide unused vehicles
self.hide_unused_vehicles()
if not disableTrafficLight:
for id in self.trafficLights:
self.update_traffic_light_state(id, self.traci.trafficlight.getSubscriptionResults(id))
self.update_vehicles_position_and_velocity(step, rotateWheels)
self.update_webots_vehicles(xOffset, yOffset)
self.count_steps+=1
if not self.sumoClosed:
self.traci.close()
else:
self.stop_all_vehicles()
output_TTC_Evaluate.close()
output_TET_Evaluate.close()
output_TIT_Evaluate.close()
output_lead_veh.close()
output_lag_veh.close()
#output_SUMO_trajectory_profile.close()
output_prob.close()
sys.stdout.flush()
| 46.945261 | 280 | 0.553419 |
5579be37cf2469cef1e6f683580d0d31dde1a050 | 12,156 | py | Python | python/core/tokenizer.py | rwth-acis/las2peer-TensorFlow-TextToText | 1e483b3d33c70555e29b282b2ffe4f3f4fe21b04 | [
"MIT"
] | 1 | 2018-06-11T11:03:23.000Z | 2018-06-11T11:03:23.000Z | python/core/tokenizer.py | rwth-acis/las2peer-TensorFlow-TextToText-Service | 1e483b3d33c70555e29b282b2ffe4f3f4fe21b04 | [
"MIT"
] | null | null | null | python/core/tokenizer.py | rwth-acis/las2peer-TensorFlow-TextToText-Service | 1e483b3d33c70555e29b282b2ffe4f3f4fe21b04 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import html
import regex as re
#import re
from setup.settings import preprocessing
import time
import json
# inspired by https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl used in nmt's examples
# inspired by and based on https://github.com/rsennrich/subword-nmt
# Load list of protected words/phrases (those will remain unbreaked, will be not tokenized)
with open(preprocessing['protected_phrases_file'], 'r') as protected_file:
    # Keep non-empty lines that are not '#' comments; each remaining line is a regex rule.
    protected_phrases_regex = list(filter(lambda word: False if word[0] == '#' else True, filter(None, protected_file.read().split("\n"))))
# Prepare regex of protected phrases (for speed)
matched_regexes = []
unmatched_regexes = []
phrase = None
# Join multiple regexes of the same shape into one big alternation
protected_phrase_regex = None
for protected_phrase_regex in protected_phrases_regex:
    # Recognize rules of the exact shape "(?:^|\s)(?i:(... ?\.))" so their inner
    # alternatives can be merged into a single compiled pattern below.
    matched_regex = re.search(r'\(\?:\^\|\\s\)\(\?i:\((.*?) \?\\.\)\)', protected_phrase_regex)
    if matched_regex:
        matched_regexes.append(matched_regex.group(1))
    else:
        unmatched_regexes.append(protected_phrase_regex)
# Loop variable stays truthy iff at least one rule was read from the file.
if protected_phrase_regex:
    phrase = re.compile(('(?:^|\s)(?i:((?:{}) ?\.))'.format('|'.join(matched_regexes)) if matched_regexes else '')\
        + ('|(?:' + (')|(?:'.join(unmatched_regexes)) + ')' if unmatched_regexes else ''))
# Compile regexes shared by tokenize() and detokenize()
regex = {
    'special': re.compile(r'[\x00-\x1f]+|\u3000'),  # control characters and ideographic space
    'protected': phrase if phrase else None,  # combined protected-phrase pattern (None when file had no rules)
    'periods': re.compile('\.{2,}'),  # runs of two or more periods
    'digits': re.compile(r'([\d])'),  # individual digits (word-level mode)
    'separate': re.compile(r'(?<![▁])([^\w\s\.▁])'),  # punctuation to split off (word-level mode)
    'joined': re.compile(r'[^\w\d_]'),  # non-word-char probe used by trailing-period handling
    'spaces': re.compile(r'[^\S\n]+'),  # horizontal whitespace runs (keeps newlines)
    'restorephrases': re.compile(r'P▁R([\d\s▁]+?)P▁R'),  # placeholder emitted for protected phrases
    'restoreperiods': re.compile(r'P▁P([\d\s▁]+?)P▁P'),  # placeholder emitted for multi-period runs
    'separate_all': re.compile(r'(?<![ ▁])([^ ▁])'),  # character-level split (BPE mode)
}
# Mutable state shared between tokenize() and its re.sub callback replace()
protected_phrases_replace = []
protected_phrases_counter = 0
# Tokenize sentence - standard
def tokenize(sentence):
    """Tokenize one sentence for NMT training/inference.

    Pipeline: unescape HTML entities, strip special tokens, shield
    regex-protected phrases and multi-period runs behind numbered
    placeholders, normalize quotes and whitespace, split into tokens
    (word-level, or character-level when BPE is enabled), then restore
    the placeholders and re-encode newlines as the literal token
    'newlinechar'.

    Uses module-level state: ``regex`` (compiled patterns),
    ``preprocessing`` (config dict) and the ``protected_phrases_*``
    globals shared with the ``replace`` callback. Returns the tokenized
    sentence as a single space-separated string.
    """
    global protected_phrases_replace, protected_phrases_counter, regex
    protected_phrases_replace = []
    protected_phrases_counter = 0
    protected_periods_counter = 0
    # Decode HTML entities (e.g. &amp; -> &)
    sentence = html.unescape(sentence)
    # Remove special tokens; map '▁' to '_' because '▁' is reserved as the word marker
    sentence = sentence.replace('<unk>', '').replace('<s>', '').replace('</s>', '').replace('▁', '_')
    # Strip white characters
    sentence = sentence.strip()
    # Collapse control characters inside the sentence into spaces
    sentence = regex['special'].sub(' ', sentence)
    # Temporarily restore real newlines (re-encoded again at the end)
    sentence = sentence.replace('newlinechar', '\n')
    # Replace regex-protected phrases with 'P▁R<n>P▁R' placeholders (via replace())
    if regex['protected'] and regex['protected'].search(sentence):
        sentence = regex['protected'].sub(replace, sentence)
    # Protect multi-period runs behind 'P▁P<len>P▁P' placeholders
    m = regex['periods'].findall(sentence)
    if m:
        space = '' if preprocessing['use_bpe'] else ' '
        protected_periods_counter += 1
        for dots in sorted(set(m), reverse=True):
            sentence = sentence.replace(dots, '{}P▁P{}P▁P{}'.format(space, len(dots), space))
    # Normalize backtick -> apostrophe and double apostrophe -> double quote
    sentence = sentence.replace('`', '\'').replace('\'\'', '"')
    # Strip spaces and remove multi-spaces
    sentence = sentence.strip()
    sentence = regex['spaces'].sub(' ', sentence)
    # Embedded detokenizer: prefix every word with the '▁' marker
    if preprocessing['embedded_detokenizer']:
        sentence = '▁' + sentence.replace(' ', ' ▁')
    if not preprocessing['use_bpe']:
        # Word-level mode: separate some special characters
        sentence = regex['separate'].sub(r' \1 ', sentence)
        # Separate digits in numbers
        sentence = regex['digits'].sub(' \\1 ', sentence)
        # Split sentence into words
        words = sentence.split()
        sentence = []
        # For every word
        for word in words:
            # Find if it ends with period
            if word[-1] == '.':
                m = word.rstrip('.')
                # Keep the period attached for abbreviation-like tokens
                # (string still contains an inner period next to non-word chars)
                if '.' in m and regex['joined'].search(m):
                    pass
                else:
                    word = m + ' .'
            # Add word to a sentence
            sentence.append(word)
        # Join words as a sentence again
        sentence = " ".join(sentence)
        # Strip spaces and remove multi-spaces
        sentence = sentence.strip()
        sentence = regex['spaces'].sub(' ', sentence)
    else:
        # BPE mode: separate all characters
        sentence = regex['separate_all'].sub(' \\1', sentence)
        #sentence = ' '.join([(' ' + x) if x not in (' ', '▁') else x for x in list(sentence)])
    # Restore protected phrases and multi-period runs from their placeholders
    if protected_phrases_counter:
        sentence = regex['restorephrases'].sub(lambda number: protected_phrases_replace[int(number.group(1).replace(" ", "").replace("▁", ""))], sentence)
    if protected_periods_counter:
        sentence = regex['restoreperiods'].sub(lambda number: ("." * int(number.group(1).replace(" ", "").replace("▁", ""))), sentence)
    # Re-encode newlines as the literal 'newlinechar' token
    sentence = sentence.replace('\n', 'newlinechar')
    return sentence
# Helper function for re.sub - replaces and saves replaced entity
def replace(entity):
    """``re.sub`` callback: stash a protected phrase, emit a numbered placeholder.

    The stored phrase is restored later by ``tokenize`` through the
    'restorephrases' pattern. Mutates the module-level list/counter.
    """
    global protected_phrases_replace, protected_phrases_counter
    # First non-empty capture group holds the phrase to protect
    phrase = [group for group in entity.groups() if group][0]
    # BPE mode keeps placeholders glued to their neighbours; word mode pads them
    space = '' if preprocessing['use_bpe'] else ' '
    placeholder = '{}P▁R{}P▁R{}'.format(space, protected_phrases_counter, space)
    replacement = entity.group(0).replace(phrase, placeholder)
    protected_phrases_replace.append(phrase)
    protected_phrases_counter += 1
    return replacement
# Load detokenizer rules (for standard detokenizer)
if not preprocessing['embedded_detokenizer']:
    # Only the standard (non-embedded) detokenizer needs the rule file;
    # keep each non-empty line that is not a '#' comment as one regex rule.
    with open(preprocessing['answers_detokenize_file'], 'r', encoding='utf-8') as answers_detokenize_file:
        answers_detokenize_regex = list(filter(lambda word: False if word[0] == '#' else True, filter(None, answers_detokenize_file.read().split("\n"))))
# Returns detokenizes sentences
def detokenize(answers):
    """Detokenize a list of answer strings using the rule file patterns.

    With BPE enabled the input is returned untouched (detokenization is
    already handled by the embedded SPM detokenizer). Otherwise each rule
    from ``answers_detokenize_regex`` is applied: spaces inside group 1 of a
    match are removed, while spaces covered by additional capture groups are
    preserved via a temporary placeholder. Positions are adjusted by a
    running offset because replacements change the string length.
    """
    # Embedded detokenizer
    if preprocessing['use_bpe']:
        # return [answer.replace(' ', '').replace('▁', ' ') for answer in answers]
        # Do nothing - sentence is already detokenized thanks to included SPM detokenizer in NMT enabled in setup/settings.py
        return answers
    detokenized_answers = []
    # For every answer
    for answer in answers:
        # And every regex rule
        for detokenize_regex in answers_detokenize_regex:
            diffrence = 0
            # If detokenize_regex was found in answer
            if re.search(detokenize_regex, answer):
                # Search for all occurrences and iterate thru them
                regex = re.compile(detokenize_regex)
                for p in regex.finditer(answer):
                    # If there are more groups - process spaces that should stay in response
                    if len(p.groups()) > 1:
                        groups = p.groups()[1:]
                        # Replace spaces that should stay with temporary placeholder
                        # (i*22 compensates for the 22-char placeholder inserted per prior group)
                        for i, group in enumerate(groups):
                            position = p.start(i+2) + (i)*22
                            answer = answer[:position] + answer[position:].replace(" ", "##DONOTTOUCHTHISSPACE##", 1)
                # Update regex to match the placeholder wherever it expected a space
                detokenize_regex = detokenize_regex.replace(' ', '(?: |##DONOTTOUCHTHISSPACE##)')
                # Search for all occurrences and iterate thru them again
                regex = re.compile(detokenize_regex)
                for p in regex.finditer(answer):
                    # Calculate replacement and keep a running length offset
                    replace_from = p.groups()[0]
                    replace_to = p.groups()[0].replace(" ", "")
                    position = p.start(1) + diffrence
                    diffrence += -len(replace_from) + len(replace_to)
                    # Remove spaces
                    answer = answer[:position] + answer[position:].replace(replace_from, replace_to, 1)
                # Change placeholders back to spaces
                answer = answer.replace("##DONOTTOUCHTHISSPACE##", ' ')
        detokenized_answers.append(answer)
    return detokenized_answers
# Prepare vocab tokens from line
re_split = re.compile('(?: |^)(?:▁(▁))?([' + re.escape(r'`~!@#$%^&*()-_=+{[}]:;\'",<>?/|\\') + '0-9]|newlinechar|\.+)')
def sentence_split(sentence):
    """Split a (possibly '▁'-marked) sentence into vocab tokens.

    Without the embedded detokenizer this is a plain whitespace split.
    With it, every '▁' word marker is doubled and separator characters
    (punctuation, digits, 'newlinechar', period runs — see ``re_split``)
    get their own marker, after which the string is cut on ' ▁'.
    """
    # Plain whitespace split when the embedded detokenizer is off
    if not preprocessing['embedded_detokenizer']:
        return sentence.split()
    # Double the markers, then give separator characters their own marker
    marked = ' ▁▁' + sentence[1:].replace('▁', '▁▁')
    marked = re_split.sub(r' ▁\1\2 ▁', marked)
    # Cut on ' ▁' and drop empty/marker-only fragments
    fragments = [piece.strip() for piece in marked.split(' ▁')]
    return [piece for piece in fragments if piece and piece != '▁']
# Load json file with BPE join pairs
def apply_bpe_load():
    """Load the BPE merge table from disk and install it via apply_bpe_init().

    Reads ``bpe_joins.common.json`` (joined vocab) or ``bpe_joins.from.json``
    from the training folder; JSON keys are themselves JSON-encoded pairs
    and are decoded into tuples.
    """
    vocab_kind = 'common' if preprocessing['joined_vocab'] else 'from'
    file_path = '{}/{}'.format(preprocessing['train_folder'], 'bpe_joins.{}.json'.format(vocab_kind))
    with open(file_path, 'r', encoding='utf-8', buffering=131072) as bpe_file:
        raw_joins = json.load(bpe_file)
    pair_table = {tuple(json.loads(key)): value for key, value in raw_joins.items()}
    apply_bpe_init(pair_table)
# Set BPE join pairs (used mostly by multiprocessing)
joins = []
def apply_bpe_init(joins_data):
    """Install the BPE join-pair table into module state.

    Kept as a separate setter so multiprocessing workers can be seeded
    with a pre-loaded table instead of re-reading the JSON file.
    """
    global joins
    joins = joins_data
# Apply BPE
sentence_cache = {}
def apply_bpe(sentence):
    """Apply BPE merges to a tokenized sentence and return the joined string.

    Splits the sentence into entities with ``sentence_split``, then greedily
    merges neighbouring pieces of each entity according to the module-level
    ``joins`` table (pair -> score; the pair with the smallest score is
    merged first). Results are memoized per entity in ``sentence_cache``.
    No-op when BPE is disabled in ``preprocessing``.
    """
    # If BPE tokenization is disabled, return sentence
    if not preprocessing['use_bpe']:
        return sentence
    # Memoization table shared across calls - speeds up tokenization
    global sentence_cache
    # Split sentence by ' ▁'
    entities = sentence_split(sentence)
    new_sentence = []
    # For every entity in sentence
    for entity in entities:
        # If entity exists in cache - use cached (computed earlier) result
        original_entity = entity
        if original_entity in sentence_cache:
            new_sentence.append(sentence_cache[original_entity])
            continue
        # Split entity into pieces (mostly chars)
        entity = entity.split()
        # Make pairs of neighboring pieces/chars
        pairs = []
        prev_char = entity[0]
        for char in entity[1:]:
            pairs.append((prev_char, char))
            prev_char = char
        # Single piece/char - nothing to join
        if not pairs:
            new_sentence.append(entity[0])
            continue
        # Make every possible join
        while True:
            # Joins fragment - includes only pairs that exist in current entity
            subjoins = {pair:joins[pair] for pair in pairs if pair in joins}
            # Pick the pair with the smallest score (highest merge priority)
            pair = min(subjoins, key=subjoins.get, default=())
            # If there's no one - entity is joined
            if not pair or pair not in pairs:
                break
            # prepare pieces/chars
            first, second = pair
            new_pair = first + second
            #print(pairs)
            # Replace every occurrence of the pair with the joined one,
            # updating both neighbour pairs in place
            while pair in pairs:
                # Find pair occurrence
                index = pairs.index(pair)
                # Remove pair and update neighbour pairs with joined one
                if index > 0:
                    pairs[index - 1] = (pairs[index - 1][0], new_pair)
                if index < len(pairs) - 1:
                    pairs[index + 1] = (new_pair, pairs[index + 1][1])
                if len(pairs) == 1:
                    pairs[0] = (new_pair, '')
                else:
                    del pairs[index]
        # We are going to use first subword from pair to rebuild entity, so we need to add second subword of last entity as a new 'pair'
        # (AB, C), (C, DEF), (DEF, GHIJK) -> AB, C, DEF, GHIJK
        if pairs[-1][1]:
            pairs.append((pairs[-1][1], ''))
        nentity = ' '.join([first for (first, second) in pairs])
        new_sentence.append(nentity)
        sentence_cache[original_entity] = nentity
    # Return joined sentence
    return ' '.join(new_sentence)
| 36.178571 | 197 | 0.60209 |
29f660bc87dfdd8aa63cd8510e0a72db0044b361 | 4,037 | py | Python | api/handlers/quiz.py | firminsa/quiz-me-api | 80ed1dec56d72227b18c5d587655f6841a4f991a | [
"MIT"
] | null | null | null | api/handlers/quiz.py | firminsa/quiz-me-api | 80ed1dec56d72227b18c5d587655f6841a4f991a | [
"MIT"
] | null | null | null | api/handlers/quiz.py | firminsa/quiz-me-api | 80ed1dec56d72227b18c5d587655f6841a4f991a | [
"MIT"
] | null | null | null | import json
import logging
from api.common import errors
from api.common.validation import validate_authorization_header_is_present, validate_user_exists_by_id
from api.data_access import quiz_dao
from api.model.model import QuizModel
from api.utils import auth_utils
from api.utils.api_utils import build_response_with_body, build_response_without_body
def create_quiz(event, context):
    """Lambda handler: authenticate, validate and persist a new quiz.

    Returns a 201 response wrapping the created quiz, or an error
    response built from the raised ApiError / wrapped exception.
    """
    try:
        # Auth: header must be present and the token must decode
        token = validate_authorization_header_is_present(event['headers'])
        auth_utils.decode_auth_token(token)
        # Parse and validate the request payload
        body = json.loads(event['body'])
        quiz_request = QuizModel.from_request(body)
        logging.debug(quiz_request.user_id)
        validate_user_exists_by_id(quiz_request.user_id)
        # Persist and respond with the stored representation
        created = quiz_dao.create(quiz_request)
        logging.debug(created)
        logging.debug(created.to_dict())
        return build_response_with_body(201, created.to_dict())
    except errors.ApiError as ae:
        return errors.build_response_from_api_error(ae)
    except Exception as e:
        wrapped = errors.ApiError(errors.internal_server_error, e)
        return errors.build_response_from_api_error(wrapped)
def get_quiz_by_id(event, context):
    """Lambda handler: fetch a single quiz by its path-parameter id.

    Returns a 200 response with the quiz dict, or an error response.
    """
    try:
        token = validate_authorization_header_is_present(event['headers'])
        auth_utils.decode_auth_token(token)
        quiz = quiz_dao.get_by_id(event['pathParameters']['quiz_id'])
        # Consistency fix: use the module logger instead of the stray
        # debug print() left over from development.
        logging.debug('getting quiz')
        logging.debug(quiz)
        logging.debug(quiz.to_dict())
        return build_response_with_body(200, quiz.to_dict())
    except errors.ApiError as ae:
        return errors.build_response_from_api_error(ae)
    except Exception as e:
        return errors.build_response_from_api_error(errors.ApiError(errors.internal_server_error, e))
def get_by_user_id(event, context):
    """Lambda handler: list all quizzes belonging to a user.

    Returns a 200 response with a (possibly empty) list of quiz dicts,
    or an error response.
    """
    try:
        # BUG FIX: logging.debug takes lazy %-style arguments, not
        # print-style extra positionals; the old call
        # logging.debug('event headers: ', event['headers']) supplied an
        # argument with no placeholder, which breaks record formatting.
        logging.debug('event headers: %s', event['headers'])
        token = validate_authorization_header_is_present(event['headers'])
        auth_utils.decode_auth_token(token)
        items = quiz_dao.get_by_user_id(event['pathParameters']['user_id'])
        # "if items" per element was a no-op (an empty iterable already
        # yields an empty list) - dropped.
        quizzes = [item.to_dict() for item in items]
        return build_response_with_body(200, quizzes)
    except errors.ApiError as ae:
        return errors.build_response_from_api_error(ae)
    except Exception as e:
        return errors.build_response_from_api_error(errors.ApiError(errors.internal_server_error, e))
def update_quiz(event, context):
    """Lambda handler: update an existing quiz from the request body.

    Returns a 200 response with the updated quiz dict, or an error response.
    """
    try:
        # Auth: header must be present and the token must decode
        token = validate_authorization_header_is_present(event['headers'])
        auth_utils.decode_auth_token(token)
        # Build the update request from the JSON payload and apply it
        payload = json.loads(event['body'])
        quiz_request = QuizModel.from_update_request(payload)
        updated = quiz_dao.update(quiz_request)
        logging.debug(updated)
        logging.debug(updated.to_dict())
        return build_response_with_body(200, updated.to_dict())
    except errors.ApiError as ae:
        return errors.build_response_from_api_error(ae)
    except Exception as e:
        wrapped = errors.ApiError(errors.internal_server_error, e)
        return errors.build_response_from_api_error(wrapped)
def get_quizzes(event, context):
    """Lambda handler: return every stored quiz (no authentication).

    Returns a 200 response with a (possibly empty) list of quiz dicts,
    or an error response.
    """
    try:
        items = quiz_dao.get_all()
        # "if items" per element was a no-op (an empty iterable already
        # yields an empty list) - dropped.
        quizzes = [item.to_dict() for item in items]
        return build_response_with_body(200, quizzes)
    except errors.ApiError as ae:
        return errors.build_response_from_api_error(ae)
    except Exception as e:
        return errors.build_response_from_api_error(errors.ApiError(errors.internal_server_error, e))
def delete_quiz(event, context):
    """Lambda handler: delete a quiz by its path-parameter id.

    Returns an empty 204 response on success, a 400 when the path
    parameter is missing (KeyError), or an error response otherwise.
    """
    try:
        # Auth: header must be present and the token must decode
        token = validate_authorization_header_is_present(event['headers'])
        auth_utils.decode_auth_token(token)
        quiz_id = event['pathParameters']['quiz_id']
        quiz_dao.delete(quiz_id)
        return build_response_without_body(204)
    except errors.ApiError as ae:
        return errors.build_response_from_api_error(ae)
    except KeyError as e:
        missing = errors.Error('MISSING_REQUIRED_PARAMETER', str(e), 400)
        return errors.build_response_from_api_error(missing)
    except Exception as e:
        wrapped = errors.ApiError(errors.internal_server_error, e)
        return errors.build_response_from_api_error(wrapped)
b172f6e925d0873ca7def6387650aed5f5f15664 | 913 | py | Python | pyflarum/client/extensions/flarum/Askvortsov_ReplyTemplates.py | CWKevo/pyflarum | bdf162a6c94e3051843ec7299a4302054927498a | [
"MIT"
] | 9 | 2021-06-23T21:26:29.000Z | 2021-11-16T13:25:34.000Z | pyflarum/client/extensions/flarum/Askvortsov_ReplyTemplates.py | CWKevo/pyflarum | bdf162a6c94e3051843ec7299a4302054927498a | [
"MIT"
] | 3 | 2021-09-11T00:08:14.000Z | 2022-02-07T15:34:27.000Z | pyflarum/client/extensions/flarum/Askvortsov_ReplyTemplates.py | CWKevo/pyFlarum | 2c4e17a16b00367f140c3436f7a9148072ddd2d3 | [
"MIT"
] | 1 | 2021-08-18T12:45:14.000Z | 2021-08-18T12:45:14.000Z | from ....extensions import ExtensionMixin
from ...flarum.core.discussions import DiscussionFromBulk
class ReplyTemplatesDiscussionFromBulkMixin(DiscussionFromBulk):
    """Adds reply-template attributes to bulk-fetched discussions."""

    @property
    def replyTemplate(self) -> str:
        """Reply template text of this discussion (empty string when unset)."""
        template = self.attributes.get("replyTemplate", "")
        return template

    @property
    def canManageReplyTemplates(self) -> bool:
        """Whether the current user may manage this discussion's reply templates."""
        allowed = self.attributes.get("canManageReplyTemplates", False)
        return allowed
class ReplyTemplatesExtension(ExtensionMixin):
    """Extension hook for askvortsov's discussion templates.

    https://extiverse.com/extension/askvortsov/flarum-discussion-templates
    """

    AUTHOR = 'askvortsov'
    NAME = 'reply-templates'

    @classmethod
    def mixin(cls):
        """Mix the reply-template properties into DiscussionFromBulk."""
        super().mixin(DiscussionFromBulk, ReplyTemplatesDiscussionFromBulkMixin)
| 23.410256 | 83 | 0.675794 |
6e7086c8ea1345a9289124273e71628ccacbe22e | 2,143 | py | Python | vyper/semantics/types/indexable/mapping.py | abdullathedruid/vyper | 02b1b207f453b704cf1c491741bc85be9168a373 | [
"Apache-2.0"
] | 2 | 2022-02-08T16:17:10.000Z | 2022-03-06T11:01:46.000Z | vyper/semantics/types/indexable/mapping.py | abdullathedruid/vyper | 02b1b207f453b704cf1c491741bc85be9168a373 | [
"Apache-2.0"
] | 4 | 2018-12-06T23:21:02.000Z | 2022-02-07T15:28:01.000Z | vyper/semantics/types/indexable/mapping.py | charles-cooper/vyper | bbbd8618f8427d416d6751214dd560872f8848f3 | [
"Apache-2.0"
] | null | null | null | from typing import Union
from vyper import ast as vy_ast
from vyper.exceptions import StructureException
from vyper.semantics.types.bases import BasePrimitive, DataLocation, IndexableTypeDefinition
from vyper.semantics.types.utils import get_type_from_annotation
from vyper.semantics.validation.utils import validate_expected_type
class MappingDefinition(IndexableTypeDefinition):
    """Concrete type definition for a Vyper ``HashMap[key, value]``."""

    _id = "HashMap"

    def __repr__(self):
        return f"HashMap[{self.key_type}, {self.value_type}]"

    def compare_type(self, other):
        """Two mappings match only when base, key and value types all agree."""
        if not super().compare_type(other):
            return False
        return self.key_type == other.key_type and self.value_type == other.value_type

    def get_index_type(self, node):
        """Validate ``node`` against the key type; indexing yields the value type."""
        validate_expected_type(node, self.key_type)
        return self.value_type
class MappingPrimitive(BasePrimitive):
    """Primitive that builds a ``MappingDefinition`` from a HashMap annotation."""

    _id = "HashMap"
    _valid_literal = ()

    @classmethod
    def from_annotation(
        cls,
        node: Union[vy_ast.Name, vy_ast.Call, vy_ast.Subscript],
        location: DataLocation = DataLocation.UNSET,
        is_constant: bool = False,
        is_public: bool = False,
        is_immutable: bool = False,
    ) -> MappingDefinition:
        """Construct a MappingDefinition from a ``HashMap[key, value]`` node.

        Raises StructureException when the annotation is malformed or the
        variable is not a plain storage variable.
        """
        # The annotation must be a subscript whose index is a 2-tuple.
        well_formed = (
            isinstance(node, vy_ast.Subscript)
            and isinstance(node.slice, vy_ast.Index)
            and isinstance(node.slice.value, vy_ast.Tuple)
            and len(node.slice.value.elements) == 2
        )
        if not well_formed:
            raise StructureException(
                "HashMap must be defined with a key type and a value type", node
            )
        # HashMaps live in storage and cannot be immutable.
        if is_immutable or location != DataLocation.STORAGE:
            raise StructureException("HashMap can only be declared as a storage variable", node)
        key_node, value_node = node.slice.value.elements
        key_type = get_type_from_annotation(key_node, DataLocation.UNSET)
        value_type = get_type_from_annotation(value_node, DataLocation.STORAGE)
        return MappingDefinition(
            value_type,
            key_type,
            f"HashMap[{key_type}, {value_type}]",
            location,
            is_constant,
            is_public,
        )
| 34.015873 | 97 | 0.656556 |
19ac4acc77c7430af72aaf926d4ab56fde759ffa | 383 | py | Python | tests/test_client.py | smilelight/ltp_server | ca9ce5d633efc7626730816853fd5beec2ea794f | [
"MIT"
] | 5 | 2020-10-29T11:10:29.000Z | 2021-07-17T15:03:42.000Z | tests/test_client.py | smilelight/ltp_server | ca9ce5d633efc7626730816853fd5beec2ea794f | [
"MIT"
] | null | null | null | tests/test_client.py | smilelight/ltp_server | ca9ce5d633efc7626730816853fd5beec2ea794f | [
"MIT"
] | 2 | 2021-07-09T08:45:10.000Z | 2021-08-10T11:17:35.000Z | # -*- coding: utf-8 -*-
from ltp_server import Client
if __name__ == '__main__':
    # Smoke-test every analysis endpoint of the LTP server client,
    # printing each result in the same order as before.
    client = Client()
    texts = ["乔丹是一位出生在纽约的美国职业篮球运动员。"]
    for task in ('sent_split', 'seg', 'pos', 'ner', 'srl', 'dep', 'sdp', 'sdpg'):
        print(getattr(client, task)(texts))
| 23.9375 | 37 | 0.655352 |
9274fdddd260ec9f1b2343cdb3639dc59d9f86fb | 49,600 | py | Python | src/tf_transformers/text/sentencepiece_model_pb2.py | legacyai/tf-transformers | 65a5f9a4bcb3236483daa598a37b91673f56cb97 | [
"Apache-2.0"
] | 116 | 2021-03-15T09:48:41.000Z | 2022-03-24T05:15:51.000Z | src/tf_transformers/text/sentencepiece_model_pb2.py | legacyai/tf-transformers | 65a5f9a4bcb3236483daa598a37b91673f56cb97 | [
"Apache-2.0"
] | 4 | 2021-03-20T11:20:57.000Z | 2022-01-05T04:59:07.000Z | src/tf_transformers/text/sentencepiece_model_pb2.py | legacyai/tf-transformers | 65a5f9a4bcb3236483daa598a37b91673f56cb97 | [
"Apache-2.0"
] | 9 | 2021-03-17T04:14:48.000Z | 2021-09-13T07:15:31.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sentencepiece_model.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='sentencepiece_model.proto',
package='sentencepiece',
syntax='proto2',
serialized_options=b'H\003',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\xa1\n\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! 
\x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 
\x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03', # noqa
)
_TRAINERSPEC_MODELTYPE = _descriptor.EnumDescriptor(
name='ModelType',
full_name='sentencepiece.TrainerSpec.ModelType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNIGRAM',
index=0,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name='BPE',
index=1,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name='WORD',
index=2,
number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name='CHAR',
index=3,
number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=1294,
serialized_end=1347,
)
_sym_db.RegisterEnumDescriptor(_TRAINERSPEC_MODELTYPE)
_MODELPROTO_SENTENCEPIECE_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='sentencepiece.ModelProto.SentencePiece.Type',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NORMAL',
index=0,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name='UNKNOWN',
index=1,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name='CONTROL',
index=2,
number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name='USER_DEFINED',
index=3,
number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name='BYTE',
index=4,
number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name='UNUSED',
index=5,
number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=2100,
serialized_end=2184,
)
_sym_db.RegisterEnumDescriptor(_MODELPROTO_SENTENCEPIECE_TYPE)
_TRAINERSPEC = _descriptor.Descriptor(
name='TrainerSpec',
full_name='sentencepiece.TrainerSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='input',
full_name='sentencepiece.TrainerSpec.input',
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='input_format',
full_name='sentencepiece.TrainerSpec.input_format',
index=1,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='model_prefix',
full_name='sentencepiece.TrainerSpec.model_prefix',
index=2,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='model_type',
full_name='sentencepiece.TrainerSpec.model_type',
index=3,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='vocab_size',
full_name='sentencepiece.TrainerSpec.vocab_size',
index=4,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=8000,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='accept_language',
full_name='sentencepiece.TrainerSpec.accept_language',
index=5,
number=5,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='self_test_sample_size',
full_name='sentencepiece.TrainerSpec.self_test_sample_size',
index=6,
number=6,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='character_coverage',
full_name='sentencepiece.TrainerSpec.character_coverage',
index=7,
number=10,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=float(0.9995),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='input_sentence_size',
full_name='sentencepiece.TrainerSpec.input_sentence_size',
index=8,
number=11,
type=4,
cpp_type=4,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='shuffle_input_sentence',
full_name='sentencepiece.TrainerSpec.shuffle_input_sentence',
index=9,
number=19,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='mining_sentence_size',
full_name='sentencepiece.TrainerSpec.mining_sentence_size',
index=10,
number=12,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b'\030\001',
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='training_sentence_size',
full_name='sentencepiece.TrainerSpec.training_sentence_size',
index=11,
number=13,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b'\030\001',
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='seed_sentencepiece_size',
full_name='sentencepiece.TrainerSpec.seed_sentencepiece_size',
index=12,
number=14,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1000000,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='shrinking_factor',
full_name='sentencepiece.TrainerSpec.shrinking_factor',
index=13,
number=15,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=float(0.75),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='max_sentence_length',
full_name='sentencepiece.TrainerSpec.max_sentence_length',
index=14,
number=18,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=4192,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='num_threads',
full_name='sentencepiece.TrainerSpec.num_threads',
index=15,
number=16,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=16,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='num_sub_iterations',
full_name='sentencepiece.TrainerSpec.num_sub_iterations',
index=16,
number=17,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=2,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='max_sentencepiece_length',
full_name='sentencepiece.TrainerSpec.max_sentencepiece_length',
index=17,
number=20,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=16,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='split_by_unicode_script',
full_name='sentencepiece.TrainerSpec.split_by_unicode_script',
index=18,
number=21,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='split_by_number',
full_name='sentencepiece.TrainerSpec.split_by_number',
index=19,
number=23,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='split_by_whitespace',
full_name='sentencepiece.TrainerSpec.split_by_whitespace',
index=20,
number=22,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='treat_whitespace_as_suffix',
full_name='sentencepiece.TrainerSpec.treat_whitespace_as_suffix',
index=21,
number=24,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='split_digits',
full_name='sentencepiece.TrainerSpec.split_digits',
index=22,
number=25,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='control_symbols',
full_name='sentencepiece.TrainerSpec.control_symbols',
index=23,
number=30,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='user_defined_symbols',
full_name='sentencepiece.TrainerSpec.user_defined_symbols',
index=24,
number=31,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='required_chars',
full_name='sentencepiece.TrainerSpec.required_chars',
index=25,
number=36,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='byte_fallback',
full_name='sentencepiece.TrainerSpec.byte_fallback',
index=26,
number=35,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='vocabulary_output_piece_score',
full_name='sentencepiece.TrainerSpec.vocabulary_output_piece_score',
index=27,
number=32,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='hard_vocab_limit',
full_name='sentencepiece.TrainerSpec.hard_vocab_limit',
index=28,
number=33,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='use_all_vocab',
full_name='sentencepiece.TrainerSpec.use_all_vocab',
index=29,
number=34,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='unk_id',
full_name='sentencepiece.TrainerSpec.unk_id',
index=30,
number=40,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='bos_id',
full_name='sentencepiece.TrainerSpec.bos_id',
index=31,
number=41,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='eos_id',
full_name='sentencepiece.TrainerSpec.eos_id',
index=32,
number=42,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=2,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='pad_id',
full_name='sentencepiece.TrainerSpec.pad_id',
index=33,
number=43,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=-1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='unk_piece',
full_name='sentencepiece.TrainerSpec.unk_piece',
index=34,
number=45,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=b"<unk>".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='bos_piece',
full_name='sentencepiece.TrainerSpec.bos_piece',
index=35,
number=46,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=b"<s>".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='eos_piece',
full_name='sentencepiece.TrainerSpec.eos_piece',
index=36,
number=47,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=b"</s>".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='pad_piece',
full_name='sentencepiece.TrainerSpec.pad_piece',
index=37,
number=48,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=b"<pad>".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='unk_surface',
full_name='sentencepiece.TrainerSpec.unk_surface',
index=38,
number=44,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=b" \342\201\207 ".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='train_extremely_large_corpus',
full_name='sentencepiece.TrainerSpec.train_extremely_large_corpus',
index=39,
number=49,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[
_TRAINERSPEC_MODELTYPE,
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[
(200, 536870912),
],
oneofs=[],
serialized_start=45,
serialized_end=1358,
)
_NORMALIZERSPEC = _descriptor.Descriptor(
name='NormalizerSpec',
full_name='sentencepiece.NormalizerSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name',
full_name='sentencepiece.NormalizerSpec.name',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='precompiled_charsmap',
full_name='sentencepiece.NormalizerSpec.precompiled_charsmap',
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='add_dummy_prefix',
full_name='sentencepiece.NormalizerSpec.add_dummy_prefix',
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='remove_extra_whitespaces',
full_name='sentencepiece.NormalizerSpec.remove_extra_whitespaces',
index=3,
number=4,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='escape_whitespaces',
full_name='sentencepiece.NormalizerSpec.escape_whitespaces',
index=4,
number=5,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='normalization_rule_tsv',
full_name='sentencepiece.NormalizerSpec.normalization_rule_tsv',
index=5,
number=6,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[
(200, 536870912),
],
oneofs=[],
serialized_start=1361,
serialized_end=1570,
)
_SELFTESTDATA_SAMPLE = _descriptor.Descriptor(
name='Sample',
full_name='sentencepiece.SelfTestData.Sample',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='input',
full_name='sentencepiece.SelfTestData.Sample.input',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='expected',
full_name='sentencepiece.SelfTestData.Sample.expected',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[],
serialized_start=1641,
serialized_end=1682,
)
_SELFTESTDATA = _descriptor.Descriptor(
name='SelfTestData',
full_name='sentencepiece.SelfTestData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='samples',
full_name='sentencepiece.SelfTestData.samples',
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[
_SELFTESTDATA_SAMPLE,
],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[
(200, 536870912),
],
oneofs=[],
serialized_start=1572,
serialized_end=1693,
)
_MODELPROTO_SENTENCEPIECE = _descriptor.Descriptor(
name='SentencePiece',
full_name='sentencepiece.ModelProto.SentencePiece',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='piece',
full_name='sentencepiece.ModelProto.SentencePiece.piece',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='score',
full_name='sentencepiece.ModelProto.SentencePiece.score',
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='type',
full_name='sentencepiece.ModelProto.SentencePiece.type',
index=2,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[
_MODELPROTO_SENTENCEPIECE_TYPE,
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[
(200, 536870912),
],
oneofs=[],
serialized_start=1985,
serialized_end=2195,
)
_MODELPROTO = _descriptor.Descriptor(
name='ModelProto',
full_name='sentencepiece.ModelProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='pieces',
full_name='sentencepiece.ModelProto.pieces',
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='trainer_spec',
full_name='sentencepiece.ModelProto.trainer_spec',
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='normalizer_spec',
full_name='sentencepiece.ModelProto.normalizer_spec',
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='self_test_data',
full_name='sentencepiece.ModelProto.self_test_data',
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='denormalizer_spec',
full_name='sentencepiece.ModelProto.denormalizer_spec',
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[
_MODELPROTO_SENTENCEPIECE,
],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[
(200, 536870912),
],
oneofs=[],
serialized_start=1696,
serialized_end=2206,
)
_TRAINERSPEC.fields_by_name['model_type'].enum_type = _TRAINERSPEC_MODELTYPE
_TRAINERSPEC_MODELTYPE.containing_type = _TRAINERSPEC
_SELFTESTDATA_SAMPLE.containing_type = _SELFTESTDATA
_SELFTESTDATA.fields_by_name['samples'].message_type = _SELFTESTDATA_SAMPLE
_MODELPROTO_SENTENCEPIECE.fields_by_name['type'].enum_type = _MODELPROTO_SENTENCEPIECE_TYPE
_MODELPROTO_SENTENCEPIECE.containing_type = _MODELPROTO
_MODELPROTO_SENTENCEPIECE_TYPE.containing_type = _MODELPROTO_SENTENCEPIECE
_MODELPROTO.fields_by_name['pieces'].message_type = _MODELPROTO_SENTENCEPIECE
_MODELPROTO.fields_by_name['trainer_spec'].message_type = _TRAINERSPEC
_MODELPROTO.fields_by_name['normalizer_spec'].message_type = _NORMALIZERSPEC
_MODELPROTO.fields_by_name['self_test_data'].message_type = _SELFTESTDATA
_MODELPROTO.fields_by_name['denormalizer_spec'].message_type = _NORMALIZERSPEC
DESCRIPTOR.message_types_by_name['TrainerSpec'] = _TRAINERSPEC
DESCRIPTOR.message_types_by_name['NormalizerSpec'] = _NORMALIZERSPEC
DESCRIPTOR.message_types_by_name['SelfTestData'] = _SELFTESTDATA
DESCRIPTOR.message_types_by_name['ModelProto'] = _MODELPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TrainerSpec = _reflection.GeneratedProtocolMessageType(
'TrainerSpec',
(_message.Message,),
{
'DESCRIPTOR': _TRAINERSPEC,
'__module__': 'sentencepiece_model_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.TrainerSpec)
},
)
_sym_db.RegisterMessage(TrainerSpec)
NormalizerSpec = _reflection.GeneratedProtocolMessageType(
'NormalizerSpec',
(_message.Message,),
{
'DESCRIPTOR': _NORMALIZERSPEC,
'__module__': 'sentencepiece_model_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.NormalizerSpec)
},
)
_sym_db.RegisterMessage(NormalizerSpec)
SelfTestData = _reflection.GeneratedProtocolMessageType(
'SelfTestData',
(_message.Message,),
{
'Sample': _reflection.GeneratedProtocolMessageType(
'Sample',
(_message.Message,),
{
'DESCRIPTOR': _SELFTESTDATA_SAMPLE,
'__module__': 'sentencepiece_model_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData.Sample)
},
),
'DESCRIPTOR': _SELFTESTDATA,
'__module__': 'sentencepiece_model_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData)
},
)
_sym_db.RegisterMessage(SelfTestData)
_sym_db.RegisterMessage(SelfTestData.Sample)
ModelProto = _reflection.GeneratedProtocolMessageType(
'ModelProto',
(_message.Message,),
{
'SentencePiece': _reflection.GeneratedProtocolMessageType(
'SentencePiece',
(_message.Message,),
{
'DESCRIPTOR': _MODELPROTO_SENTENCEPIECE,
'__module__': 'sentencepiece_model_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.ModelProto.SentencePiece)
},
),
'DESCRIPTOR': _MODELPROTO,
'__module__': 'sentencepiece_model_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.ModelProto)
},
)
_sym_db.RegisterMessage(ModelProto)
_sym_db.RegisterMessage(ModelProto.SentencePiece)
DESCRIPTOR._options = None
_TRAINERSPEC.fields_by_name['mining_sentence_size']._options = None
_TRAINERSPEC.fields_by_name['training_sentence_size']._options = None
# @@protoc_insertion_point(module_scope)
| 34.13627 | 4,068 | 0.582359 |
5d99ad58e19ad3f0cda4dc56066d503e643182cd | 13,700 | py | Python | src/main/resources/scripts/lib/fedconfig.py | cwiechmann/apigw-maven-plugin | 8a29cd3ccfa56d444176ea02df292cf6bab60f4c | [
"Apache-2.0"
] | null | null | null | src/main/resources/scripts/lib/fedconfig.py | cwiechmann/apigw-maven-plugin | 8a29cd3ccfa56d444176ea02df292cf6bab60f4c | [
"Apache-2.0"
] | null | null | null | src/main/resources/scripts/lib/fedconfig.py | cwiechmann/apigw-maven-plugin | 8a29cd3ccfa56d444176ea02df292cf6bab60f4c | [
"Apache-2.0"
] | null | null | null | import vutil, os, sys, re, json
import logging
from archiveutil import DeploymentArchiveAPI
from esapi import EntityStoreAPI
from java.io import File
from java.lang import String
from java.security import KeyFactory, KeyStore
from com.vordel.store.util import ChangeEncryptedFields
from com.vordel.archive.fed import PolicyArchive, EnvironmentArchive, DeploymentArchive
from com.vordel.common.base64 import Encoder, Decoder
from com.vordel.security.openssl import PKCS12
from envconfig import EnvConfig
from envconfig import CertConfig
from envconfig import CertInfo
from java.io import File, FileInputStream, FileReader, ByteArrayInputStream
from java.security.cert import CertificateFactory
class FedConfigurator:
def __init__(self, pol_archive_path, env_archive_path, config_path, cert_config_path = None, properties = None, passphrase = "", confidentials=None):
self.__cert_config = None
self.__simulation_mode = False
self.__update_cert_config = False
self.__expiration_days = -1
self.__base_dir = None
self.__passphrase_in = passphrase
self.__pol_archive = PolicyArchive(pol_archive_path)
self.__env_archive = EnvironmentArchive(env_archive_path)
self.__fed_archive = None
try:
self.__fed_archive = DeploymentArchive(self.__pol_archive, self.__env_archive, self.__passphrase_in)
except TypeError:
# backward compatibility for 7.5.3
self.__fed_archive = DeploymentArchive(self.__pol_archive, self.__env_archive)
self.__config = EnvConfig(config_path, properties, confidentials)
if cert_config_path is not None:
self.__cert_config = CertConfig(cert_config_path, properties, confidentials)
logging.info("Deployment archive configuration initialized")
return
def enable_cert_config_update(self):
self.__update_cert_config = True
def enable_simulation_mode(self):
self.__simulation_mode = True
def set_cert_expiration_days(self, days):
self.__expiration_days = days
def set_system_properties(self, sys_properties):
self.__config.set_system_properties(sys_properties)
def set_base_dir(self, base_dir):
self.__base_dir = base_dir
def configure(self, passphrase = ""):
succeeded = self.__configure_entities()
if succeeded:
succeeded = self.__configure_certificates()
if not succeeded:
logging.error("Configuration of certificates failed!")
else:
logging.error("Configuration of entities failed; check JSON configuration for unconfigured entity fields!")
if succeeded and self.__passphrase_in != passphrase:
fed_api = DeploymentArchiveAPI(self.__fed_archive, self.__passphrase_in)
changer = ChangeEncryptedFields(fed_api.entityStore)
changer.execute(passphrase, self.__passphrase_in)
fed_api.deploymentArchive.updateConfiguration(fed_api.entityStore)
logging.info("Passphrase for output archives changed")
return succeeded
def __configure_entities(self):
logging.info("Configure environmentalized entities")
fed_api = DeploymentArchiveAPI(self.__fed_archive, self.__passphrase_in)
env_settings = fed_api.envSettings.getEnvSettings()
succeeded = True
config = {}
for env_entity in env_settings.getEnvironmentalizedEntities():
env_fields = env_entity.getEnvironmentalizedFields()
for env_field in env_fields:
field_value = self.__config.get_value(env_entity, env_field)
if (field_value.key.type == "reference"):
raise ValueError("Reference types are not supported for environmentalization: name=%s; index=%d; type=%s; entity=%s" \
% (field_value.key.name, field_value.key.index, field_value.key.type, field_value.key.short_hand_key))
if (field_value.value is not None):
logging.info("Configure field: name=%s; index=%d; type=%s; entity=%s" % (field_value.key.name, field_value.key.index, field_value.key.type, field_value.key.short_hand_key))
if not self.__simulation_mode:
if field_value.key.short_hand_key not in config:
config[field_value.key.short_hand_key] = []
if field_value.key.type == "integer":
config[field_value.key.short_hand_key].append([field_value.key.name, field_value.key.index, int(field_value.value)])
else:
config[field_value.key.short_hand_key].append([field_value.key.name, field_value.key.index, str(field_value.value)])
else:
logging.error("Unconfigured field: name=%s; index=%d; type=%s; entity=%s" % (field_value.key.name, field_value.key.index, field_value.key.type, field_value.key.short_hand_key))
succeeded = False
if succeeded:
if not self.__simulation_mode:
fed_api.addEnvSettings(config)
logging.info("Environmentalized fields updated.")
else:
logging.info("[SIMULATION_MODE] Environmentalized fields simulation succeeded.")
self.__config.update_config_file()
return succeeded
def __resolve_file_path(self, file):
if file and self.__base_dir:
file = os.path.join(self.__base_dir, file)
return file
def __get_certificate_infos(self):
infos = []
es = EntityStoreAPI.wrap(self.__fed_archive.getEntityStore(), self.__passphrase_in)
cert_entities = es.getAll("/[Certificates]name=Certificate Store/[Certificate]**")
cf = CertificateFactory.getInstance("X.509")
for cert_entity in cert_entities:
alias = cert_entity.getStringValue("dname")
subject = None
not_after = None
content = cert_entity.getBinaryValue("content")
if content:
cert = cf.generateCertificate(ByteArrayInputStream(content))
subject = cert.getSubjectDN().getName()
not_after = cert.getNotAfter()
infos.append(CertInfo(alias, subject, not_after))
return infos
def __get_key_from_p12(self, file, password=None):
io = FileInputStream(file)
pkcs12 = PKCS12(io)
if password is not None:
try:
pkcs12.decrypt(String(password).toCharArray())
except:
raise ValueError("Invalid passphrase for .p12 certificate!")
return pkcs12.getKey()
def __get_cert_from_p12(self, file, password=None):
ks = KeyStore.getInstance("PKCS12")
io = FileInputStream(file)
if password is None:
ks.load(io, None)
else:
ks.load(io, String(password).toCharArray())
io.close()
for alias in ks.aliases():
if ks.isKeyEntry(alias):
return ks.getCertificate(alias)
return None
def __add_or_replace_certificate(self, es, alias, cert, private_key=None):
cert_store = es.get('/[Certificates]name=Certificate Store')
# Get or create certificate entity
cert_entity = es.getChild(cert_store, '[Certificate]dname=%s' % (es.escapeField(alias)))
if cert_entity is None:
cert_entity = es.createEntity("Certificate")
cert_entity.setStringField("dname", alias)
es.addEntity(cert_store, cert_entity)
cert_entity = es.getChild(cert_store, '[Certificate]dname=%s' % (es.escapeField(alias)))
# Set certificate content
cert_entity.setBinaryValue("content", cert.getEncoded())
# Set or remove private key
if private_key is not None:
cert_entity.setStringField("key", es.encryptBytes(private_key.getEncoded()))
else:
entity_private_key = cert_entity.getStringValue("key")
if not entity_private_key:
cert_entity.removeField("key")
es.updateEntity(cert_entity)
return
def __remove_certificate(self, es, alias):
# Get certificate entity
cert_store = es.get('/[Certificates]name=Certificate Store')
cert_entity = es.getChild(cert_store, '[Certificate]dname=%s' % (es.escapeField(alias)))
if cert_entity:
es.cutEntity(cert_entity)
return
def __configure_certificates(self):
if self.__cert_config is not None:
# determine existing certificates
logging.info("Determine existing certificates")
cert_infos = self.__get_certificate_infos()
self.__cert_config.set_cert_infos(cert_infos)
self.__cert_config.update_config_file()
# apply configured certificates
logging.info("Configure certificates")
certs = self.__cert_config.get_certificates()
es = EntityStoreAPI.wrap(self.__fed_archive.getEntityStore(), self.__passphrase_in)
cert_infos = []
cert = None
for cert_ref in certs:
file = self.__resolve_file_path(cert_ref.get_file())
logging.info("Process alias '%s' (%s): %s" % (cert_ref.get_alias(), cert_ref.get_type(), file))
if cert_ref.get_type() == "crt":
cf = CertificateFactory.getInstance("X.509")
if os.path.isfile(file):
fis = FileInputStream (file)
cert = cf.generateCertificate(fis)
self.__add_or_replace_certificate(es, cert_ref.get_alias(), cert)
else:
if self.__simulation_mode:
logging.warning("[SIMULATION_MODE] Certificate file not found, certificate (CRT) ignored: alias=%s" % (cert_ref.get_alias()))
continue
else:
raise ValueError("Certificate file not found for alias '%s': %s" % (cert_ref.get_alias(), file))
elif cert_ref.get_type() == "p12":
if os.path.isfile(file):
key = self.__get_key_from_p12(file, cert_ref.get_password())
cert = self.__get_cert_from_p12(file, cert_ref.get_password())
self.__add_or_replace_certificate(es, cert_ref.get_alias(), cert, key)
else:
if self.__simulation_mode:
logging.warning("[SIMULATION_MODE] Certificate file not found, certificate (P12) ignored: alias=%s" % (cert_ref.get_alias()))
continue
else:
raise ValueError("Certificate file not found for alias '%s': %s" % (cert_ref.get_alias(), file))
elif cert_ref.get_type() == "empty":
self.__remove_certificate(es, cert_ref.get_alias())
logging.info("Certificate removed: %s" % (cert_ref.get_alias()))
continue
else:
raise ValueError("Unsupported certificate type: %s" % (cert_ref.get_type()))
subject = cert.getSubjectDN().getName()
not_after = cert.getNotAfter()
cert_info = CertInfo(cert_ref.get_alias(), subject, not_after)
logging.info("Certificate (%s) added/replaced: %s [%s] - %s" % (cert_ref.get_type(), cert_info.get_alias(), cert_info.format_not_after(), cert_info.get_subject()))
cert_infos.append(cert_info)
if self.__update_cert_config:
self.__cert_config.set_update_cert_infos(cert_infos)
self.__cert_config.update_config_file()
if self.__expiration_days >= 0:
logging.info("Checking for certificate expiration within %i days." % (self.__expiration_days))
has_expired = False
for cert_info in cert_infos:
expiration_days = cert_info.expiration_in_days()
if self.__expiration_days > expiration_days:
logging.error("Certificate '%s' expires in %i days!" % (cert_info.get_alias(), expiration_days))
has_expired = True
if has_expired:
raise ValueError("At least one certificate expires in less than %i days; check log file!" % (self.__expiration_days))
if not self.__simulation_mode:
DeploymentArchive.updateConfiguration(self.__fed_archive, es.es)
logging.info("Certificates updated.")
else:
logging.info("[SIMULATION_MODE] Certificates simulation succeeded.")
return True
def get_unconfigured_fields(self):
return self.__config.get_unconfigured_fields()
def write_fed(self, fed_path):
if "/" not in fed_path and "\\" not in fed_path:
fed_path = "./" + fed_path
self.__fed_archive.writeToArchiveFile(fed_path)
logging.info("Deployment archive written to '%s'" % (fed_path))
return
def write_env(self, env_path):
if "/" not in env_path and "\\" not in env_path:
env_path = "./" + env_path
env_archive = EnvironmentArchive(self.__fed_archive)
env_archive.writeToArchiveFile(env_path)
logging.info("Environment archive written to '%s'" % (env_path))
return
| 45.364238 | 196 | 0.625839 |
a38b2a1a2b972599d5acd02fd1a4d0fc82ca9340 | 11,907 | py | Python | dvc/remote/gdrive.py | dickmao/dvc | 6958901d98c316340b46be08e959593f40129b8a | [
"Apache-2.0"
] | null | null | null | dvc/remote/gdrive.py | dickmao/dvc | 6958901d98c316340b46be08e959593f40129b8a | [
"Apache-2.0"
] | null | null | null | dvc/remote/gdrive.py | dickmao/dvc | 6958901d98c316340b46be08e959593f40129b8a | [
"Apache-2.0"
] | null | null | null | import os
import posixpath
import logging
import threading
import re
from funcy import retry, compose, decorator, wrap_with
from funcy.py3 import cat
from dvc.progress import Tqdm
from dvc.scheme import Schemes
from dvc.path_info import CloudURLInfo
from dvc.remote.base import RemoteBASE
from dvc.config import Config
from dvc.exceptions import DvcException
from dvc.utils import tmp_fname
logger = logging.getLogger(__name__)
FOLDER_MIME_TYPE = "application/vnd.google-apps.folder"
class GDriveRetriableError(DvcException):
pass
class GDriveAccessTokenRefreshError(DvcException):
pass
class GDriveMissedCredentialKeyError(DvcException):
pass
@decorator
def _wrap_pydrive_retriable(call):
from apiclient import errors
from pydrive2.files import ApiRequestError
try:
result = call()
except (ApiRequestError, errors.HttpError) as exception:
retry_codes = ["403", "500", "502", "503", "504"]
if any(
"HttpError {}".format(code) in str(exception)
for code in retry_codes
):
raise GDriveRetriableError("Google API request failed")
raise
return result
gdrive_retry = compose(
# 8 tries, start at 0.5s, multiply by golden ratio, cap at 10s
retry(
8, GDriveRetriableError, timeout=lambda a: min(0.5 * 1.618 ** a, 10)
),
_wrap_pydrive_retriable,
)
class RemoteGDrive(RemoteBASE):
scheme = Schemes.GDRIVE
path_cls = CloudURLInfo
REQUIRES = {"pydrive2": "pydrive2"}
DEFAULT_NO_TRAVERSE = False
GDRIVE_USER_CREDENTIALS_DATA = "GDRIVE_USER_CREDENTIALS_DATA"
DEFAULT_USER_CREDENTIALS_FILE = "gdrive-user-credentials.json"
def __init__(self, repo, config):
super().__init__(repo, config)
self.path_info = self.path_cls(config[Config.SECTION_REMOTE_URL])
bucket = re.search(
"{}://(.*)".format(self.scheme),
config[Config.SECTION_REMOTE_URL],
re.IGNORECASE,
)
self.bucket = (
bucket.group(1).split("/")[0] if bucket else self.path_info.bucket
)
self.config = config
self.init_drive()
def init_drive(self):
self.client_id = self.config.get(Config.SECTION_GDRIVE_CLIENT_ID, None)
self.client_secret = self.config.get(
Config.SECTION_GDRIVE_CLIENT_SECRET, None
)
if not self.client_id or not self.client_secret:
raise DvcException(
"Please specify Google Drive's client id and "
"secret in DVC's config. Learn more at "
"https://man.dvc.org/remote/add."
)
self.gdrive_user_credentials_path = (
tmp_fname(os.path.join(self.repo.tmp_dir, ""))
if os.getenv(RemoteGDrive.GDRIVE_USER_CREDENTIALS_DATA)
else self.config.get(
Config.SECTION_GDRIVE_USER_CREDENTIALS_FILE,
os.path.join(
self.repo.tmp_dir, self.DEFAULT_USER_CREDENTIALS_FILE
),
)
)
@gdrive_retry
def gdrive_upload_file(
self, args, no_progress_bar=True, from_file="", progress_name=""
):
item = self.drive.CreateFile(
{"title": args["title"], "parents": [{"id": args["parent_id"]}]}
)
with open(from_file, "rb") as fobj:
total = os.path.getsize(from_file)
with Tqdm.wrapattr(
fobj,
"read",
desc=progress_name,
total=total,
disable=no_progress_bar,
) as wrapped:
# PyDrive doesn't like content property setting for empty files
# https://github.com/gsuitedevs/PyDrive/issues/121
if total:
item.content = wrapped
item.Upload()
return item
@gdrive_retry
def gdrive_download_file(
self, file_id, to_file, progress_name, no_progress_bar
):
gdrive_file = self.drive.CreateFile({"id": file_id})
bar_format = (
"Donwloading {desc:{ncols_desc}.{ncols_desc}}... "
+ Tqdm.format_sizeof(int(gdrive_file["fileSize"]), "B", 1024)
)
with Tqdm(
bar_format=bar_format, desc=progress_name, disable=no_progress_bar
):
gdrive_file.GetContentFile(to_file)
def gdrive_list_item(self, query):
file_list = self.drive.ListFile({"q": query, "maxResults": 1000})
# Isolate and decorate fetching of remote drive items in pages
get_list = gdrive_retry(lambda: next(file_list, None))
# Fetch pages until None is received, lazily flatten the thing
return cat(iter(get_list, None))
def cache_root_dirs(self):
cached_dirs = {}
cached_ids = {}
for dir1 in self.gdrive_list_item(
"'{}' in parents and trashed=false".format(self.remote_root_id)
):
remote_path = posixpath.join(self.path_info.path, dir1["title"])
cached_dirs.setdefault(remote_path, []).append(dir1["id"])
cached_ids[dir1["id"]] = dir1["title"]
return cached_dirs, cached_ids
@property
def cached_dirs(self):
if not hasattr(self, "_cached_dirs"):
self.drive
return self._cached_dirs
@property
def cached_ids(self):
if not hasattr(self, "_cached_ids"):
self.drive
return self._cached_ids
@property
@wrap_with(threading.RLock())
def drive(self):
from pydrive2.auth import RefreshError
if not hasattr(self, "_gdrive"):
from pydrive2.auth import GoogleAuth
from pydrive2.drive import GoogleDrive
if os.getenv(RemoteGDrive.GDRIVE_USER_CREDENTIALS_DATA):
with open(
self.gdrive_user_credentials_path, "w"
) as credentials_file:
credentials_file.write(
os.getenv(RemoteGDrive.GDRIVE_USER_CREDENTIALS_DATA)
)
GoogleAuth.DEFAULT_SETTINGS["client_config_backend"] = "settings"
GoogleAuth.DEFAULT_SETTINGS["client_config"] = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"revoke_uri": "https://oauth2.googleapis.com/revoke",
"redirect_uri": "",
}
GoogleAuth.DEFAULT_SETTINGS["save_credentials"] = True
GoogleAuth.DEFAULT_SETTINGS["save_credentials_backend"] = "file"
GoogleAuth.DEFAULT_SETTINGS[
"save_credentials_file"
] = self.gdrive_user_credentials_path
GoogleAuth.DEFAULT_SETTINGS["get_refresh_token"] = True
GoogleAuth.DEFAULT_SETTINGS["oauth_scope"] = [
"https://www.googleapis.com/auth/drive",
"https://www.googleapis.com/auth/drive.appdata",
]
# Pass non existent settings path to force DEFAULT_SETTINGS loading
gauth = GoogleAuth(settings_file="")
try:
gauth.CommandLineAuth()
except RefreshError as exc:
raise GDriveAccessTokenRefreshError(
"Google Drive's access token refreshment is failed"
) from exc
except KeyError as exc:
raise GDriveMissedCredentialKeyError(
"Google Drive's user credentials file '{}' "
"misses value for key '{}'".format(
self.gdrive_user_credentials_path, str(exc)
)
)
# Handle pydrive2.auth.AuthenticationError and others auth failures
except Exception as exc:
raise DvcException(
"Google Drive authentication failed"
) from exc
finally:
if os.getenv(RemoteGDrive.GDRIVE_USER_CREDENTIALS_DATA):
os.remove(self.gdrive_user_credentials_path)
self._gdrive = GoogleDrive(gauth)
self.remote_root_id = self.get_remote_id(
self.path_info, create=True
)
self._cached_dirs, self._cached_ids = self.cache_root_dirs()
return self._gdrive
@gdrive_retry
def create_remote_dir(self, parent_id, title):
item = self.drive.CreateFile(
{
"title": title,
"parents": [{"id": parent_id}],
"mimeType": FOLDER_MIME_TYPE,
}
)
item.Upload()
return item
@gdrive_retry
def get_remote_item(self, name, parents_ids):
if not parents_ids:
return None
query = "({})".format(
" or ".join(
"'{}' in parents".format(parent_id)
for parent_id in parents_ids
)
)
query += " and trashed=false and title='{}'".format(name)
# Limit found remote items count to 1 in response
item_list = self.drive.ListFile(
{"q": query, "maxResults": 1}
).GetList()
return next(iter(item_list), None)
def resolve_remote_item_from_path(self, path_parts, create):
parents_ids = [self.bucket]
current_path = ""
for path_part in path_parts:
current_path = posixpath.join(current_path, path_part)
remote_ids = self.get_remote_id_from_cache(current_path)
if remote_ids:
parents_ids = remote_ids
continue
item = self.get_remote_item(path_part, parents_ids)
if not item and create:
item = self.create_remote_dir(parents_ids[0], path_part)
elif not item:
return None
parents_ids = [item["id"]]
return item
def get_remote_id_from_cache(self, remote_path):
if hasattr(self, "_cached_dirs"):
return self.cached_dirs.get(remote_path, [])
return []
def get_remote_id(self, path_info, create=False):
remote_ids = self.get_remote_id_from_cache(path_info.path)
if remote_ids:
return remote_ids[0]
file1 = self.resolve_remote_item_from_path(
path_info.path.split("/"), create
)
return file1["id"] if file1 else ""
def exists(self, path_info):
return self.get_remote_id(path_info) != ""
def _upload(self, from_file, to_info, name, no_progress_bar):
dirname = to_info.parent
if dirname:
parent_id = self.get_remote_id(dirname, True)
else:
parent_id = to_info.bucket
self.gdrive_upload_file(
{"title": to_info.name, "parent_id": parent_id},
no_progress_bar,
from_file,
name,
)
def _download(self, from_info, to_file, name, no_progress_bar):
file_id = self.get_remote_id(from_info)
self.gdrive_download_file(file_id, to_file, name, no_progress_bar)
def all(self):
if not self.cached_ids:
return
query = "({})".format(
" or ".join(
"'{}' in parents".format(dir_id) for dir_id in self.cached_ids
)
)
query += " and trashed=false"
for file1 in self.gdrive_list_item(query):
parent_id = file1["parents"][0]["id"]
path = posixpath.join(self.cached_ids[parent_id], file1["title"])
try:
yield self.path_to_checksum(path)
except ValueError:
# We ignore all the non-cache looking files
logger.debug('Ignoring path as "non-cache looking"')
| 33.730878 | 79 | 0.589653 |
559bccd6e5b11f86146b7ea88dd017c98c08b043 | 7,470 | py | Python | networkx-d3-v2/networkx/conf/common.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | [
"Apache-2.0"
] | null | null | null | networkx-d3-v2/networkx/conf/common.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | [
"Apache-2.0"
] | null | null | null | networkx-d3-v2/networkx/conf/common.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | [
"Apache-2.0"
] | null | null | null | import os
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
VERSION = os.environ.get('CURRENT_VERSION_ID', '').split('.')[0]
HOSTNAME = 'http://%s' % os.environ.get('HTTP_HOST', '').replace("%s." % VERSION, "")
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# A custom cache backend using AppEngine's memcached
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'TIMEOUT': 15 * 60,
}
}
"""
Custom session engine using our cache or writing through to the datastore If
using SQL, can we use django's standard write through? If gae memecached is
stable enough, it would be faster to use
django.contrib.sessions.backends.cache?
"""
SESSION_ENGINE = "appengine_sessions.backends.cached_db"
# Uncomment these DB definitions to use Cloud SQL.
# See: https://developers.google.com/cloud-sql/docs/django#development-settings
#import os
#if (os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine') or
# os.getenv('SETTINGS_MODE') == 'prod'):
# # Running on production App Engine, so use a Google Cloud SQL database.
# DATABASES = {
# 'default': {
# 'ENGINE': 'google.appengine.ext.django.backends.rdbms',
# 'INSTANCE': 'my_project:instance1',
# 'NAME': 'my_db',
# }
# }
#else:
# # Running in development, so use a local MySQL database.
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
## 'USER': 'root',
## 'PASSWORD': '',
## 'HOST': 'localhost',
# 'NAME': 'my_db',
# }
# }
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Use the new automatic timezone features Django 1.4 brings
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/staticfiles/"
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '^(&s11q!@t2j@=dgpp65k+df6o1(@1h9cq-$^p@=k4!5))xi6u'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'google.appengine.ext.ndb.django_middleware.NdbDjangoMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
INTERNAL_IPS = ('127.0.0.1',)
ROOT_URLCONF = 'networkx.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'networkx.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'templates'),
)
INSTALLED_APPS = (
# 'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
'clients',
'auth',
'appengine_sessions',
)
TEMPLATE_CONTEXT_PROCESSORS = (
# "django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.request",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages",
"auth.context_processors.google_user"
)
FIXTURE_DIRS = (
os.path.join(PROJECT_DIR, 'fixtures'),
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
HOSTNAME = 'http://%s' % os.environ.get('HTTP_HOST')
# Google oauth settings
OAUTH_SETTINGS = {
'client_id': 'client_id', # overwrite on the specific settings file
'client_secret': 'client_secret', # overwrite on the specific settings file
'redirect_uri': '%s/oauth2callback' % HOSTNAME,
'scopes': [
'https://www.googleapis.com/auth/userinfo.profile',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/drive.readonly',
'https://spreadsheets.google.com/feeds/'
],
'login_url': 'https://accounts.google.com/o/oauth2/auth',
'token_url': 'https://accounts.google.com/o/oauth2/token',
'user_agent': 'appengine/networkx'
}
OAUTH_DEFAULT_REDIRECT = 'home'
OAUTH_FAILED_REDIRECT = 'auth-failed'
OAUTH_SESSION_KEYS = [
'user',
'credentials',
'flow',
'request_token',
'auth_service'
]
| 31.652542 | 85 | 0.690361 |
f74b81037be83ae7ee405b51f6737b40ceb936cb | 555 | py | Python | regexlib/2021-5-15/python_re2_test_file/regexlib_3613.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | 1 | 2022-01-24T14:43:23.000Z | 2022-01-24T14:43:23.000Z | regexlib/2021-5-15/python_re2_test_file/regexlib_3613.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | null | null | null | regexlib/2021-5-15/python_re2_test_file/regexlib_3613.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | null | null | null | # 3613
# <(?<tag>\w*|\w*\.+\w*)>+((.|[\n\t\f\r\s])*?)<\/\k<tag>>
# EXPONENT
# nums:5
# EXPONENT AttackString:"<>"+"\t\t"*16+"! _1_EOD(i2)"
import re2 as re
from time import perf_counter
regex = """<(?<tag>\w*|\w*\.+\w*)>+((.|[\n\t\f\r\s])*?)<\/\k<tag>>"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "<>" + "\t\t" * i * 1 + "! _1_EOD(i2)"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!") | 29.210526 | 69 | 0.52973 |
649aa25c5fe68f355a9cfb4ed25da9e8de88c391 | 6,497 | py | Python | venv/lib/python3.6/site-packages/routes/middleware.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 105 | 2015-01-27T02:33:17.000Z | 2022-03-06T06:08:47.000Z | venv/lib/python3.6/site-packages/routes/middleware.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 75 | 2015-01-05T21:16:02.000Z | 2021-12-06T21:13:43.000Z | venv/lib/python3.6/site-packages/routes/middleware.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 48 | 2015-01-19T00:40:23.000Z | 2022-03-06T06:08:53.000Z | """Routes WSGI Middleware"""
import re
import logging
from webob import Request
from routes.base import request_config
from routes.util import URLGenerator
log = logging.getLogger('routes.middleware')
class RoutesMiddleware(object):
"""Routing middleware that handles resolving the PATH_INFO in
addition to optionally recognizing method overriding.
.. Note::
This module requires webob to be installed. To depend on it, you may
list routes[middleware] in your ``requirements.txt``
"""
def __init__(self, wsgi_app, mapper, use_method_override=True,
path_info=True, singleton=True):
"""Create a Route middleware object
Using the use_method_override keyword will require Paste to be
installed, and your application should use Paste's WSGIRequest
object as it will properly handle POST issues with wsgi.input
should Routes check it.
If path_info is True, then should a route var contain
path_info, the SCRIPT_NAME and PATH_INFO will be altered
accordingly. This should be used with routes like:
.. code-block:: python
map.connect('blog/*path_info', controller='blog', path_info='')
"""
self.app = wsgi_app
self.mapper = mapper
self.singleton = singleton
self.use_method_override = use_method_override
self.path_info = path_info
self.log_debug = logging.DEBUG >= log.getEffectiveLevel()
if self.log_debug:
log.debug("Initialized with method overriding = %s, and path "
"info altering = %s", use_method_override, path_info)
def __call__(self, environ, start_response):
"""Resolves the URL in PATH_INFO, and uses wsgi.routing_args
to pass on URL resolver results."""
old_method = None
if self.use_method_override:
req = None
# In some odd cases, there's no query string
try:
qs = environ['QUERY_STRING']
except KeyError:
qs = ''
if '_method' in qs:
req = Request(environ)
req.errors = 'ignore'
try:
method = req.GET.get('_method')
except UnicodeDecodeError:
method = None
if method:
old_method = environ['REQUEST_METHOD']
environ['REQUEST_METHOD'] = method.upper()
if self.log_debug:
log.debug("_method found in QUERY_STRING, altering "
"request method to %s",
environ['REQUEST_METHOD'])
elif environ['REQUEST_METHOD'] == 'POST' and is_form_post(environ):
if req is None:
req = Request(environ)
req.errors = 'ignore'
try:
method = req.POST.get('_method')
except UnicodeDecodeError:
method = None
if method:
old_method = environ['REQUEST_METHOD']
environ['REQUEST_METHOD'] = method.upper()
if self.log_debug:
log.debug("_method found in POST data, altering "
"request method to %s",
environ['REQUEST_METHOD'])
# Run the actual route matching
# -- Assignment of environ to config triggers route matching
if self.singleton:
config = request_config()
config.mapper = self.mapper
config.environ = environ
match = config.mapper_dict
route = config.route
else:
results = self.mapper.routematch(environ=environ)
if results:
match, route = results[0], results[1]
else:
match = route = None
if old_method:
environ['REQUEST_METHOD'] = old_method
if not match:
match = {}
if self.log_debug:
urlinfo = "%s %s" % (environ['REQUEST_METHOD'],
environ['PATH_INFO'])
log.debug("No route matched for %s", urlinfo)
elif self.log_debug:
urlinfo = "%s %s" % (environ['REQUEST_METHOD'],
environ['PATH_INFO'])
log.debug("Matched %s", urlinfo)
log.debug("Route path: '%s', defaults: %s", route.routepath,
route.defaults)
log.debug("Match dict: %s", match)
url = URLGenerator(self.mapper, environ)
environ['wsgiorg.routing_args'] = ((url), match)
environ['routes.route'] = route
environ['routes.url'] = url
if route and route.redirect:
route_name = '_redirect_%s' % id(route)
location = url(route_name, **match)
log.debug("Using redirect route, redirect to '%s' with status"
"code: %s", location, route.redirect_status)
start_response(route.redirect_status,
[('Content-Type', 'text/plain; charset=utf8'),
('Location', location)])
return []
# If the route included a path_info attribute and it should be used to
# alter the environ, we'll pull it out
if self.path_info and 'path_info' in match:
oldpath = environ['PATH_INFO']
newpath = match.get('path_info') or ''
environ['PATH_INFO'] = newpath
if not environ['PATH_INFO'].startswith('/'):
environ['PATH_INFO'] = '/' + environ['PATH_INFO']
environ['SCRIPT_NAME'] += re.sub(
r'^(.*?)/' + re.escape(newpath) + '$', r'\1', oldpath)
response = self.app(environ, start_response)
# Wrapped in try as in rare cases the attribute will be gone already
try:
del self.mapper.environ
except AttributeError:
pass
return response
def is_form_post(environ):
"""Determine whether the request is a POSTed html form"""
content_type = environ.get('CONTENT_TYPE', '').lower()
if ';' in content_type:
content_type = content_type.split(';', 1)[0]
return content_type in ('application/x-www-form-urlencoded',
'multipart/form-data')
| 38.217647 | 79 | 0.548561 |
2c6de910dec683fe321abba1a5cb83d93f357514 | 13,605 | py | Python | src/wmdecompose/utils.py | maybemkl/wmdecompose | 25d81616aeb6a27cd0511d1e12316bc63673e599 | [
"MIT"
] | 3 | 2022-03-22T00:49:02.000Z | 2022-03-24T15:31:39.000Z | src/wmdecompose/utils.py | maybemkl/wmdecompose | 25d81616aeb6a27cd0511d1e12316bc63673e599 | [
"MIT"
] | null | null | null | src/wmdecompose/utils.py | maybemkl/wmdecompose | 25d81616aeb6a27cd0511d1e12316bc63673e599 | [
"MIT"
] | null | null | null | from .documents import Document
from .models import WMD
from .gale_shapeley import Matcher
from bs4 import BeautifulSoup
from collections import Counter
from gensim.models.phrases import Phrases, Phraser
from itertools import islice
from nltk.stem import WordNetLemmatizer
from nltk.tokenize.toktok import ToktokTokenizer
from random import shuffle
from scipy.sparse.csr import csr_matrix
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from typing import Callable, DefaultDict, Dict, List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import seaborn as sns
def get_pairs(pairing:str = 'gs',
source_docs:List[Document] = [],
sink_docs:List[Document] = [],
engaged:Dict[int,int] = {}) -> List[Tuple[int, int]]:
"""Wrapper for extracting different types of pairs, using the Galey-Shapeley algorithm, random pairing or full pairing.
Args:
pairing: String defining what pairing to use. Alternatives are 'gs' for Gale-Shapeley, 'random' or 'full'.
source_docs: A list of the documents in the source set.
sink_docs: A list of the documents in the sink set.
engaged: Dictionary with the indexes for the final engagements between guys and gals.
Return:
pairs: A list of tuples with a pair of integers in each tuple indicating the paired documents.
"""
if pairing == 'gs':
pairs = [(k, v) for k, v in engaged.items()]
if pairing == 'random':
print("Running random pairing.")
source_idx = list(range(0,len(source_docs)))
sink_idx = list(range(0,len(sink_docs)))
shuffle(source_idx)
shuffle(sink_idx)
pairs = list(zip(source_idx, sink_idx))
if pairing == 'full':
print("Running full pairing.")
source_idx = list(range(0,len(source_docs)))
sink_idx = list(range(0,len(sink_docs)))
pairs = [(i,j) for i in source_idx for j in sink_idx]
return pairs
def get_top_words(wmd_model:WMD,
top_n:int=100,
source:bool=True) -> Tuple[pd.DataFrame, Dict[str,float]]:
"""Function for getting the top words composing the distances from a source set to a sink set or vice versa.
Args:
wmd_model: A WMD model with word decomposing.
top_n: An integer indicating how many words to return.
source: A boolean indicating whether to look for source to sink (True) or sink to source (False).
Return:
top_words: A pandas dataframe with the top n words contributing to the distance from one set of documents to another.
source_to_sink: A dictionary with the top n words that add the most distance from source to sink.
sink_to_source: A dictionary with the top n words that add the most distance from sink to source.
"""
if source:
source_to_sink = {k: v for k, v in sorted(wmd_model.wd_source_diff.items(),
key=lambda item: item[1], reverse=True)[:top_n]}
top_words = pd.DataFrame.from_dict(source_to_sink, orient='index', columns = ["distance"])
top_words['word'] = top_words.index
return top_words, source_to_sink
else:
sink_to_source = {k: v for k, v in sorted(wmd_model.wd_sink_diff.items(),
key=lambda item: item[1], reverse=True)[:top_n]}
top_words = pd.DataFrame.from_dict(sink_to_source, orient='index', columns = ["distance"])
top_words['word'] = top_words.index
return top_words, sink_to_source
def kmeans_search(E:np.array, K:List[int]) -> Tuple[List[float], List[float]]:
"""Grid search for Kmeans models.
Args:
E: An array with an embedding matrix of float values.
K: A list of integers for all the K values that should be searched.
Return:
sum_of_squared_distances: A list of float values with the 'intertia_' variable from Kmeans.
silhouette: A list of float values with the silhouette scores for the Kmeans models at each K.
"""
sum_of_squared_distances = []
silhouette = []
for k in K:
km = KMeans(n_clusters=k,max_iter=300)
km = km.fit(E)
sum_of_squared_distances.append(km.inertia_)
cluster_labels = km.fit_predict(E)
silhouette_avg = silhouette_score(E, cluster_labels)
silhouette.append(silhouette_avg)
if k % 5 == 0:
print("For n_clusters =", k,
"The average silhouette_score is :", silhouette_avg)
return sum_of_squared_distances, silhouette
def plot_kmeans(K:List[int], data:List[float], metric:str, fname:str = "") -> None:
"""Plot silhouette or elbow scores for Kmeans model.
Args:
K: A list of integers for all the K values that should be searched.
data: A list with a float (or int) number for each Kmeans model.
metric: A string with the metric to plot. Must be be 'elbow' or 'silhouette'.
fname: String with filename for saving figure. Optional.
"""
plt.plot(K, data, 'bx-')
plt.xlabel('k')
if metric == "elbow":
plt.ylabel('Sum of squared distances')
plt.title('Elbow Method For Optimal k')
if metric == "silhouette":
plt.ylabel('Silhouette score')
plt.title('Silhouette Score for Optimal k')
elif metric not in ["elbow", "silhouette"]:
print("Please define 'metric' as either 'elbow' or 'silhouette'.")
if len(fname) > 0:
plt.savefig(fname)
else:
plt.show()
def get_phrases(sentences:List[List[str]],
min_count:int=5,
threshold:int=100,
save:bool=False,
load:bool=False,
PATH:str="embeddings/") -> List[List[str]]:
"""Function for generating, saving and loading Phrases using Gensim.
For details, see https://radimrehurek.com/gensim/models/phrases.html
Args:
sentences: A list of strings.
min_count: An integer for the 'min_count' argument in the Gensim Phraser.
threshold: An integer for the 'threshold' argument in the Gensim Phraser.
save: Boolean indicating whether phrases should be saved when generating new phrases.
load: Boolean indicating that whether phrases should be loaded, instead of saved.
PATH: String for path to which save or from which to load phrases.
Return:
phrased_tri: List of lists with the phrased versions of the input sentences.
"""
if load:
bigram=Phrases.load(f"{PATH}bigram_phrases.pkl")
trigram=Phrases.load(f"{PATH}trigram_phrases.pkl")
else:
print("Initializing bigram Phrases.")
bigram = Phrases(sentences, min_count=min_count, threshold = threshold) # higher threshold fewer phrases.
print("Initializing trigram Phrases.")
trigram = Phrases(bigram[sentences])
if save:
print("Saving bigram Phrases.")
bigram.save(f"{PATH}bigram_phrases.pkl")
print("Saving trigram Phrases.")
trigram.save(f"{PATH}trigram_phrases.pkl")
print("Finding bigrams in data.")
phrased_bi = [b for b in bigram[sentences]]
print("Finding trigrams in data.")
phrased_tri = [t for t in trigram[[b for b in bigram[sentences]]]]
return phrased_tri
def output_clusters(wd:List[Tuple[str, float]],
cd:List[Tuple[int, float]],
c2w:DefaultDict[list, Dict[int, List[str]]],
n_clusters:int = 10,
n_words:int = 10) -> pd.DataFrame:
"""Get clusters with highest accumulated distance and with within cluster words organized by distance contribution.
Args:
wd: List of tuples with words and their accumulated distance contributions in each tuple.
cd: List of tuples with clusters and their accumulated distance contributions in each tuple.
c2w: Default dictionary with the cluster number as key and the list of the words in said cluster as value.
n_clusters: Integer with the number of clusters.
n_words: Integer with the number of words per cluster.
Return:
keywords_df: Pandas dataframe with clusters with distances as column headers and words with distances as row values.
"""
top_clusters = [k for k, v in sorted(cd, key=lambda item: item[1], reverse=True)[:n_clusters]]
word_rankings = {k: v for k, v in sorted(wd, key=lambda item: item[1], reverse=True)}
keywords = []
for c in top_clusters:
cluster_words = {w: word_rankings[w] for w in c2w[c]
if w in word_rankings.keys()}
top_c_words = [f"{k} ({round(v, 2)})" for k, v in sorted(cluster_words.items(),
key=lambda item: item[1],
reverse=True)[:n_words]]
keywords.append(top_c_words)
keywords_df = pd.DataFrame(keywords).transpose()
keywords_df.columns = top_clusters
return keywords_df
def remove_oov(text:str, tokenizer:Callable[[List[str]], List[str]], oov:List[str]) -> str:
"""Function for removing out-of-vocabulary (oov) words.
Args:
text: String to be analyzed for oov words.
tokenizer: Any tokenizer that returns input sentence as a list of strings.
oov: List of oov words.
Return:
filtered_text: String with oov words removed.
"""
tokens = tokenizer.tokenize(text)
tokens = [token.strip() for token in tokens]
filtered_tokens = [token for token in tokens if token not in oov]
#filtered_tokens = filter(lambda token: token not in oov, tokens)
filtered_text = ' '.join(filtered_tokens)
return filtered_text
def take(n:int, iterable:iter) -> list:
"""Return first n items of the iterable as a list.
Args:
n: An integer of the number of items to keep from iterable.
iterable: Any iterable.
Return:
list: List of items contained in input iterable.
"""
return list(islice(iterable, n))
def tokenize(text:str, tokenizer:Callable[[str], List[str]]) -> List[str]:
"""Callable to use with the Sklearn TfIdfVectorizer.
Args:
text: String to tokenize.
tokenizer: Any callable that takes a string as input and returns a list of strings.
Return:
tokens: List of strings.
"""
tokens = tokenizer.tokenize(text)
return tokens
def tfidf_tokenize(text:str) -> List[str]:
"""Callable to use with the Sklearn TfIdfVectorizer with the tokenizer predetermined as the nltk ToktokTokenizer.
Args:
text: String to tokenize.
Return:
tokens: List of strings.
"""
tokenizer=ToktokTokenizer()
tokens = tokenizer.tokenize(text)
return tokens
# Custom preprocessing functions
# Partly self-authored, partly from https://www.kaggle.com/lakshmi25npathi/sentiment-analysis-of-imdb-movie-reviews
def strip_html(text:str) -> str:
"""Removing the html strips.
Args:
text: String to have HTML removed.
Return:
text: String with HTML removed.
"""
soup = BeautifulSoup(text, "html.parser")
return soup.get_text()
def remove_between_square_brackets(text:str) -> str:
"""Removing the square <brackets
Args:
text: String to have square brackets removed.
Return:
text: String with square brackets removed.
"""
return re.sub('\[[^]]*\]', '', text)
def denoise_text(text:str) -> str:
"""Removing the noisy text
Args:
text: String to denoise for HTML.
Return:
text: String with HTML denoised.
"""
text = re.sub('<br / ><br / >', ' ', text)
text = strip_html(text)
text = remove_between_square_brackets(text)
return text
def remove_special_characters(text:str) -> str:
"""Define function for removing special characters
Args:
text: String to filter for special characters.
Return:
text: String with special characters removed.
"""
pattern=r'[^a-zA-z\s]'
text=re.sub(pattern,'',text)
return text
def simple_lemmatizer(text:str) -> str:
"""Lemmatizing the text.
Args:
text: String to lemmatize.
Return:
text: String that has been lemmatized.
"""
lemmatizer=WordNetLemmatizer()
text= ' '.join([lemmatizer.lemmatize(word) for word in text.split()])
return text
def remove_stopwords(text:str,
stopword_list:List[str],
tokenizer:Callable[[str], List[str]],
is_lower_case:bool=False) -> str:
"""Removing the stopwords.
Args:
text: String to filter for stopwords.
stopword_list: List of strings with stopwords.
tokenizer: Any callable that takes a string as input and returns a list of strings.
is_lower_case: Boolean indicating whether input is alread lower case.
Return:
filtered_text: String with stopwords removed.
"""
tokens = tokenizer.tokenize(text)
tokens = [token.strip() for token in tokens]
if is_lower_case:
filtered_tokens = [token for token in tokens if token not in stopword_list]
else:
filtered_tokens = [token.lower() for token in tokens if token.lower() not in stopword_list]
filtered_text = ' '.join(filtered_tokens)
return filtered_text | 36.28 | 123 | 0.642631 |
6554a98a45904e373be3d59a45a20274ea71aaf5 | 4,630 | py | Python | simulation_research/traffic/point_process_model_test.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | simulation_research/traffic/point_process_model_test.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | simulation_research/traffic/point_process_model_test.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl import logging
from absl.testing import absltest
import matplotlib.pyplot as plt
import numpy as np
from simulation_research.traffic import point_process_model
class PointProcessModelTest(absltest.TestCase):
def setUp(self):
super(PointProcessModelTest, self).setUp()
self.model = point_process_model.PointProcessModel()
np.random.seed(0)
self._output_dir = tempfile.mkdtemp(dir=absltest.get_default_test_tmpdir())
def test_generator_homogeneous_poisson(self):
lmbd = 1
time_step_size = 10
rates = np.ones(100000) * lmbd
events = self.model.generator(rates, time_step_size)
actual_mean = np.mean(events)
actual_std = np.std(events)
target_mean = lmbd * time_step_size
target_std = np.sqrt(target_mean)
self.assertAlmostEqual(
np.abs(target_mean - actual_mean) / target_mean, 0, places=2)
self.assertAlmostEqual(
np.abs(target_std - actual_std) / target_std, 0, places=2)
def test_model_fitting_homogeneous_poisson(self):
lmbd = 1
time_step_size = 10
rates = np.ones(100000) * lmbd
events = self.model.generator(rates, time_step_size)
actual_lmbd = self.model.fit_homo_poisson(events, time_step_size)
self.assertAlmostEqual(actual_lmbd, lmbd, places=2)
def test_bspline_basis(self):
spline_order = 3
knots = [0, 0.333, 0.6667, 1]
basis, time_line = point_process_model.create_bspline_basis(
knots, spline_order)
fig = plt.figure(figsize=(8, 6))
fig.add_subplot(111)
plt.plot(time_line, basis)
output_file = os.path.join(self._output_dir, 'B-spline_basis_order3.png')
logging.info('Save file in: %s', self._output_dir)
plt.savefig(output_file)
plt.close()
spline_order = 2
knots = [0, 0.2, 0.4, 0.6, 0.8, 1]
basis, time_line = point_process_model.create_bspline_basis(
knots, spline_order)
fig = plt.figure(figsize=(8, 6))
fig.add_subplot(111)
plt.plot(time_line, basis)
output_file = os.path.join(self._output_dir, 'B-spline_basis_order2.png')
logging.info('Save file in: %s', self._output_dir)
plt.savefig(output_file)
plt.close()
def test_fit_inhomo_poisson(self):
# Fit the homogeneous Poisson process using inhomogeneous methods.
lmbd = 1
time_step_size = 10
rates = np.ones(50) * lmbd
events = self.model.generator(rates, time_step_size)
rates_hat = self.model.fit_inhomo_poisson(
events, time_step_size, num_knots=3)
fig = plt.figure(figsize=(8, 6))
fig.add_subplot(111)
plt.plot(rates, label='True rate')
plt.plot(rates_hat, label='Estimated rate')
output_file = os.path.join(
self._output_dir, 'inhomo model fit homo process.png')
plt.ylim(-1, 3)
plt.legend()
logging.info('Save file in: %s', self._output_dir)
plt.savefig(output_file)
plt.close()
# Fit the inhomogeneous Poisson process.
spline_order = 3
knots = [0, 0.2, 0.4, 0.6, 0.8, 1] # knots on the unit range.
basis, _ = point_process_model.create_bspline_basis(
knots, spline_order, 0.02)
beta_target = np.array([0, 0, 0, 2, 1, -1, 0, 0, 2])
time_step_size = 10
rates_target = np.exp(basis @ beta_target) / time_step_size
# Generates events according to the inhomogeneous rates.
events = self.model.generator(rates_target, time_step_size)
rates_hat = self.model.fit_inhomo_poisson(
events, time_step_size, num_knots=4)
fig = plt.figure(figsize=(8, 6))
fig.add_subplot(111)
plt.plot(rates_target, label='True rate')
plt.plot(rates_hat, label='Estimated rate')
output_file = os.path.join(
self._output_dir, 'inhomo model fit inhomo process.png')
plt.ylim(-2, 5)
plt.legend()
logging.info('Save file in: %s', self._output_dir)
plt.savefig(output_file)
plt.close()
if __name__ == '__main__':
absltest.main()
| 33.79562 | 79 | 0.707559 |
66dfc828ed5c69b6f8e3cb47bb8f171b04624ec3 | 27,657 | py | Python | tests/queue_counter_test.py | sg893052/sonic-utilities | fdb79b8d65b8ca22232f4e6b140f593dd01613d5 | [
"Apache-2.0"
] | 91 | 2016-03-23T14:24:41.000Z | 2022-03-18T20:25:37.000Z | tests/queue_counter_test.py | sg893052/sonic-utilities | fdb79b8d65b8ca22232f4e6b140f593dd01613d5 | [
"Apache-2.0"
] | 1,495 | 2017-02-15T10:49:10.000Z | 2022-03-31T18:49:56.000Z | tests/queue_counter_test.py | sg893052/sonic-utilities | fdb79b8d65b8ca22232f4e6b140f593dd01613d5 | [
"Apache-2.0"
] | 466 | 2016-04-25T09:31:23.000Z | 2022-03-31T06:54:17.000Z | import imp
import json
import os
import sys
from click.testing import CliRunner
from unittest import TestCase
from swsscommon.swsscommon import ConfigDBConnector
from .mock_tables import dbconnector
import show.main as show
from utilities_common.cli import json_dump
from utilities_common.db import Db
test_path = os.path.dirname(os.path.abspath(__file__))
modules_path = os.path.dirname(test_path)
scripts_path = os.path.join(modules_path, "scripts")
sys.path.insert(0, test_path)
sys.path.insert(0, modules_path)
show_queue_counters = """\
Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes
--------- ----- -------------- --------------- ----------- ------------
Ethernet0 UC0 68 30 56 74
Ethernet0 UC1 60 43 39 1
Ethernet0 UC2 82 7 39 21
Ethernet0 UC3 52 70 19 76
Ethernet0 UC4 11 59 12 94
Ethernet0 UC5 36 62 35 40
Ethernet0 UC6 49 91 2 88
Ethernet0 UC7 33 17 94 74
Ethernet0 UC8 40 71 95 33
Ethernet0 UC9 54 8 93 78
Ethernet0 MC10 83 96 74 9
Ethernet0 MC11 15 60 61 31
Ethernet0 MC12 45 52 82 94
Ethernet0 MC13 55 88 89 52
Ethernet0 MC14 14 70 95 79
Ethernet0 MC15 68 60 66 81
Ethernet0 MC16 63 4 48 76
Ethernet0 MC17 41 73 77 74
Ethernet0 MC18 60 21 56 54
Ethernet0 MC19 57 31 12 39
Ethernet0 ALL20 N/A N/A N/A N/A
Ethernet0 ALL21 N/A N/A N/A N/A
Ethernet0 ALL22 N/A N/A N/A N/A
Ethernet0 ALL23 N/A N/A N/A N/A
Ethernet0 ALL24 N/A N/A N/A N/A
Ethernet0 ALL25 N/A N/A N/A N/A
Ethernet0 ALL26 N/A N/A N/A N/A
Ethernet0 ALL27 N/A N/A N/A N/A
Ethernet0 ALL28 N/A N/A N/A N/A
Ethernet0 ALL29 N/A N/A N/A N/A
Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes
--------- ----- -------------- --------------- ----------- ------------
Ethernet4 UC0 41 96 70 98
Ethernet4 UC1 18 49 63 36
Ethernet4 UC2 99 90 3 15
Ethernet4 UC3 60 89 48 41
Ethernet4 UC4 8 84 82 94
Ethernet4 UC5 83 15 75 92
Ethernet4 UC6 84 26 50 71
Ethernet4 UC7 27 19 49 80
Ethernet4 UC8 13 89 13 33
Ethernet4 UC9 43 48 86 31
Ethernet4 MC10 50 1 57 82
Ethernet4 MC11 67 99 84 59
Ethernet4 MC12 4 58 27 5
Ethernet4 MC13 74 5 57 39
Ethernet4 MC14 21 59 4 14
Ethernet4 MC15 24 61 19 53
Ethernet4 MC16 51 15 15 32
Ethernet4 MC17 98 18 23 15
Ethernet4 MC18 41 34 9 57
Ethernet4 MC19 57 7 18 99
Ethernet4 ALL20 N/A N/A N/A N/A
Ethernet4 ALL21 N/A N/A N/A N/A
Ethernet4 ALL22 N/A N/A N/A N/A
Ethernet4 ALL23 N/A N/A N/A N/A
Ethernet4 ALL24 N/A N/A N/A N/A
Ethernet4 ALL25 N/A N/A N/A N/A
Ethernet4 ALL26 N/A N/A N/A N/A
Ethernet4 ALL27 N/A N/A N/A N/A
Ethernet4 ALL28 N/A N/A N/A N/A
Ethernet4 ALL29 N/A N/A N/A N/A
Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes
--------- ----- -------------- --------------- ----------- ------------
Ethernet8 UC0 19 5 36 56
Ethernet8 UC1 38 17 68 91
Ethernet8 UC2 16 65 79 51
Ethernet8 UC3 11 97 63 72
Ethernet8 UC4 54 89 62 62
Ethernet8 UC5 13 84 30 59
Ethernet8 UC6 49 67 99 85
Ethernet8 UC7 2 63 38 88
Ethernet8 UC8 0 82 93 43
Ethernet8 UC9 80 17 91 61
Ethernet8 MC10 81 63 76 73
Ethernet8 MC11 29 16 29 66
Ethernet8 MC12 32 12 61 35
Ethernet8 MC13 79 17 72 93
Ethernet8 MC14 23 21 67 50
Ethernet8 MC15 37 10 97 14
Ethernet8 MC16 30 17 74 43
Ethernet8 MC17 0 63 54 84
Ethernet8 MC18 69 88 24 79
Ethernet8 MC19 20 12 84 3
Ethernet8 ALL20 N/A N/A N/A N/A
Ethernet8 ALL21 N/A N/A N/A N/A
Ethernet8 ALL22 N/A N/A N/A N/A
Ethernet8 ALL23 N/A N/A N/A N/A
Ethernet8 ALL24 N/A N/A N/A N/A
Ethernet8 ALL25 N/A N/A N/A N/A
Ethernet8 ALL26 N/A N/A N/A N/A
Ethernet8 ALL27 N/A N/A N/A N/A
Ethernet8 ALL28 N/A N/A N/A N/A
Ethernet8 ALL29 N/A N/A N/A N/A
"""
show_queue_counters_port = """\
Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes
--------- ----- -------------- --------------- ----------- ------------
Ethernet8 UC0 19 5 36 56
Ethernet8 UC1 38 17 68 91
Ethernet8 UC2 16 65 79 51
Ethernet8 UC3 11 97 63 72
Ethernet8 UC4 54 89 62 62
Ethernet8 UC5 13 84 30 59
Ethernet8 UC6 49 67 99 85
Ethernet8 UC7 2 63 38 88
Ethernet8 UC8 0 82 93 43
Ethernet8 UC9 80 17 91 61
Ethernet8 MC10 81 63 76 73
Ethernet8 MC11 29 16 29 66
Ethernet8 MC12 32 12 61 35
Ethernet8 MC13 79 17 72 93
Ethernet8 MC14 23 21 67 50
Ethernet8 MC15 37 10 97 14
Ethernet8 MC16 30 17 74 43
Ethernet8 MC17 0 63 54 84
Ethernet8 MC18 69 88 24 79
Ethernet8 MC19 20 12 84 3
Ethernet8 ALL20 N/A N/A N/A N/A
Ethernet8 ALL21 N/A N/A N/A N/A
Ethernet8 ALL22 N/A N/A N/A N/A
Ethernet8 ALL23 N/A N/A N/A N/A
Ethernet8 ALL24 N/A N/A N/A N/A
Ethernet8 ALL25 N/A N/A N/A N/A
Ethernet8 ALL26 N/A N/A N/A N/A
Ethernet8 ALL27 N/A N/A N/A N/A
Ethernet8 ALL28 N/A N/A N/A N/A
Ethernet8 ALL29 N/A N/A N/A N/A
"""
show_queue_counters_json = """\
{
"Ethernet0": {
"ALL20": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL21": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL22": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL23": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL24": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL25": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL26": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL27": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL28": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL29": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"MC10": {
"dropbytes": "9",
"droppacket": "74",
"totalbytes": "96",
"totalpacket": "83"
},
"MC11": {
"dropbytes": "31",
"droppacket": "61",
"totalbytes": "60",
"totalpacket": "15"
},
"MC12": {
"dropbytes": "94",
"droppacket": "82",
"totalbytes": "52",
"totalpacket": "45"
},
"MC13": {
"dropbytes": "52",
"droppacket": "89",
"totalbytes": "88",
"totalpacket": "55"
},
"MC14": {
"dropbytes": "79",
"droppacket": "95",
"totalbytes": "70",
"totalpacket": "14"
},
"MC15": {
"dropbytes": "81",
"droppacket": "66",
"totalbytes": "60",
"totalpacket": "68"
},
"MC16": {
"dropbytes": "76",
"droppacket": "48",
"totalbytes": "4",
"totalpacket": "63"
},
"MC17": {
"dropbytes": "74",
"droppacket": "77",
"totalbytes": "73",
"totalpacket": "41"
},
"MC18": {
"dropbytes": "54",
"droppacket": "56",
"totalbytes": "21",
"totalpacket": "60"
},
"MC19": {
"dropbytes": "39",
"droppacket": "12",
"totalbytes": "31",
"totalpacket": "57"
},
"UC0": {
"dropbytes": "74",
"droppacket": "56",
"totalbytes": "30",
"totalpacket": "68"
},
"UC1": {
"dropbytes": "1",
"droppacket": "39",
"totalbytes": "43",
"totalpacket": "60"
},
"UC2": {
"dropbytes": "21",
"droppacket": "39",
"totalbytes": "7",
"totalpacket": "82"
},
"UC3": {
"dropbytes": "76",
"droppacket": "19",
"totalbytes": "70",
"totalpacket": "52"
},
"UC4": {
"dropbytes": "94",
"droppacket": "12",
"totalbytes": "59",
"totalpacket": "11"
},
"UC5": {
"dropbytes": "40",
"droppacket": "35",
"totalbytes": "62",
"totalpacket": "36"
},
"UC6": {
"dropbytes": "88",
"droppacket": "2",
"totalbytes": "91",
"totalpacket": "49"
},
"UC7": {
"dropbytes": "74",
"droppacket": "94",
"totalbytes": "17",
"totalpacket": "33"
},
"UC8": {
"dropbytes": "33",
"droppacket": "95",
"totalbytes": "71",
"totalpacket": "40"
},
"UC9": {
"dropbytes": "78",
"droppacket": "93",
"totalbytes": "8",
"totalpacket": "54"
}
},
"Ethernet4": {
"ALL20": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL21": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL22": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL23": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL24": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL25": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL26": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL27": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL28": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL29": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"MC10": {
"dropbytes": "82",
"droppacket": "57",
"totalbytes": "1",
"totalpacket": "50"
},
"MC11": {
"dropbytes": "59",
"droppacket": "84",
"totalbytes": "99",
"totalpacket": "67"
},
"MC12": {
"dropbytes": "5",
"droppacket": "27",
"totalbytes": "58",
"totalpacket": "4"
},
"MC13": {
"dropbytes": "39",
"droppacket": "57",
"totalbytes": "5",
"totalpacket": "74"
},
"MC14": {
"dropbytes": "14",
"droppacket": "4",
"totalbytes": "59",
"totalpacket": "21"
},
"MC15": {
"dropbytes": "53",
"droppacket": "19",
"totalbytes": "61",
"totalpacket": "24"
},
"MC16": {
"dropbytes": "32",
"droppacket": "15",
"totalbytes": "15",
"totalpacket": "51"
},
"MC17": {
"dropbytes": "15",
"droppacket": "23",
"totalbytes": "18",
"totalpacket": "98"
},
"MC18": {
"dropbytes": "57",
"droppacket": "9",
"totalbytes": "34",
"totalpacket": "41"
},
"MC19": {
"dropbytes": "99",
"droppacket": "18",
"totalbytes": "7",
"totalpacket": "57"
},
"UC0": {
"dropbytes": "98",
"droppacket": "70",
"totalbytes": "96",
"totalpacket": "41"
},
"UC1": {
"dropbytes": "36",
"droppacket": "63",
"totalbytes": "49",
"totalpacket": "18"
},
"UC2": {
"dropbytes": "15",
"droppacket": "3",
"totalbytes": "90",
"totalpacket": "99"
},
"UC3": {
"dropbytes": "41",
"droppacket": "48",
"totalbytes": "89",
"totalpacket": "60"
},
"UC4": {
"dropbytes": "94",
"droppacket": "82",
"totalbytes": "84",
"totalpacket": "8"
},
"UC5": {
"dropbytes": "92",
"droppacket": "75",
"totalbytes": "15",
"totalpacket": "83"
},
"UC6": {
"dropbytes": "71",
"droppacket": "50",
"totalbytes": "26",
"totalpacket": "84"
},
"UC7": {
"dropbytes": "80",
"droppacket": "49",
"totalbytes": "19",
"totalpacket": "27"
},
"UC8": {
"dropbytes": "33",
"droppacket": "13",
"totalbytes": "89",
"totalpacket": "13"
},
"UC9": {
"dropbytes": "31",
"droppacket": "86",
"totalbytes": "48",
"totalpacket": "43"
}
},
"Ethernet8": {
"ALL20": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL21": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL22": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL23": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL24": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL25": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL26": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL27": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL28": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL29": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"MC10": {
"dropbytes": "73",
"droppacket": "76",
"totalbytes": "63",
"totalpacket": "81"
},
"MC11": {
"dropbytes": "66",
"droppacket": "29",
"totalbytes": "16",
"totalpacket": "29"
},
"MC12": {
"dropbytes": "35",
"droppacket": "61",
"totalbytes": "12",
"totalpacket": "32"
},
"MC13": {
"dropbytes": "93",
"droppacket": "72",
"totalbytes": "17",
"totalpacket": "79"
},
"MC14": {
"dropbytes": "50",
"droppacket": "67",
"totalbytes": "21",
"totalpacket": "23"
},
"MC15": {
"dropbytes": "14",
"droppacket": "97",
"totalbytes": "10",
"totalpacket": "37"
},
"MC16": {
"dropbytes": "43",
"droppacket": "74",
"totalbytes": "17",
"totalpacket": "30"
},
"MC17": {
"dropbytes": "84",
"droppacket": "54",
"totalbytes": "63",
"totalpacket": "0"
},
"MC18": {
"dropbytes": "79",
"droppacket": "24",
"totalbytes": "88",
"totalpacket": "69"
},
"MC19": {
"dropbytes": "3",
"droppacket": "84",
"totalbytes": "12",
"totalpacket": "20"
},
"UC0": {
"dropbytes": "56",
"droppacket": "36",
"totalbytes": "5",
"totalpacket": "19"
},
"UC1": {
"dropbytes": "91",
"droppacket": "68",
"totalbytes": "17",
"totalpacket": "38"
},
"UC2": {
"dropbytes": "51",
"droppacket": "79",
"totalbytes": "65",
"totalpacket": "16"
},
"UC3": {
"dropbytes": "72",
"droppacket": "63",
"totalbytes": "97",
"totalpacket": "11"
},
"UC4": {
"dropbytes": "62",
"droppacket": "62",
"totalbytes": "89",
"totalpacket": "54"
},
"UC5": {
"dropbytes": "59",
"droppacket": "30",
"totalbytes": "84",
"totalpacket": "13"
},
"UC6": {
"dropbytes": "85",
"droppacket": "99",
"totalbytes": "67",
"totalpacket": "49"
},
"UC7": {
"dropbytes": "88",
"droppacket": "38",
"totalbytes": "63",
"totalpacket": "2"
},
"UC8": {
"dropbytes": "43",
"droppacket": "93",
"totalbytes": "82",
"totalpacket": "0"
},
"UC9": {
"dropbytes": "61",
"droppacket": "91",
"totalbytes": "17",
"totalpacket": "80"
}
}
}"""
show_queue_counters_port_json = """\
{
"Ethernet8": {
"ALL20": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL21": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL22": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL23": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL24": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL25": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL26": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL27": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL28": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"ALL29": {
"dropbytes": "N/A",
"droppacket": "N/A",
"totalbytes": "N/A",
"totalpacket": "N/A"
},
"MC10": {
"dropbytes": "73",
"droppacket": "76",
"totalbytes": "63",
"totalpacket": "81"
},
"MC11": {
"dropbytes": "66",
"droppacket": "29",
"totalbytes": "16",
"totalpacket": "29"
},
"MC12": {
"dropbytes": "35",
"droppacket": "61",
"totalbytes": "12",
"totalpacket": "32"
},
"MC13": {
"dropbytes": "93",
"droppacket": "72",
"totalbytes": "17",
"totalpacket": "79"
},
"MC14": {
"dropbytes": "50",
"droppacket": "67",
"totalbytes": "21",
"totalpacket": "23"
},
"MC15": {
"dropbytes": "14",
"droppacket": "97",
"totalbytes": "10",
"totalpacket": "37"
},
"MC16": {
"dropbytes": "43",
"droppacket": "74",
"totalbytes": "17",
"totalpacket": "30"
},
"MC17": {
"dropbytes": "84",
"droppacket": "54",
"totalbytes": "63",
"totalpacket": "0"
},
"MC18": {
"dropbytes": "79",
"droppacket": "24",
"totalbytes": "88",
"totalpacket": "69"
},
"MC19": {
"dropbytes": "3",
"droppacket": "84",
"totalbytes": "12",
"totalpacket": "20"
},
"UC0": {
"dropbytes": "56",
"droppacket": "36",
"totalbytes": "5",
"totalpacket": "19"
},
"UC1": {
"dropbytes": "91",
"droppacket": "68",
"totalbytes": "17",
"totalpacket": "38"
},
"UC2": {
"dropbytes": "51",
"droppacket": "79",
"totalbytes": "65",
"totalpacket": "16"
},
"UC3": {
"dropbytes": "72",
"droppacket": "63",
"totalbytes": "97",
"totalpacket": "11"
},
"UC4": {
"dropbytes": "62",
"droppacket": "62",
"totalbytes": "89",
"totalpacket": "54"
},
"UC5": {
"dropbytes": "59",
"droppacket": "30",
"totalbytes": "84",
"totalpacket": "13"
},
"UC6": {
"dropbytes": "85",
"droppacket": "99",
"totalbytes": "67",
"totalpacket": "49"
},
"UC7": {
"dropbytes": "88",
"droppacket": "38",
"totalbytes": "63",
"totalpacket": "2"
},
"UC8": {
"dropbytes": "43",
"droppacket": "93",
"totalbytes": "82",
"totalpacket": "0"
},
"UC9": {
"dropbytes": "61",
"droppacket": "91",
"totalbytes": "17",
"totalpacket": "80"
}
}
}"""
class TestQueue(object):
    """CLI tests for `show queue counters` in plain and JSON output modes."""

    @classmethod
    def setup_class(cls):
        # Append the mocked scripts dir to PATH and switch utilities to test mode.
        os.environ["PATH"] += os.pathsep + scripts_path
        os.environ['UTILITIES_UNIT_TESTING'] = "2"
        print("SETUP")

    def test_queue_counters(self):
        """All-port, plain-text output."""
        runner = CliRunner()
        result = runner.invoke(
            show.cli.commands["queue"].commands["counters"],
            []
        )
        print(result.output)
        assert result.exit_code == 0
        assert result.output == show_queue_counters

    def test_queue_counters_port(self):
        """Single-port, plain-text output."""
        runner = CliRunner()
        result = runner.invoke(
            show.cli.commands["queue"].commands["counters"],
            ["Ethernet8"]
        )
        print(result.output)
        assert result.exit_code == 0
        assert result.output == show_queue_counters_port

    def test_queue_counters_json(self):
        """All-port JSON output (timestamp stripped before comparing)."""
        runner = CliRunner()
        result = runner.invoke(
            show.cli.commands["queue"].commands["counters"],
            ["--json"]
        )
        assert result.exit_code == 0
        print(result.output)
        json_output = json.loads(result.output)
        # remove the volatile "time" field from the output
        for _, v in json_output.items():
            del v["time"]
        assert json_dump(json_output) == show_queue_counters_json

    def test_queue_counters_port_json(self):
        """Single-port JSON output (timestamp stripped before comparing)."""
        runner = CliRunner()
        result = runner.invoke(
            show.cli.commands["queue"].commands["counters"],
            # BUG FIX: each CLI token must be a separate list element; the
            # original passed the single string "Ethernet8 --json", which click
            # handed to the command as one argument instead of parsing --json.
            ["Ethernet8", "--json"]
        )
        assert result.exit_code == 0
        print(result.output)
        json_output = json.loads(result.output)
        # remove the volatile "time" field from the output
        for _, v in json_output.items():
            del v["time"]
        assert json_dump(json_output) == show_queue_counters_port_json

    @classmethod
    def teardown_class(cls):
        # Drop the PATH entry added in setup_class and leave test mode.
        os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1])
        os.environ['UTILITIES_UNIT_TESTING'] = "0"
        print("TEARDOWN")
| 28.779396 | 87 | 0.385038 |
b2839bd068bd5a47d7bd6c5a2af555b2e4572333 | 2,904 | py | Python | schwifty/registry.py | figo-connect/schwifty | 1fbcca7a383f0eb2d38ff0c4c30ee81b3538ec7e | [
"MIT"
] | 44 | 2016-03-07T11:18:52.000Z | 2019-10-29T13:06:39.000Z | schwifty/registry.py | figo-connect/schwifty | 1fbcca7a383f0eb2d38ff0c4c30ee81b3538ec7e | [
"MIT"
] | 11 | 2016-05-23T12:27:27.000Z | 2019-11-26T10:29:35.000Z | schwifty/registry.py | figo-connect/schwifty | 1fbcca7a383f0eb2d38ff0c4c30ee81b3538ec7e | [
"MIT"
] | 20 | 2016-11-07T15:27:00.000Z | 2019-11-26T10:32:07.000Z | import itertools
import json
import pathlib
from collections import defaultdict
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Tuple
from typing import Union
try:
from importlib.resources import files
except ImportError:
from importlib_resources import files # type: ignore
# Process-wide cache of loaded registries, keyed by registry name.
_registry: Dict[str, Union[Dict, List[Dict]]] = {}
def merge_dicts(left: Dict, right: Dict) -> Dict:
    """Recursively combine two mappings into a new dict.

    Keys present on both sides are merged recursively when both values are
    dicts; otherwise the right-hand value wins. Keys unique to either side
    are copied over unchanged. Neither input is mutated.
    """
    combined: Dict = {}
    for key in frozenset(right) & frozenset(left):
        lhs, rhs = left[key], right[key]
        if isinstance(lhs, dict) and isinstance(rhs, dict):
            combined[key] = merge_dicts(lhs, rhs)
        else:
            combined[key] = rhs
    for key, value in list(left.items()) + list(right.items()):
        combined.setdefault(key, value)
    return combined
def has(name: str) -> bool:
    """Tell whether a registry called *name* has already been cached."""
    already_loaded = name in _registry
    return already_loaded
def get(name: str) -> Union[Dict, List[Dict]]:
    """Return registry *name*, loading and caching it on first access.

    The registry is assembled from every ``*.json`` file inside the package's
    ``<name>_registry`` directory (in sorted filename order): list chunks are
    concatenated, dict chunks are merged recursively.

    Raises ``ValueError`` when no JSON file yields any data.
    """
    if not has(name):
        directory = files(__package__) / f"{name}_registry"
        assert isinstance(directory, pathlib.Path)
        merged = None
        for json_file in sorted(directory.glob("*.json")):
            with json_file.open(encoding="utf-8") as handle:
                piece = json.load(handle)
            if merged is None:
                merged = piece
            elif isinstance(merged, list):
                merged.extend(piece)
            elif isinstance(merged, dict):
                merged = merge_dicts(merged, piece)
        if merged is None:
            raise ValueError(f"Failed to load registry {name}")
        save(name, merged)
    return _registry[name]
def save(name: str, data: Union[Dict, List[Dict]]) -> None:
    """Store *data* under *name* in the module-level registry cache."""
    _registry.update({name: data})
def build_index(
    base_name: str,
    index_name: str,
    key: Union[str, Tuple],
    accumulate: bool = False,
    **predicate: Any,
) -> None:
    """Derive a lookup index called *index_name* from the list registry *base_name*.

    Each entry matching all *predicate* equality constraints is indexed under
    the value of *key* (a field name, or a tuple of field names which becomes
    a tuple key). With ``accumulate=True`` entries sharing a key are collected
    into lists; otherwise later entries overwrite earlier ones.
    """
    def index_key(entry: Dict) -> Union[Tuple, str]:
        # Tuple keys are built from several fields; plain keys from one.
        if isinstance(key, tuple):
            return tuple(entry[part] for part in key)
        return entry[key]

    def satisfies(entry: dict) -> bool:
        for field, expected in predicate.items():
            if entry[field] != expected:
                return False
        return True

    base = get(base_name)
    assert isinstance(base, list)
    if not accumulate:
        save(index_name, {index_key(entry): entry for entry in base if satisfies(entry)})
        return
    grouped: Dict = defaultdict(list)
    for entry in base:
        if satisfies(entry):
            grouped[index_key(entry)].append(entry)
    save(index_name, dict(grouped))
def manipulate(name: str, func: Callable) -> None:
    """Apply *func* to every entry of registry *name* and re-save the result.

    Note the callback arity differs by registry type: dict registries call
    ``func(key, value)``, list registries call ``func(item)``.
    """
    registry = get(name)
    if isinstance(registry, list):
        registry = [func(element) for element in registry]
    elif isinstance(registry, dict):
        # Mutate in place; reassigning existing keys while iterating is safe.
        for entry_key in registry:
            registry[entry_key] = func(entry_key, registry[entry_key])
    save(name, registry)
| 29.333333 | 85 | 0.618802 |
df92cae3f7c4e76e3df47b4f22f694248273f296 | 7,883 | py | Python | src/preprocessing/utils.py | farahhuifanyang/OpenRE-via-qa-multi-head | 8946bef835912b438e7d511f8cd55c18b91c4442 | [
"CC-BY-4.0"
] | 3 | 2022-03-01T12:45:40.000Z | 2022-03-15T03:00:44.000Z | src/preprocessing/utils.py | farahhuifanyang/QuORE | 8946bef835912b438e7d511f8cd55c18b91c4442 | [
"CC-BY-4.0"
] | null | null | null | src/preprocessing/utils.py | farahhuifanyang/QuORE | 8946bef835912b438e7d511f8cd55c18b91c4442 | [
"CC-BY-4.0"
] | null | null | null | import html
import re
import json
import os
from typing import Dict, List, Tuple, Any
from allennlp.data.tokenizers import Token
# Canonical answer-type labels used throughout the preprocessing code.
SPAN_ANSWER_TYPE = 'spans'
SINGLE_SPAN = 'single_span'
MULTIPLE_SPAN = 'multiple_span'
NO_SPAN = 'no_span'
# Span-style answer types; currently the only answer family supported.
SPAN_ANSWER_TYPES = [SINGLE_SPAN, MULTIPLE_SPAN, NO_SPAN]
ALL_ANSWER_TYPES = SPAN_ANSWER_TYPES
def load_dataset(path):
    """Load a JSON dataset from *path*.

    Reads explicitly as UTF-8 so behavior does not depend on the platform's
    default encoding (JSON text is UTF-8 per RFC 8259).
    """
    with open(path, encoding='utf-8') as dataset_file:
        return json.load(dataset_file)
def save_dataset(dataset, path):
    """Write *dataset* to *path* as pretty-printed JSON, explicitly UTF-8."""
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(dataset, f, indent=4)
def get_answer_type(answer):
    """Classify an answer dict as single-span, multiple-span or no-span."""
    spans = answer['spans']
    if not spans:
        return NO_SPAN
    return SINGLE_SPAN if len(spans) == 1 else MULTIPLE_SPAN
def find_span(answer_tokens: List[Token], qp_token_indices: Dict[Token, List[int]],
              num_qp_tokens) -> List[Tuple[int, int]]:
    """Find every position where *answer_tokens* occurs consecutively.

    *qp_token_indices* maps each token to the positions at which it occurs in
    the question+passage sequence. Returns (start, end) pairs with *end*
    inclusive.
    """
    length = len(answer_tokens)
    spans: List[Tuple[int, int]] = []
    for begin in qp_token_indices[answer_tokens[0]]:
        if length == 1:
            spans.append((begin, begin))
            continue
        if begin + length - 1 > num_qp_tokens:
            continue
        if all(begin + offset in qp_token_indices[token]
               for offset, token in enumerate(answer_tokens[1:], 1)):
            # end index is inclusive
            spans.append((begin, begin + length - 1))
    return spans
def deep_dict_update(d, u):
    """Recursively fold mapping *u* into *d* in place and return *d*.

    Nested dicts are merged; any other value from *u* overwrites the one in
    *d*. Based on https://stackoverflow.com/a/3233356/2133678
    """
    for key, incoming in u.items():
        existing = d.get(key, {})
        if isinstance(existing, dict) and isinstance(incoming, dict):
            d[key] = deep_dict_update(existing, incoming)
        else:
            d[key] = incoming
    return d
def fill_token_indices(tokens, full_text, uncased, basic_tokenizer, word_tokens=None):
    """Attach character offsets to wordpiece tokens.

    For every wordpiece in *tokens*, locate its source text inside *full_text*
    and build a new Token whose ``idx`` is the character offset and whose
    ``lemma_`` holds the exact source substring. ``[UNK]`` pieces are resolved
    in a second pass by re-aligning against the basic tokenizer's output.

    NOTE(review): relies on the private ``_run_strip_accents`` helper of the
    BERT basic tokenizer; assumes *tokens* were produced from *full_text* --
    confirm with callers.
    """
    if uncased:
        full_text = full_text.lower()
    new_tokens = []
    has_unknowns = False
    temp_text = full_text
    reconstructed_full_text = ''
    absolute_index = 0
    # First pass: walk the wordpieces left-to-right, consuming temp_text.
    for i, token in enumerate(tokens):
        token_text = token.text
        # Strip the "##" continuation marker before searching for the piece.
        token_text_to_search = token_text[2:] if len(token_text) > 2 and token_text[:2] == "##" else token_text
        if token_text == '[UNK]':
            new_tokens.append(Token(text = token_text, lemma_ = token_text, idx=absolute_index))
            # lemma as placeholder, index to search from later
            has_unknowns = True
            continue
        # Search in the accent-stripped text so offsets line up with BERT's view.
        relative_index = basic_tokenizer._run_strip_accents(temp_text).find(token_text_to_search)
        start_idx = absolute_index + relative_index
        end_idx = start_idx + len(token_text_to_search) # exclusive
        absolute_index = end_idx
        token_source_text = full_text[start_idx : end_idx]
        first_part = temp_text[:relative_index]
        second_part = token_source_text
        reconstructed_full_text += first_part + second_part
        temp_text = temp_text[relative_index + len(token_source_text):]
        new_tokens.append(Token(text = token_text, lemma_ = token_source_text, idx = start_idx))
    if has_unknowns:
        # Second pass: re-derive positions for [UNK] pieces from the basic tokens.
        reconstructed_full_text = ''
        word_tokens_text = ' '.join([word_token.text for word_token in word_tokens]) if word_tokens is not None else full_text
        basic_tokens, j, constructed_token = basic_tokenizer.tokenize(word_tokens_text), 0, ''
        padding_idx = 0
        for i, token in enumerate(new_tokens):
            if token.text != '[UNK]':
                constructed_token += token.lemma_
                if constructed_token == basic_tokens[j]:
                    constructed_token = ''
                    j += 1
            else:
                relative_index = basic_tokenizer._run_strip_accents(full_text[token.idx:]).find(basic_tokens[j])
                new_tokens[i] = Token(text = token.text, lemma_ = basic_tokens[j], idx = token.idx + relative_index)
                j += 1
            padding = full_text[padding_idx : new_tokens[i].idx]
            reconstructed_full_text += padding + full_text[new_tokens[i].idx : new_tokens[i].idx + len(new_tokens[i].lemma_)]
            padding_idx = new_tokens[i].idx + len(new_tokens[i].lemma_)
    # Will happen in very rare cases due to accents stripping changing the length of the word
    #if reconstructed_full_text != full_text:
    #    raise Exception('Error with token indices')
    return new_tokens
def token_to_span(token):
    """Return (start, end) character offsets of *token*; *end* is exclusive."""
    begin = token.idx
    return (begin, begin + len(token.lemma_))
def standardize_dataset(dataset):
    """Standardize every passage, question and answer span in *dataset* in place."""
    for passage_info in dataset.values():
        passage_info['passage'] = standardize_text(passage_info['passage'])
        for qa_pair in passage_info["qa_pairs"]:
            qa_pair['question'] = standardize_text(qa_pair['question'])
            # BUG FIX: 'answer' used to be read unconditionally before the
            # membership check, raising KeyError for qa_pairs without one.
            if 'answer' in qa_pair:
                answer = qa_pair['answer']
                if 'spans' in answer:
                    answer['spans'] = [standardize_text(span) for span in answer['spans']]
            if 'validated_answers' in qa_pair:
                for validated_answer in qa_pair['validated_answers']:
                    # BUG FIX: the guard used to test the *main* answer's
                    # 'spans' key, then read validated_answer['spans'] anyway.
                    if 'spans' in validated_answer:
                        validated_answer['spans'] = [standardize_text(span) for span in validated_answer['spans']]
    return dataset
def standardize_text(text):
    """Normalize raw text and return it.

    Replaces no-break spaces with regular spaces, unescapes HTML entities,
    and removes Wikipedia-parsing artifacts of the form "<word>.:<digits>"
    (a sentence-final period followed by a reference page number, e.g.
    https://en.wikipedia.org/wiki/Polish%E2%80%93Ottoman_War_(1672%E2%80%931676)),
    keeping just the period.
    """
    # I don't see a reason to differentiate between "No-Break Space" and regular space
    text = text.replace('\u00a0', ' ')
    text = html.unescape(text)
    if '.:' in text:
        # BUG FIX: the old pattern '\.:d+' matched a literal run of "d"s rather
        # than digits, and the replacement '\.' inserted a literal backslash.
        text = re.sub(r'\.:\d+', '.', text)
    # BUG FIX: the function previously fell off the end and returned None.
    return text
import itertools
def get_all_subsequences(full_list):
    """Return all contiguous runs of *full_list*, longest first, space-joined.

    Candidate subsequences are produced with ``itertools.combinations`` and
    kept only when they match some contiguous window of the original list.
    """
    def is_window(candidate):
        size = len(candidate)
        return any(list(candidate) == full_list[i:i + size]
                   for i in range(len(full_list) - size + 1))

    collected = []
    for size in range(len(full_list), 0, -1):
        collected.extend(' '.join(combo)
                         for combo in itertools.combinations(full_list, size)
                         if is_window(combo))
    return collected
def extract_answer_info_from_annotation(
    answer_annotation: Dict[str, Any]
) -> Tuple[str, List[str]]:
    """Return (answer_type, answer_texts) for a DROP-style answer annotation.

    The type is "spans" when the annotation holds any spans, else None (no
    answer), in which case the text list is empty.
    """
    answer_type = "spans" if answer_annotation["spans"] else None
    answer_content = None if answer_type is None else answer_annotation[answer_type]
    answer_texts: List[str] = []
    if answer_type == "spans":
        # answer_content is a list of strings in this case
        answer_texts = answer_content
    return answer_type, answer_texts
def create_bio_labels(spans: List[Tuple[int, int]], n_labels: int):
    """Build BIO tags (0=O, 1=B, 2=I) over *n_labels* positions.

    Each span is an inclusive (start, end) pair: start gets B, the remaining
    positions of the span get I, everything else stays O.
    """
    labels = [0 for _ in range(n_labels)]
    for span in spans:
        begin, finish = span[0], span[1]
        labels[begin] = 1
        for position in range(begin + 1, finish + 1):
            labels[position] = 2
    return labels
def create_io_labels(spans: List[Tuple[int, int]], n_labels: int):
    """Build IO tags (0=O, 1=I) over *n_labels* positions.

    Each span is an inclusive (start, end) pair; every position it covers is
    tagged I, everything else stays O.
    """
    labels = [0] * n_labels
    for span in spans:
        begin, finish = span[0], span[1]
        for position in range(begin, finish + 1):
            labels[position] = 1
    return labels
| 34.423581 | 131 | 0.636687 |
f3fa372c95f7e44f215ff6caad3e9735574f5dcc | 3,672 | py | Python | examples/03b-hyperopt.py | data-science-lab-amsterdam/skippa | 1349317c441f1e46e22f4c02a8aceae767aea5fe | [
"BSD-3-Clause"
] | 33 | 2021-12-15T22:56:12.000Z | 2022-02-26T12:33:56.000Z | examples/03b-hyperopt.py | data-science-lab-amsterdam/skippa | 1349317c441f1e46e22f4c02a8aceae767aea5fe | [
"BSD-3-Clause"
] | null | null | null | examples/03b-hyperopt.py | data-science-lab-amsterdam/skippa | 1349317c441f1e46e22f4c02a8aceae767aea5fe | [
"BSD-3-Clause"
] | 1 | 2022-01-20T15:41:35.000Z | 2022-01-20T15:41:35.000Z |
"""
Advanced hyperparameter tuning with Hyperopt (if sklearn's GridSearchCV or RandomSearchCV isn't good enough)
this requires pip install hyperopt
How this works:
- Hyperopt requires you to define a function to minimize, that returns the loss for a given parameter set
- this function needs to fit the pipeline using that parameter set
- we can do this using the .set_params() method from sklearn
N.B. Hyperopt is a bit weird with data types, so we need to ensure
"""
import time
import logging
from copy import deepcopy
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from hyperopt.pyll import scope
from skippa import Skippa, columns
from skippa.utils import get_dummy_data
# Fixed seed so the data split and the hyperopt search are reproducible.
RANDOM_SEED = 123
# get some data
X, y = get_dummy_data(nrows=500, nfloat=3, nchar=1, nint=0, ndate=0, binary_y=False)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=RANDOM_SEED)
# define the pipeline: impute numeric/categorical columns, one-hot encode,
# then fit a random forest regressor
pipe = (
    Skippa()
    .impute(columns(dtype_include='number'), strategy='median')
    .impute(columns(dtype_include='object'), strategy='most_frequent')
    .onehot(columns(dtype_include='object'), handle_unknown='ignore')
    .model(RandomForestRegressor())
)
# define parameter search space
param_space = {
    'n_estimators': scope.int(hp.quniform('n_estimators', 100, 2000, 1)), # explicitly cast to int, or RandomForestRegressor will throw a TypeError
    'max_depth' : scope.int(hp.quniform('max_depth', 2, 10, 1)),
    'max_features': hp.choice('max_features', ['auto', 'sqrt', 'log2'])
}
pipe_param_space = pipe.get_pipeline_params(param_space) # this prepends the pipeline step name to each param name
# define the function to minimize: evaluates single parameter selection
def get_cv_score_for_params(params):
    """Hyperopt objective: cross-validated MSE of the pipeline under *params*.

    Works on a deep copy of the global pipeline so repeated evaluations do not
    interfere. Returns the dict format hyperopt expects (loss/status/...).
    """
    logging.debug(params)
    candidate = deepcopy(pipe)
    candidate.set_params(**params)
    folds = KFold(n_splits=3, shuffle=True, random_state=RANDOM_SEED)
    # cross_val_score returns *negative* MSE, so flip the sign for a loss.
    fold_scores = cross_val_score(
        candidate, X_train, y_train,
        cv=folds, scoring="neg_mean_squared_error", n_jobs=-1,
    )
    loss = -1 * fold_scores.mean()
    return dict(loss=loss, status=STATUS_OK, params=params, eval_time=time.time())
# trials will contain logging information
trials = Trials()
# apply hyperopt's search using the fmin function
best = fmin(
    fn=get_cv_score_for_params, # function to minimize
    space=pipe_param_space,
    algo=tpe.suggest, # optimization algorithm, hyperopt will select its parameters automatically
    max_evals=50, # maximum number of iterations
    trials=trials, # logging
    rstate=np.random.default_rng(RANDOM_SEED)
)
# This is weird: the datatypes are all messed up
# and the pipeline step prefix had been removed from the parameter names
print(best)
# let's fix this:
def results_to_df(trials):
    """Collect hyperopt trial results into a DataFrame, best (lowest loss) first.

    Column order is 'loss' followed by the parameter names of the first trial.
    """
    rows = []
    for record in trials.results:
        rows.append([float(record['loss'])] + list(record['params'].values()))
    header = ['loss'] + list(trials.results[0]['params'].keys())
    frame = pd.DataFrame(data=rows, columns=header)
    return frame.sort_values(by='loss', ascending=True)
res = results_to_df(trials)
# we can get the params of the best model like this:
# NOTE(review): DataFrame rows may coerce ints to floats; set_params appears to
# accept that here -- confirm for stricter estimators.
best_params = dict(res.iloc[0, 1:])
# train a model (pipeline) on the full training set using the best params
pipe.set_params(**best_params)
pipe.fit(X_train, y_train)
| 34 | 152 | 0.731481 |
75fce19d3e61e662f44f357f7fc52294c410690e | 8,893 | py | Python | ubuntu_init.py | ChallenAi/algos-partice-go | 24a220329189b5b6f34b9d38634006f0598a79f6 | [
"MIT"
] | null | null | null | ubuntu_init.py | ChallenAi/algos-partice-go | 24a220329189b5b6f34b9d38634006f0598a79f6 | [
"MIT"
] | null | null | null | ubuntu_init.py | ChallenAi/algos-partice-go | 24a220329189b5b6f34b9d38634006f0598a79f6 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# ubuntu系列系统初始化脚本
# 必须手动输入的命令
# sudo passwd
# 先安装zsh,再在zsh下使用apt install, 因为bash下安装的pip3在zsh下不好使
# sudo apt install zsh, git
# sudo sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
# sudo chsh -s /bin/zsh
# sudo apt install python3
# sudo apt install python3-pip
# quanxian
# sudo chown fengyiai:fengyiai ./.oh*
# sudo chown fengyiai:fengyiai ./.z*
# zhunbei de wenjian:
# 1. debs
# 2. tar.gz/xz...
# 3. config= vimrc,zshrc,awesome,i3 ...
# qitapeizhi awesome/i3(.config) /(/etc/X11/app-defaults/XTerm)xterm /
# bu yao shi yong sudo yun xing ben jiaoben
# 建议手动:更新系统内核,并删除旧的内核(一个200m左右)
# 查看存在的内核
# dpkg -l|grep linux-image-
# 更新内核
# apt-get dist-upgrade
# 查看正在当前内核版本
# uname -r
# 删除没用的内核,现在正在使用中的(当前的)千万不能删!!!!!! linux-image-generic 千万不要删除,他不是系统镜像,是完整的系统!!!!
# sudo apt-get purge linux-image-...
# ---------------------------------------------------------------------
# config
import os
# Directory layout expected next to this script: ./deb, ./tgz and ./config.
init_path = os.getcwd()
deb_path = init_path + '/deb'
tgz_path = init_path + '/tgz'
cfg_path = init_path + '/config'
# --ubuntu_init
# --deb/
# --tgz/
# --config/
# sudo vi /etc/apt/sources.txt (可选)
# 配置ys主题 .zshrc
# ---------------------------------------------------------------------
cmd = 'echo apt-get remove start!'
os.system(cmd)
# Translated: prefer `purge` over `remove` so config files are dropped too; use
# `remove` instead if the app may be reinstalled later with its settings kept.
# simple-scan is the scanner tool and onboard the on-screen keyboard -- unwanted.
apt_purge = ['libreoffice-common', 'unity-webapps-common', 'thunderbird', 'totem', 'rhythmbox', 'empathy', 'brasero', 'simple-scan', 'gnome-mines', 'cheese', 'transmission-common', 'gnome-orca', 'webbrowser-app', 'gnome-sudoku', 'landscape-client-ui-install', 'onboard', 'deja-dup']
def apt_purger(app_name):
    """Purge *app_name* via apt-get; return the name on failure, None on success."""
    exit_code = os.system('sudo apt-get purge -y ' + app_name)
    if exit_code == 0:
        return None
    print("=> %s is not correctly removed or not installed(not need to remove)!" % app_name)
    return app_name
apt_failure = list(map(apt_purger,apt_purge))
# ---------------------------------------------------------------------
# init: update;upgrade | remove useless pkg | autoremove useless relay
os.system('echo apt-get auto-remove start!')
# Refresh the apt package index and upgrade installed packages.
os.system('sudo apt-get update -y')
os.system('sudo apt-get upgrade -y')
# Drop cached .deb archives of packages that were uninstalled.
os.system('sudo apt autoclean -y')
# Drop cached .deb archives of packages still installed.
os.system('sudo apt clean -y')
# Remove orphaned dependencies the system no longer needs.
os.system('sudo apt autoremove -y')
# ---------------------------------------------------------------------
cmd = 'echo apt-get install start!'
os.system(cmd)
# Translated: video players: smplayer/vlc/mpv; synaptic is a handy package
# manager for uninstalling software.
# apt_install = ['git', 'gdebi', 'vim', 'scrot', 'smplayer', 'synaptic']
apt_install = ['gdebi', 'vim', 'scrot']
def apt_installer(app_name):
    """Install *app_name* via apt-get; return the name on failure, None on success."""
    exit_code = os.system('sudo apt-get -y install ' + app_name)
    if exit_code == 0:
        return None
    print("=> %s is not correctly installed!" % app_name)
    return app_name
apt_failure = list(map(apt_installer,apt_install))
# ---------------------------------------------------------------------
# gdebi install: install every .deb package found under ./deb
cmd = 'echo gdebi install start!'
os.system(cmd)
gdebi_install = os.listdir(deb_path)
def gdebi_installer(app_name):
    """Install a .deb via gdebi; return the name on failure, None on success."""
    exit_code = os.system('sudo gdebi -n ' + app_name)
    if exit_code == 0:
        return None
    print("=> %s is not correctly installed!" % app_name)
    return app_name
os.chdir(deb_path)
gdebi_failure = list(map(gdebi_installer, gdebi_install))
# ---------------------------------------------------------------------
# Archive install: extract every archive found under ./tgz
# tgz_install = ['node/npm', 'golang', 'jdk', 'mongodb', 'redis' ]
cmd = 'echo tar/rar/... install start!'
os.system(cmd)
tgz_install = os.listdir(tgz_path)
def tgz_installer(app_name):
    """Extract archive *app_name* with the tool matching its suffix.

    Returns None for an unrecognized suffix or a successful extraction, and
    the file name when the extraction command fails.
    """
    # Compound suffixes first; plain '.tar' cannot shadow them anyway since
    # endswith compares the full suffix.
    handlers = (
        ('.tar.gz', 'tar -zxvf '),
        ('.tar.xz', 'tar -xJf '),
        ('.tar.bz2', 'tar -jxvf '),
        ('.tar.Z', 'tar -Zxvf '),
        ('.tar', 'tar -xvf '),
        ('.7z', '7z x '),
        ('.rar', '7z x '),
    )
    for suffix, command_prefix in handlers:
        if app_name.endswith(suffix):
            cmd = command_prefix + app_name
            break
    else:
        print('=> bad file: %s' % app_name)
        return None
    if os.system(cmd) != 0:
        print("=> %s is not correctly installed!" % app_name)
        return app_name
    return None
os.chdir(tgz_path)
tgz_failure = list(map(tgz_installer, tgz_install))
# if no software dir , it should mkdir
# os.system('mkdir ~/software')
cp_files = [f for f in os.listdir() if os.path.isdir(f)]
# Translated: wrap map() in list() to force evaluation -- map is lazy.
list(map((lambda f:os.system('cp -r ./%s ~/software' %f)), cp_files))
os.chdir(cfg_path)
# os.system('sudo cp ./bin/* /usr/local/bin')
os.system('sudo chown fengyiai:1000 /usr/local/bin/*')
# Translated TODO: should not overwrite /usr/local/bin directly; symlink
# instead, e.g. ln -s /home/fengyiai/software/xx/bin/* /usr/local/bin/*
# (bin dirs differ per package and may be missing).
# ---------------------------------------------------------------------
# npm
cmd = 'echo npm install start!'
os.system(cmd)
# npm_install = ['react-native-cli', 'vue-cli', 'yarn', 'babel', 'webpack', 'cnpm']
npm_install = ['create-react-app', 'vue-cli', 'yarn', 'cnpm']
def npm_installer(app_name):
    """Install a global npm package; return the name on failure, None on success."""
    exit_code = os.system('sudo npm i -g ' + app_name)
    if exit_code == 0:
        return None
    print("=> %s is not correctly installed!" % app_name)
    return app_name
npm_failure = list(map(npm_installer, npm_install))
# ---------------------------------------------------------------------
# pip3
cmd = 'echo pip install start!'
os.system(cmd)
# pip3_install = ['lxml', 'beautifulsoup4', 'jupyter', 'numpy', 'matplotlib', 'scipy', 'pandas', 'scikit-learn', 'virtualenv']
pip3_install = ['virtualenv']
def pip3_installer(app_name):
    """Install a pip3 package system-wide; return the name on failure, None on success."""
    exit_code = os.system('sudo pip3 install ' + app_name)
    if exit_code == 0:
        return None
    print("=> %s is not correctly installed!" % app_name)
    return app_name
pip3_failure = list(map(pip3_installer, pip3_install))
# ---------------------------------------------------------------------
# config: hosts | zshrc | zsh_history | oh-my-zsh | vimrc
os.chdir(cfg_path)
cfg_etc_files = ['hosts']
cfg_home_dot_files = ['zshrc', 'oh-my-zsh', 'zsh_history', 'vimrc']
list(map((lambda f:os.system('sudo cp ./%s /etc' %f)), cfg_etc_files))
list(map((lambda f:os.system('cp -r ./%s ~/.%s' %(f, f))), cfg_home_dot_files))
# Translated TODO: better to append aliases/exports to ~/.zshrc than overwrite it.
# ---------------------------------------------------------------------
# Register the Microsoft YaHei font files with the Ubuntu font system.
# 1. Copy the fonts into the system font directory.
os.system('sudo cp -r ./ms_fonts /usr/share/fonts/')
# 2. Make them world-readable.
os.system('sudo chmod 644 /usr/share/fonts/ms_fonts/*')
# 3+4. BUG FIX: os.system('cd ...') only changed directory inside a throwaway
# child shell, so mkfontscale/mkfontdir used to index the *current* directory.
# Chain the cd with the indexing commands in one shell invocation instead.
os.system('cd /usr/share/fonts/ms_fonts/ && sudo mkfontscale && sudo mkfontdir')
os.system('sudo fc-cache -fv')
print('add Microsoft YaHei to ubuntu system fonts successfully!')
print('Now you can set chrome/sublime-text-3 to use Monaco | Microsoft YaHei | YaHei Consolas Hybrid.')
# ---------------------------------------------------------------------
# sublime chinese input
# Translated: plugin list kept for reference; SublimeCodeIntel adds code
# completion and ConvertToUTF8 makes Chinese text display correctly.
sublime_plugins = ['alignment', 'All Autocompelete', 'Babel', 'BracketHighliter', 'Emmet' ,'Git', 'HTML-CSS-JS prettify', 'JSformat', 'MarkdownEditing', 'python3', 'PyV8', 'React ES6 Snippet', 'Terminal', 'Vue Syntax Highlighter', 'SublimeCodeInter', 'ConvertToUTF8']
os.system('sudo cp -r ./dot_config/sublime-text-3 ~/.config/sublime-text-3')
os.system('sudo chmod -R 777 ~/.config/sublime-text-3')
# ---------------------------------------------------------------------
# 建议手动操作的,因为可能不需要这样的配置,比如不需要导入书签,或不同的ssh设置
# bookmark | daimaku
# ssh | gitconfig (可选)
# mysql apt安装:mysql设置密码会被打断
# mysql_apt_install = ['mysql-server', 'mysql-client', 'libmysqlclient-dev']
# 配置触摸板(可选,如果可用,就不需要配置)
# sudo vim /usr/share/X11/xorg.conf.d/50-synaptics.conf
# Section "InputClass"
# Identifier "touchpad catchall"
# Driver "synaptics"
# MatchIsTouchpad "on"
# Option "TapButton1" "1"
# Option "VertTwoFingerScroll" "1"
# Option "HorizTwoFingerScroll" "1"
# Option "EmulateTwoFingerMinZ" "50"
# Option "EmulateTwoFingerMinW" "10"
# Desktop components to strip when removing Ubuntu's default desktop.
# BUG FIX: a missing comma after 'ubuntu-gnome-desktop' made Python concatenate
# it with 'gnome-shell' into the bogus name 'ubuntu-gnome-desktopgnome-shell'.
delete_ubuntu_default_desktop = [
    'ubuntu-gnome-desktop',  # gnome desktop
    'gnome-shell',
    'unity8',  # unity
    'ubuntu-system-settings',
    'webbrowser-app',
]
# Translated ("wu shan chu de"): packages that must NOT be removed --
# reinstall them if missing.
install_default_app = [
    'gnome-power-manager'
]
# sudo apt-get -y purge unity
# sudo apt-get -y purge unity-common
# sudo apt-get -y purge unity-lens*
# sudo apt-get -y purge unity-services
# sudo apt-get -y purge unity-asset-pool
# sudo apt remove –purge unity-lens-applications
# del gnome ppa src (optional)
# sudo apt-get install ppa-purge
# sudo ppa-purge ppa:gnome3-team/gnome3
# sudo ppa-purge ppa:gnome3-team/gnome3-staging
#sudo apt-get -y --auto-remove purge unity unity-2d*
#sudo apt-get autoremove gcalctool #计算器 | 33.685606 | 282 | 0.631058 |
7dc2e34b14cc12c49debd2106be66fde33578a61 | 205 | py | Python | problem/01000~09999/01871/1871.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-19T16:37:44.000Z | 2019-04-19T16:37:44.000Z | problem/01000~09999/01871/1871.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-20T11:42:44.000Z | 2019-04-20T11:42:44.000Z | problem/01000~09999/01871/1871.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 3 | 2019-04-19T16:37:47.000Z | 2021-10-25T00:45:00.000Z | W=lambda A:(ord(A[0])-ord('A'))*26*26+(ord(A[1])-ord('A'))*26+(ord(A[2])-ord('A'))
for _ in range(int(input())):
A,B=input().split('-')
if abs(W(A)-int(B))<=100: print('nice')
else: print('not nice') | 41 | 82 | 0.541463 |
614452a4592dbc40b2347050c52965b3e654fc09 | 961 | py | Python | database.py | sedat/yield_explorer | a1322c54ecef2f4d57c8739c8698594bca1079eb | [
"MIT"
] | 42 | 2021-02-27T20:07:46.000Z | 2022-02-27T12:51:44.000Z | database.py | ZenkoLab/yield_explorer | a1322c54ecef2f4d57c8739c8698594bca1079eb | [
"MIT"
] | 1 | 2021-03-05T22:04:17.000Z | 2021-03-14T18:36:33.000Z | database.py | ZenkoLab/yield_explorer | a1322c54ecef2f4d57c8739c8698594bca1079eb | [
"MIT"
] | 7 | 2021-03-03T02:02:33.000Z | 2021-11-29T14:13:05.000Z | import mysql.connector
# SECURITY NOTE(review): credentials are hard-coded (root/root); move them to
# environment variables or a config file before using outside local dev.
db = mysql.connector.connect(host='localhost',
                             user='root',
                             passwd='root',
                             auth_plugin='mysql_native_password',
                             db="yield_db")
cursor = db.cursor()
# Single-token pool deposits: one row per snapshot of amount/price/yield.
cursor.execute('CREATE TABLE IF NOT EXISTS yield_history ( id INT NOT NULL AUTO_INCREMENT, pool VARCHAR(50) NOT NULL, token_amount FLOAT NOT NULL, token_price FLOAT NOT NULL, deposit FLOAT NOT NULL, yield FLOAT NOT NULL, date datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY ( id ) )')
# Liquidity-pair deposits: same, but tracking both tokens of the LP position.
cursor.execute('CREATE TABLE IF NOT EXISTS lp_yield_history ( id INT NOT NULL AUTO_INCREMENT, pool VARCHAR(50) NOT NULL, first_token_amount FLOAT NOT NULL, second_token_amount FLOAT NOT NULL, first_token_price FLOAT NOT NULL, second_token_price FLOAT NOT NULL, deposit FLOAT NOT NULL, yield FLOAT NOT NULL, date datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY ( id ) )')
| 96.1 | 379 | 0.69719 |
a57874054f7d1858c2cbd401cd26e5f18f645745 | 2,620 | py | Python | src/reloadex/linux/ctypes_wrappers/_timerfd.py | iljau/reloadex | 829b173e65408a54acca36caefbfcfa5f645d8b1 | [
"MIT"
] | 1 | 2019-10-08T04:22:41.000Z | 2019-10-08T04:22:41.000Z | src/reloadex/linux/ctypes_wrappers/_timerfd.py | iljau/reloadex | 829b173e65408a54acca36caefbfcfa5f645d8b1 | [
"MIT"
] | null | null | null | src/reloadex/linux/ctypes_wrappers/_timerfd.py | iljau/reloadex | 829b173e65408a54acca36caefbfcfa5f645d8b1 | [
"MIT"
] | null | null | null | # https://github.com/yrro/pilight/blob/0d0b40e68d03fbbf01e496a6a8e12cac865a4810/ctimerfd.py
import ctypes
import ctypes.util
import os
import struct
from _ctypes import Structure, POINTER, get_errno
from ctypes import cdll, c_int, c_long, CDLL
from reloadex.linux.ctypes_wrappers.common import error_text, libc
__all__ = [
"CLOCK_MONOTONIC",
"TFD_CLOEXEC",
"TFD_NONBLOCK",
"timerfd_create",
"timerfd_settime",
"itimerspec",
"timespec",
"timerfd_read"
]
#linux/time.h
#49:#define CLOCK_MONOTONIC 1
CLOCK_MONOTONIC = 1
# # TFD_CLOEXEC = 02000000,
# #define TFD_CLOEXEC TFD_CLOEXEC
# # TFD_NONBLOCK = 00004000
# #define TFD_NONBLOCK TFD_NONBLOCK
TFD_CLOEXEC = os.O_CLOEXEC
TFD_NONBLOCK = os.O_NONBLOCK
# int timerfd_create(int clockid, int flags);
timerfd_create = libc.timerfd_create
timerfd_create.argtypes = [c_int, c_int]
def res_timerfd_create(fd):
    """restype hook for timerfd_create(2): pass a valid fd through,
    translate the C -1 error convention into an OSError carrying errno."""
    if fd != -1:
        assert fd >= 0
        return fd
    err = get_errno()
    raise OSError(err, error_text(err))
timerfd_create.restype = res_timerfd_create
# #define __SLONGWORD_TYPE long int
# # define __SYSCALL_SLONG_TYPE __SLONGWORD_TYPE
# #define __TIME_T_TYPE __SYSCALL_SLONG_TYPE
# __STD_TYPE __TIME_T_TYPE __time_t; /* Seconds since the Epoch. */
# /* Signed long type used in system calls. */
# __STD_TYPE __SYSCALL_SLONG_TYPE __syscall_slong_t;
# struct timespec
# {
# __time_t tv_sec; /* Seconds. */
# __syscall_slong_t tv_nsec; /* Nanoseconds. */
# };
class timespec(Structure):
_fields_ = [("tv_sec", c_long), ("tv_nsec", c_long)]
# struct itimerspec
# {
# struct timespec it_interval;
# struct timespec it_value;
# };
class itimerspec(Structure):
_fields_ = [("it_interval", timespec), ("it_value", timespec)]
# int timerfd_settime(int fd, int flags,
# const struct itimerspec *new_value,
# struct itimerspec *old_value);
timerfd_settime = libc.timerfd_settime
timerfd_settime.argtypes = [c_int, c_int, POINTER(itimerspec), POINTER(itimerspec)]
def res_timerfd_settime(r):
    """restype hook for timerfd_settime(2): map a -1 return code to an
    OSError built from errno, otherwise return the code unchanged."""
    failed = (r == -1)
    if failed:
        code = get_errno()
        raise OSError(code, error_text(code))
    assert r >= 0
    return r
timerfd_settime.restype = res_timerfd_settime
def timerfd_read(fd) -> int:
    """Drain a timerfd and return its expiration count.

    Per timerfd_create(2), read(2) on a timerfd yields an unsigned 8-byte
    integer in host byte order holding the number of timer expirations
    since the previous read.
    """
    raw = os.read(fd, 8)
    (expirations,) = struct.unpack("@Q", raw)
    return expirations
bea9dfcb2219e8532e7eb8004aada3b4cb8ce588 | 28,295 | py | Python | wandb/sdk/internal/handler.py | TachikakaMin/client | 27d1ef98285e3cb94881b370a8c37bfb310000c1 | [
"MIT"
] | 1 | 2021-11-15T08:26:28.000Z | 2021-11-15T08:26:28.000Z | wandb/sdk/internal/handler.py | webclinic017/client | 8225a30e2db2094d817d3048a66edfaa8803941c | [
"MIT"
] | null | null | null | wandb/sdk/internal/handler.py | webclinic017/client | 8225a30e2db2094d817d3048a66edfaa8803941c | [
"MIT"
] | null | null | null | #
# -*- coding: utf-8 -*-
"""Handle Manager."""
from __future__ import print_function
import json
import logging
import math
import numbers
import os
from threading import Event
import time
from typing import (
Any,
Callable,
cast,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
TYPE_CHECKING,
)
import six
from six.moves.queue import Queue
from wandb.proto import wandb_internal_pb2
from wandb.proto.wandb_internal_pb2 import Record, Result
from . import meta, sample, stats
from . import tb_watcher
from .settings_static import SettingsStatic
from ..interface.interface_queue import InterfaceQueue
from ..lib import handler_util, proto_util
SummaryDict = Dict[str, Any]
logger = logging.getLogger(__name__)
def _dict_nested_set(target: Dict[str, Any], key_list: Sequence[str], v: Any) -> None:
    """Set ``target[k0][k1]...[kN] = v``, creating intermediate dicts as needed.

    Args:
        target: dict mutated in place.
        key_list: non-empty key path; every key but the last selects (or
            creates) a nested dict.
        v: value written at the leaf position.
    """
    # dict.setdefault both inserts the empty dict when missing and returns
    # the stored value, so the previous setdefault + get + TYPE_CHECKING
    # cast sequence (whose cast branch never ran at runtime) is redundant.
    for k in key_list[:-1]:
        target = target.setdefault(k, {})
    # use the last element of the key to write the leaf:
    target[key_list[-1]] = v
class HandleManager(object):
_consolidated_summary: SummaryDict
_sampled_history: Dict[str, sample.UniformSampleAccumulator]
_settings: SettingsStatic
_record_q: "Queue[Record]"
_result_q: "Queue[Result]"
_stopped: Event
_sender_q: "Queue[Record]"
_writer_q: "Queue[Record]"
_interface: InterfaceQueue
_system_stats: Optional[stats.SystemStats]
_tb_watcher: Optional[tb_watcher.TBWatcher]
_metric_defines: Dict[str, wandb_internal_pb2.MetricRecord]
_metric_globs: Dict[str, wandb_internal_pb2.MetricRecord]
_metric_track: Dict[Tuple[str, ...], float]
_metric_copy: Dict[Tuple[str, ...], Any]
_track_time: Optional[float]
_accumulate_time: float
_artifact_xid_done: Dict[str, wandb_internal_pb2.ArtifactDoneRequest]
    def __init__(
        self,
        settings: SettingsStatic,
        record_q: "Queue[Record]",
        result_q: "Queue[Result]",
        stopped: Event,
        sender_q: "Queue[Record]",
        writer_q: "Queue[Record]",
        interface: InterfaceQueue,
    ) -> None:
        """Wire up the handler's queues and reset all per-run state.

        Args:
            settings: static run settings (offline flag, stats/meta toggles).
            record_q: inbound queue of records to handle.
            result_q: queue for responses to req/resp-style requests.
            stopped: event set once a shutdown request has been handled.
            sender_q: outbound queue consumed by the sender thread.
            writer_q: outbound queue consumed by the writer thread.
            interface: interface used to publish records back internally.
        """
        self._settings = settings
        self._record_q = record_q
        self._result_q = result_q
        self._stopped = stopped
        self._sender_q = sender_q
        self._writer_q = writer_q
        self._interface = interface
        # Created lazily in handle_request_run_start.
        self._tb_watcher = None
        self._system_stats = None
        # History step counter, advanced by _history_assign_step.
        self._step = 0
        # Wall-clock bookkeeping for run duration; see the pause/resume and
        # exit handlers.
        self._track_time = None
        self._accumulate_time = 0
        self._run_start_time = 0
        # keep track of summary from key/val updates
        self._consolidated_summary = dict()
        self._sampled_history = dict()
        self._metric_defines = dict()
        self._metric_globs = dict()
        self._metric_track = dict()
        self._metric_copy = dict()
        # TODO: implement release protocol to clean this up
        self._artifact_xid_done = dict()
def __len__(self) -> int:
return self._record_q.qsize()
def handle(self, record: Record) -> None:
record_type = record.WhichOneof("record_type")
assert record_type
handler_str = "handle_" + record_type
handler: Callable[[Record], None] = getattr(self, handler_str, None)
assert handler, "unknown handle: {}".format(handler_str)
handler(record)
def handle_request(self, record: Record) -> None:
request_type = record.request.WhichOneof("request_type")
assert request_type
handler_str = "handle_request_" + request_type
handler: Callable[[Record], None] = getattr(self, handler_str, None)
if request_type != "network_status":
logger.debug("handle_request: {}".format(request_type))
assert handler, "unknown handle: {}".format(handler_str)
handler(record)
    def _dispatch_record(self, record: Record, always_send: bool = False) -> None:
        """Forward a record to the sender and/or writer queues.

        Records go to the sender unless running offline (``always_send``
        overrides the offline check); records not flagged ``control.local``
        are also handed to the writer queue for persistence.
        """
        if not self._settings._offline or always_send:
            self._sender_q.put(record)
        if not record.control.local and self._writer_q:
            self._writer_q.put(record)
def debounce(self) -> None:
pass
def handle_request_defer(self, record: Record) -> None:
defer = record.request.defer
state = defer.state
logger.info("handle defer: {}".format(state))
# only handle flush tb (sender handles the rest)
if state == defer.FLUSH_STATS:
if self._system_stats:
# TODO(jhr): this could block so we dont really want to call shutdown
# from handler thread
self._system_stats.shutdown()
elif state == defer.FLUSH_TB:
if self._tb_watcher:
# shutdown tensorboard workers so we get all metrics flushed
self._tb_watcher.finish()
self._tb_watcher = None
elif state == defer.FLUSH_SUM:
self._save_summary(self._consolidated_summary, flush=True)
# defer is used to drive the sender finish state machine
self._dispatch_record(record, always_send=True)
    # The record types below need no local processing; each is simply
    # forwarded to the sender/writer threads via _dispatch_record.
    def handle_request_login(self, record: Record) -> None:
        self._dispatch_record(record)

    def handle_run(self, record: Record) -> None:
        self._dispatch_record(record)

    def handle_stats(self, record: Record) -> None:
        self._dispatch_record(record)

    def handle_config(self, record: Record) -> None:
        self._dispatch_record(record)

    def handle_output(self, record: Record) -> None:
        self._dispatch_record(record)

    def handle_files(self, record: Record) -> None:
        self._dispatch_record(record)

    def handle_artifact(self, record: Record) -> None:
        self._dispatch_record(record)

    def handle_alert(self, record: Record) -> None:
        self._dispatch_record(record)
def _save_summary(self, summary_dict: SummaryDict, flush: bool = False) -> None:
summary = wandb_internal_pb2.SummaryRecord()
for k, v in six.iteritems(summary_dict):
update = summary.update.add()
update.key = k
update.value_json = json.dumps(v)
record = wandb_internal_pb2.Record(summary=summary)
if flush:
self._dispatch_record(record)
elif not self._settings._offline:
self._sender_q.put(record)
def _save_history(self, record: Record) -> None:
for item in record.history.item:
# TODO(jhr) save nested keys?
k = item.key
v = json.loads(item.value_json)
if isinstance(v, numbers.Real):
self._sampled_history.setdefault(k, sample.UniformSampleAccumulator())
self._sampled_history[k].add(v)
def _update_summary_metrics(
self,
s: wandb_internal_pb2.MetricSummary,
kl: List[str],
v: "numbers.Real",
float_v: float,
goal_max: Optional[bool],
) -> bool:
updated = False
best_key: Optional[Tuple[str, ...]] = None
if s.none:
return False
if s.copy:
# non key list copy already done in _update_summary
if len(kl) > 1:
_dict_nested_set(self._consolidated_summary, kl, v)
return True
if s.last:
last_key = tuple(kl + ["last"])
old_last = self._metric_track.get(last_key)
if old_last is None or float_v != old_last:
self._metric_track[last_key] = float_v
_dict_nested_set(self._consolidated_summary, last_key, v)
updated = True
if s.best:
best_key = tuple(kl + ["best"])
if s.max or best_key and goal_max:
max_key = tuple(kl + ["max"])
old_max = self._metric_track.get(max_key)
if old_max is None or float_v > old_max:
self._metric_track[max_key] = float_v
if s.max:
_dict_nested_set(self._consolidated_summary, max_key, v)
updated = True
if best_key:
_dict_nested_set(self._consolidated_summary, best_key, v)
updated = True
# defaulting to minimize if goal is not supecified
if s.min or best_key and not goal_max:
min_key = tuple(kl + ["min"])
old_min = self._metric_track.get(min_key)
if old_min is None or float_v < old_min:
self._metric_track[min_key] = float_v
if s.min:
_dict_nested_set(self._consolidated_summary, min_key, v)
updated = True
if best_key:
_dict_nested_set(self._consolidated_summary, best_key, v)
updated = True
if s.mean:
tot_key = tuple(kl + ["tot"])
num_key = tuple(kl + ["num"])
avg_key = tuple(kl + ["mean"])
tot = self._metric_track.get(tot_key, 0.0)
num = self._metric_track.get(num_key, 0)
tot += float_v
num += 1
self._metric_track[tot_key] = tot
self._metric_track[num_key] = num
_dict_nested_set(self._consolidated_summary, avg_key, tot / num)
updated = True
return updated
def _update_summary_leaf(
self,
kl: List[str],
v: Any,
d: Optional[wandb_internal_pb2.MetricRecord] = None,
) -> bool:
has_summary = d and d.HasField("summary")
if len(kl) == 1:
copy_key = tuple(kl)
old_copy = self._metric_copy.get(copy_key)
if old_copy is None or v != old_copy:
self._metric_copy[copy_key] = v
# Store copy metric if not specified, or copy behavior
if not has_summary or (d and d.summary.copy):
self._consolidated_summary[kl[0]] = v
return True
if not d:
return False
if not has_summary:
return False
if not isinstance(v, numbers.Real):
return False
if math.isnan(v):
return False
float_v = float(v)
goal_max = None
if d.goal:
goal_max = d.goal == d.GOAL_MAXIMIZE
if self._update_summary_metrics(
d.summary, kl=kl, v=v, float_v=float_v, goal_max=goal_max
):
return True
return False
def _update_summary_list(
self,
kl: List[str],
v: Any,
d: Optional[wandb_internal_pb2.MetricRecord] = None,
) -> bool:
metric_key = ".".join([k.replace(".", "\\.") for k in kl])
d = self._metric_defines.get(metric_key, d)
# if the dict has _type key, its a wandb table object
if isinstance(v, dict) and not handler_util.metric_is_wandb_dict(v):
updated = False
for nk, nv in six.iteritems(v):
if self._update_summary_list(kl=kl[:] + [nk], v=nv, d=d):
updated = True
return updated
# If the dict is a media object, update the pointer to the latest alias
elif isinstance(v, dict) and handler_util.metric_is_wandb_dict(v):
if "_latest_artifact_path" in v and "artifact_path" in v:
# TODO: Make non-destructive?
v["artifact_path"] = v["_latest_artifact_path"]
updated = self._update_summary_leaf(kl=kl, v=v, d=d)
return updated
def _update_summary_media_objects(self, v: Dict[str, Any]) -> Dict[str, Any]:
# For now, non recursive - just top level
for nk, nv in six.iteritems(v):
if (
isinstance(nv, dict)
and handler_util.metric_is_wandb_dict(nv)
and "_latest_artifact_path" in nv
and "artifact_path" in nv
):
# TODO: Make non-destructive?
nv["artifact_path"] = nv["_latest_artifact_path"]
v[nk] = nv
return v
def _update_summary(self, history_dict: Dict[str, Any]) -> bool:
# keep old behavior fast path if no define metrics have been used
if not self._metric_defines:
history_dict = self._update_summary_media_objects(history_dict)
self._consolidated_summary.update(history_dict)
return True
updated = False
for k, v in six.iteritems(history_dict):
if self._update_summary_list(kl=[k], v=v):
updated = True
return updated
def _history_assign_step(self, record: Record, history_dict: Dict) -> None:
has_step = record.history.HasField("step")
item = record.history.item.add()
item.key = "_step"
if has_step:
step = record.history.step.num
history_dict["_step"] = step
item.value_json = json.dumps(step)
self._step = step + 1
else:
history_dict["_step"] = self._step
item.value_json = json.dumps(self._step)
self._step += 1
def _history_define_metric(
self, hkey: str
) -> Optional[wandb_internal_pb2.MetricRecord]:
"""check for hkey match in glob metrics, return defined metric."""
# Dont define metric for internal metrics
if hkey.startswith("_"):
return None
for k, mglob in six.iteritems(self._metric_globs):
if k.endswith("*"):
if hkey.startswith(k[:-1]):
m = wandb_internal_pb2.MetricRecord()
m.CopyFrom(mglob)
m.ClearField("glob_name")
m.options.defined = False
m.name = hkey
return m
return None
def _history_update_leaf(
self, kl: List[str], v: Any, history_dict: Dict, update_history: Dict[str, Any]
) -> None:
hkey = ".".join([k.replace(".", "\\.") for k in kl])
m = self._metric_defines.get(hkey)
if not m:
m = self._history_define_metric(hkey)
if not m:
return
mr = wandb_internal_pb2.Record()
mr.metric.CopyFrom(m)
mr.control.local = True # Dont store this, just send it
self._handle_defined_metric(mr)
if m.options.step_sync and m.step_metric:
if m.step_metric not in history_dict:
copy_key = tuple([m.step_metric])
step = self._metric_copy.get(copy_key)
if step is not None:
update_history[m.step_metric] = step
def _history_update_list(
self, kl: List[str], v: Any, history_dict: Dict, update_history: Dict[str, Any]
) -> None:
if isinstance(v, dict):
for nk, nv in six.iteritems(v):
self._history_update_list(
kl=kl[:] + [nk],
v=nv,
history_dict=history_dict,
update_history=update_history,
)
return
self._history_update_leaf(
kl=kl, v=v, history_dict=history_dict, update_history=update_history
)
def _history_update(self, record: Record, history_dict: Dict) -> None:
# if syncing an old run, we can skip this logic
if history_dict.get("_step") is None:
self._history_assign_step(record, history_dict)
update_history: Dict[str, Any] = {}
# Look for metric matches
if self._metric_defines or self._metric_globs:
for hkey, hval in six.iteritems(history_dict):
self._history_update_list([hkey], hval, history_dict, update_history)
if update_history:
history_dict.update(update_history)
for k, v in six.iteritems(update_history):
item = record.history.item.add()
item.key = k
item.value_json = json.dumps(v)
def handle_history(self, record: Record) -> None:
history_dict = proto_util.dict_from_proto_list(record.history.item)
# Inject _runtime if it is not present
if history_dict is not None:
if "_runtime" not in history_dict:
self._history_assign_runtime(record, history_dict)
self._history_update(record, history_dict)
self._dispatch_record(record)
self._save_history(record)
updated = self._update_summary(history_dict)
if updated:
self._save_summary(self._consolidated_summary)
def handle_summary(self, record: Record) -> None:
summary = record.summary
for item in summary.update:
if len(item.nested_key) > 0:
# we use either key or nested_key -- not both
assert item.key == ""
key = tuple(item.nested_key)
else:
# no counter-assertion here, because technically
# summary[""] is valid
key = (item.key,)
target = self._consolidated_summary
# recurse down the dictionary structure:
for prop in key[:-1]:
target = target[prop]
# use the last element of the key to write the leaf:
target[key[-1]] = json.loads(item.value_json)
for item in summary.remove:
if len(item.nested_key) > 0:
# we use either key or nested_key -- not both
assert item.key == ""
key = tuple(item.nested_key)
else:
# no counter-assertion here, because technically
# summary[""] is valid
key = (item.key,)
target = self._consolidated_summary
# recurse down the dictionary structure:
for prop in key[:-1]:
target = target[prop]
# use the last element of the key to erase the leaf:
del target[key[-1]]
self._save_summary(self._consolidated_summary)
    def handle_exit(self, record: Record) -> None:
        """Stamp the total accumulated runtime onto the exit record and send it."""
        # Fold in the time elapsed since the last pause/resume boundary.
        if self._track_time is not None:
            self._accumulate_time += time.time() - self._track_time
        record.exit.runtime = int(self._accumulate_time)
        self._dispatch_record(record, always_send=True)
def handle_final(self, record: Record) -> None:
self._dispatch_record(record, always_send=True)
def handle_preempting(self, record: Record) -> None:
self._dispatch_record(record)
def handle_header(self, record: Record) -> None:
self._dispatch_record(record)
def handle_footer(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_check_version(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_attach(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_log_artifact(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_artifact_send(self, record: Record) -> None:
assert record.control.req_resp
result = wandb_internal_pb2.Result(uuid=record.uuid)
self._dispatch_record(record)
# send response immediately, the request will be polled for result
xid = record.uuid
result.response.artifact_send_response.xid = xid
self._result_q.put(result)
def handle_request_artifact_poll(self, record: Record) -> None:
assert record.control.req_resp
xid = record.request.artifact_poll.xid
assert xid
result = wandb_internal_pb2.Result(uuid=record.uuid)
done_req = self._artifact_xid_done.get(xid)
if done_req:
result.response.artifact_poll_response.artifact_id = done_req.artifact_id
result.response.artifact_poll_response.error_message = (
done_req.error_message
)
result.response.artifact_poll_response.ready = True
self._result_q.put(result)
def handle_request_artifact_done(self, record: Record) -> None:
assert not record.control.req_resp
done_req = record.request.artifact_done
xid = done_req.xid
assert xid
self._artifact_xid_done[xid] = done_req
# def handle_request_artifact_release(self, record: Record) -> None:
# assert record.control.req_resp
# # TODO: implement release protocol to clean up _artifact_xid_done dict
def handle_telemetry(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_run_start(self, record: Record) -> None:
run_start = record.request.run_start
assert run_start
assert run_start.run
self._run_start_time = run_start.run.start_time.ToSeconds()
self._track_time = time.time()
if run_start.run.resumed and run_start.run.runtime:
self._accumulate_time = run_start.run.runtime
else:
self._accumulate_time = 0
if not self._settings._disable_stats:
pid = os.getpid()
self._system_stats = stats.SystemStats(pid=pid, interface=self._interface)
self._system_stats.start()
if not self._settings._disable_meta and not run_start.run.resumed:
run_meta = meta.Meta(settings=self._settings, interface=self._interface)
run_meta.probe()
run_meta.write()
self._tb_watcher = tb_watcher.TBWatcher(
self._settings, interface=self._interface, run_proto=run_start.run
)
if run_start.run.resumed:
self._step = run_start.run.starting_step
result = wandb_internal_pb2.Result(uuid=record.uuid)
self._result_q.put(result)
def handle_request_resume(self, record: Record) -> None:
if self._system_stats is not None:
logger.info("starting system metrics thread")
self._system_stats.start()
if self._track_time is not None:
self._accumulate_time += time.time() - self._track_time
self._track_time = time.time()
def handle_request_pause(self, record: Record) -> None:
if self._system_stats is not None:
logger.info("stopping system metrics thread")
self._system_stats.shutdown()
if self._track_time is not None:
self._accumulate_time += time.time() - self._track_time
self._track_time = None
def handle_request_poll_exit(self, record: Record) -> None:
self._dispatch_record(record, always_send=True)
def handle_request_stop_status(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_network_status(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_status(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_get_summary(self, record: Record) -> None:
result = wandb_internal_pb2.Result(uuid=record.uuid)
for key, value in six.iteritems(self._consolidated_summary):
item = wandb_internal_pb2.SummaryItem()
item.key = key
item.value_json = json.dumps(value)
result.response.get_summary_response.item.append(item)
self._result_q.put(result)
def handle_tbrecord(self, record: Record) -> None:
logger.info("handling tbrecord: %s", record)
if self._tb_watcher:
tbrecord = record.tbrecord
self._tb_watcher.add(tbrecord.log_dir, tbrecord.save, tbrecord.root_dir)
self._dispatch_record(record)
def _handle_defined_metric(self, record: wandb_internal_pb2.Record) -> None:
metric = record.metric
if metric._control.overwrite:
self._metric_defines.setdefault(
metric.name, wandb_internal_pb2.MetricRecord()
).CopyFrom(metric)
else:
self._metric_defines.setdefault(
metric.name, wandb_internal_pb2.MetricRecord()
).MergeFrom(metric)
# before dispatching, make sure step_metric is defined, if not define it and
# dispatch it locally first
metric = self._metric_defines[metric.name]
if metric.step_metric and metric.step_metric not in self._metric_defines:
m = wandb_internal_pb2.MetricRecord(name=metric.step_metric)
self._metric_defines[metric.step_metric] = m
mr = wandb_internal_pb2.Record()
mr.metric.CopyFrom(m)
mr.control.local = True # Dont store this, just send it
self._dispatch_record(mr)
self._dispatch_record(record)
def _handle_glob_metric(self, record: wandb_internal_pb2.Record) -> None:
metric = record.metric
if metric._control.overwrite:
self._metric_globs.setdefault(
metric.glob_name, wandb_internal_pb2.MetricRecord()
).CopyFrom(metric)
else:
self._metric_globs.setdefault(
metric.glob_name, wandb_internal_pb2.MetricRecord()
).MergeFrom(metric)
self._dispatch_record(record)
def handle_metric(self, record: Record) -> None:
"""Handle MetricRecord.
Walkthrough of the life of a MetricRecord:
Metric defined:
- run.define_metric() parses arguments create wandb_metric.Metric
- build MetricRecord publish to interface
- handler (this function) keeps list of metrics published:
- self._metric_defines: Fully defined metrics
- self._metric_globs: metrics that have a wildcard
- dispatch writer and sender thread
- writer: records are saved to persistent store
- sender: fully defined metrics get mapped into metadata for UI
History logged:
- handle_history
- check if metric matches _metric_defines
- if not, check if metric matches _metric_globs
- if _metric globs match, generate defined metric and call _handle_metric
Args:
record (Record): Metric record to process
"""
if record.metric.name:
self._handle_defined_metric(record)
elif record.metric.glob_name:
self._handle_glob_metric(record)
def handle_request_sampled_history(self, record: Record) -> None:
result = wandb_internal_pb2.Result(uuid=record.uuid)
for key, sampled in six.iteritems(self._sampled_history):
item = wandb_internal_pb2.SampledHistoryItem()
item.key = key
values: Iterable[Any] = sampled.get()
if all(isinstance(i, numbers.Integral) for i in values):
item.values_int.extend(values)
elif all(isinstance(i, numbers.Real) for i in values):
item.values_float.extend(values)
result.response.sampled_history_response.item.append(item)
self._result_q.put(result)
def handle_request_shutdown(self, record: Record) -> None:
# TODO(jhr): should we drain things and stop new requests from coming in?
result = wandb_internal_pb2.Result(uuid=record.uuid)
self._result_q.put(result)
self._stopped.set()
def finish(self) -> None:
logger.info("shutting down handler")
if self._tb_watcher:
self._tb_watcher.finish()
def __next__(self) -> Record:
return self._record_q.get(block=True)
next = __next__
    def _history_assign_runtime(self, record: Record, history_dict: Dict) -> None:
        """Derive ``_runtime`` (whole seconds since run start) from ``_timestamp``
        and append it both to ``history_dict`` and to the history record."""
        # _runtime calculation is meaningless if there is no _timestamp
        if "_timestamp" not in history_dict:
            return
        # if it is offline sync, self._run_start_time is 0
        # in that case set it to the first tfevent timestamp
        if self._run_start_time == 0:
            self._run_start_time = history_dict["_timestamp"]
        history_dict["_runtime"] = int(
            history_dict["_timestamp"] - self._run_start_time
        )
        item = record.history.item.add()
        item.key = "_runtime"
        item.value_json = json.dumps(history_dict[item.key])
| 37.526525 | 87 | 0.614667 |
9a13f1781ca9ffb06117a903e82b84e7ae4f62aa | 548 | py | Python | tests/test_execute_sql.py | changrunner/zeppos_ms_sql_server_proxy | a32464c5d2bde425322534aacf30669b909ac984 | [
"Apache-2.0"
] | null | null | null | tests/test_execute_sql.py | changrunner/zeppos_ms_sql_server_proxy | a32464c5d2bde425322534aacf30669b909ac984 | [
"Apache-2.0"
] | null | null | null | tests/test_execute_sql.py | changrunner/zeppos_ms_sql_server_proxy | a32464c5d2bde425322534aacf30669b909ac984 | [
"Apache-2.0"
] | null | null | null | import unittest
from zeppos_ms_sql_server_proxy.execute_sql import ExecuteSql
class TestTheProjectMethods(unittest.TestCase):
    """Integration tests for ExecuteSql.

    NOTE(review): these require a local SQLEXPRESS instance with ODBC
    Driver 17 and a trusted connection; they are not isolated unit tests.
    """

    def test_get_execute_methods(self):
        """Run a stored procedure and verify the proxy's success status."""
        execute_sql = ExecuteSql.execute(
            connection_string="DRIVER={ODBC Driver 17 for SQL Server}; SERVER=localhost\sqlexpress; DATABASE=master; Trusted_Connection=yes;App=Test;",
            execute_statement="execute [sys].[sp_columns] 'spt_monitor'"
        )
        # 201 appears to be the proxy's success code (presumably mirroring
        # HTTP 201) -- TODO confirm against the ExecuteSql implementation.
        self.assertEqual(201, execute_sql.status_code)
if __name__ == '__main__':
unittest.main() | 36.533333 | 151 | 0.737226 |
db6013b6bef07605c71053edeffbe6660ca4bc91 | 1,647 | py | Python | tests/test_environments/test_atari_wrappers.py | mehulrastogi/genrl | 589a3619497b38eaa376168c78b9f84d4ff434e5 | [
"MIT"
] | 1 | 2020-06-20T21:20:14.000Z | 2020-06-20T21:20:14.000Z | tests/test_environments/test_atari_wrappers.py | mehulrastogi/genrl | 589a3619497b38eaa376168c78b9f84d4ff434e5 | [
"MIT"
] | 3 | 2020-08-21T08:58:44.000Z | 2020-09-01T11:22:38.000Z | tests/test_environments/test_atari_wrappers.py | mehulrastogi/genrl | 589a3619497b38eaa376168c78b9f84d4ff434e5 | [
"MIT"
] | null | null | null | import shutil
import gym
from genrl.agents import DQN
from genrl.environments import AtariEnv, AtariPreprocessing, FrameStack, VectorEnv
from genrl.trainers import OffPolicyTrainer
class TestAtari:
    """Tests for the Atari environment wrappers."""

    def _step_and_check(self, atari_env, expected_shape):
        """Reset and step a wrapped env, asserting observation shape and
        the types returned by step(); closes the env afterwards.

        Extracted because test_atari_preprocessing and test_framestack
        previously duplicated this assertion sequence line for line.
        """
        state = atari_env.reset()
        assert state.shape == expected_shape
        action = atari_env.action_space.sample()
        state, reward, done, info = atari_env.step(action)
        assert state.shape == expected_shape
        assert isinstance(reward, float)
        assert isinstance(done, bool)
        assert isinstance(info, dict)
        atari_env.close()

    def test_atari_preprocessing(self):
        """
        Tests Atari Preprocessing wrapper
        """
        # AtariPreprocessing downscales observations to 84x84 grayscale.
        self._step_and_check(AtariPreprocessing(gym.make("Pong-v0")), (84, 84))

    def test_framestack(self):
        """
        Tests Frame Stack wrapper
        """
        # FrameStack stacks 4 raw (210, 160, 3) frames with a batch axis.
        self._step_and_check(FrameStack(gym.make("Pong-v0")), (1, 4, 210, 160, 3))

    def test_atari_env(self):
        """
        Tests working of Atari Wrappers and the AtariEnv function
        """
        env = VectorEnv("Pong-v0", env_type="atari")
        algo = DQN("cnn", env, replay_size=100)
        trainer = OffPolicyTrainer(algo, env, epochs=5, max_ep_len=200)
        trainer.train()
        shutil.rmtree("./logs")
| 29.945455 | 82 | 0.615058 |
676de45e3e58064fe3a0a3413c4176a63e7c5484 | 1,385 | py | Python | python/tests/kat/t_headerswithunderscoresaction.py | Asher-Wang/ambassador | 393a52832d081e0d8d0e0ecd5a14cfe18c62b837 | [
"Apache-2.0"
] | null | null | null | python/tests/kat/t_headerswithunderscoresaction.py | Asher-Wang/ambassador | 393a52832d081e0d8d0e0ecd5a14cfe18c62b837 | [
"Apache-2.0"
] | 190 | 2021-04-22T11:35:09.000Z | 2022-03-30T22:12:03.000Z | python/tests/kat/t_headerswithunderscoresaction.py | Asher-Wang/ambassador | 393a52832d081e0d8d0e0ecd5a14cfe18c62b837 | [
"Apache-2.0"
] | null | null | null | from kat.harness import Query
from abstract_tests import AmbassadorTest, ServiceType, HTTP
import json
class AllowHeadersWithUnderscoresTest(AmbassadorTest):
    """With no Module config, request headers containing underscores are
    allowed and the request succeeds (HTTP 200)."""

    target: ServiceType

    def init(self):
        self.target = HTTP(name="target")

    def config(self):
        # A plain Mapping with no headers_with_underscores_action setting.
        yield self, self.format("""
---
apiVersion: ambassador/v2
kind: Mapping
name: config__dump
ambassador_id: {self.ambassador_id}
prefix: /target/
service: http://{self.target.path.fqdn}
""")

    def queries(self):
        # Send a header whose name contains an underscore.
        yield Query(self.url("target/"), expected=200, headers={'t_underscore':'foo'})

    def check(self):
        assert self.results[0].status == 200
class RejectHeadersWithUnderscoresTest(AmbassadorTest):
    """With headers_with_underscores_action: REJECT_REQUEST configured on
    the ambassador Module, a request carrying an underscore header is
    rejected with HTTP 400."""

    target: ServiceType

    def init(self):
        self.target = HTTP(name="target")

    def config(self):
        # Module-level setting that makes underscore headers a hard error.
        yield self, self.format("""
---
apiVersion: ambassador/v2
kind: Module
name: ambassador
ambassador_id: {self.ambassador_id}
config:
  headers_with_underscores_action: REJECT_REQUEST
""")
        # NOTE(review): Mapping name config__dump duplicates the one in
        # AllowHeadersWithUnderscoresTest -- presumably scoped per
        # ambassador_id; confirm against the kat harness.
        yield self, self.format("""
---
apiVersion: ambassador/v2
kind: Mapping
name: config__dump
ambassador_id: {self.ambassador_id}
prefix: /target/
service: http://{self.target.path.fqdn}
""")

    def queries(self):
        # The underscore header should now cause a rejection.
        yield Query(self.url("target/"), expected=400, headers={'t_underscore':'foo'})

    def check(self):
        assert self.results[0].status == 400
| 23.474576 | 86 | 0.693863 |
8caf6c408fddbe4a2d645a10a499ae2fdfc94a3b | 417 | py | Python | test/start_server_with_wsgi_app.py | Rollmops/restit | ddc0fc3a4bf0ffed02c59cce5e7a07b3737e1874 | [
"MIT"
] | 3 | 2020-03-08T19:44:32.000Z | 2020-03-09T19:46:15.000Z | test/start_server_with_wsgi_app.py | Rollmops/restit | ddc0fc3a4bf0ffed02c59cce5e7a07b3737e1874 | [
"MIT"
] | 11 | 2020-03-17T14:50:07.000Z | 2020-04-03T11:20:30.000Z | test/start_server_with_wsgi_app.py | Rollmops/restit | ddc0fc3a4bf0ffed02c59cce5e7a07b3737e1874 | [
"MIT"
] | null | null | null | from contextlib import contextmanager
from threading import Thread
from wsgiref.simple_server import make_server
@contextmanager
def start_server_with_wsgi_app(wsgi_app):
with make_server('', 0, wsgi_app) as httpd:
thread = Thread(target=httpd.serve_forever)
thread.start()
try:
yield httpd.server_port
finally:
httpd.shutdown()
thread.join()
| 26.0625 | 51 | 0.678657 |
505b75e7a1ec320ca81f0af625182db67d6153ee | 1,139 | py | Python | password_manager.py | ercantncy/python-mini-projects | 6eecee7f7f730baeea6105cd30828e0160915c6a | [
"MIT"
] | 1 | 2021-10-05T22:31:00.000Z | 2021-10-05T22:31:00.000Z | password_manager.py | ercantncy/python-mini-projects | 6eecee7f7f730baeea6105cd30828e0160915c6a | [
"MIT"
] | null | null | null | password_manager.py | ercantncy/python-mini-projects | 6eecee7f7f730baeea6105cd30828e0160915c6a | [
"MIT"
] | null | null | null | from cryptography.fernet import Fernet
#used just once to generate key
'''
def write_key():
key = Fernet.generate_key()
with open("key.key", "wb") as key_file:
key_file.write(key)
write_key()
'''
def load_key():
    """Return the Fernet key bytes stored in ``key.key``.

    Raises:
        FileNotFoundError: if the key file has not been generated yet.
    """
    # A context manager guarantees the handle is closed even if read()
    # raises, unlike the previous open()/read()/close() sequence.
    with open("key.key", "rb") as key_file:
        return key_file.read()
# Load the persisted key once at import time; all encryption/decryption in
# this module goes through this single Fernet instance.
key = load_key()
fer = Fernet(key)
def view():
    """Print every stored account name with its decrypted password.

    Reads passwords.txt, where each line is ``name|<fernet token>``.
    """
    with open('passwords.txt', 'r') as f:
        # Iterate the file lazily instead of materializing readlines().
        for line in f:
            data = line.rstrip()
            if not data:
                # Skip blank lines so a trailing newline cannot crash the loop.
                continue
            # maxsplit=1 keeps any further "|" characters inside the token,
            # so a "|" in the stored name/token no longer raises ValueError.
            user, passw = data.split("|", 1)
            print("User:", user, "| Password:",
                  fer.decrypt(passw.encode()).decode())
def add():
    """Prompt for an account name and password, then append the entry
    (password encrypted with the module-level Fernet cipher)."""
    name = input('Account Name: ')
    pwd = input("Password: ")
    token = fer.encrypt(pwd.encode()).decode()
    with open('passwords.txt', 'a') as f:
        f.write(name + "|" + token + "\n")
# Simple interactive loop: keep prompting until the user quits with "q".
while True:
    mode = input(
        "Would you like to add a new password or view existing ones (view, add), press q to quit? ").lower()
    if mode == "q":
        break
    if mode == "view":
        view()
    elif mode == "add":
        add()
    else:
        # Any other input is rejected and the prompt is shown again.
        print("Invalid mode.")
        continue
625e99e8fdfa4f52b0d31aad8e2c2fb931d7d5ed | 221 | py | Python | myblog/home/urls.py | wuhongchuan/myblog | 1879101e8e5ff7535149784af9a16f7dfc8411c6 | [
"MIT"
] | null | null | null | myblog/home/urls.py | wuhongchuan/myblog | 1879101e8e5ff7535149784af9a16f7dfc8411c6 | [
"MIT"
] | null | null | null | myblog/home/urls.py | wuhongchuan/myblog | 1879101e8e5ff7535149784af9a16f7dfc8411c6 | [
"MIT"
] | null | null | null | from django.urls import path
from home.views import IndexView,DetailView
urlpatterns = [
    # Route for the home page (IndexView)
    path('',IndexView.as_view(),name='index'),
    # Route for the detail view (DetailView)
    path('detail/',DetailView.as_view(),name='detail'),
]
debda15bea3c32f967e36ab73b2ccdfc25d46259 | 2,702 | py | Python | phablytics/web/utils.py | hacktoolkit/phablytics | 977a85b0e5035ef93dd8b745673248c983bbc294 | [
"MIT"
] | 3 | 2020-06-22T22:05:08.000Z | 2020-10-27T19:01:40.000Z | phablytics/web/utils.py | hacktoolkit/phablytics | 977a85b0e5035ef93dd8b745673248c983bbc294 | [
"MIT"
] | 2 | 2020-12-22T22:15:31.000Z | 2021-12-14T03:47:19.000Z | phablytics/web/utils.py | hacktoolkit/phablytics | 977a85b0e5035ef93dd8b745673248c983bbc294 | [
"MIT"
] | 4 | 2020-08-27T15:56:01.000Z | 2021-05-07T18:03:11.000Z | # Python Standard Library Imports
import copy
# Third Party (PyPI) Imports
from flask import (
abort,
render_template,
request,
)
from jinja2 import TemplateNotFound
# Phablytics Imports
import phablytics
from phablytics.constants import GITHUB_URL
from phablytics.settings import (
ADMIN_USERNAME,
CUSTOM_STYLESHEETS,
PHABRICATOR_INSTANCE_BASE_URL,
)
from phablytics.web.constants import (
BREADCRUMBS,
NAV_LINKS,
SITE_NAME,
)
def custom_render_template(template_name, context_data=None):
    """Render *template_name* with the site-wide context merged in.

    Values from get_context_data() take precedence over caller-supplied
    keys. Aborts with a 404 when the template does not exist.
    """
    ctx = context_data if context_data is not None else {}
    ctx.update(get_context_data())
    try:
        return render_template(template_name, **ctx)
    except TemplateNotFound:
        abort(404)
def get_context_data():
    """Assemble the template context shared by every page."""
    links = get_nav_links()
    # The nav entry matching the current request path, if any.
    current_page = next((link for link in links if link['active']), None)
    crumbs = get_breadcrumbs()

    if current_page:
        title = f'{current_page["name"]} | {SITE_NAME}'
    else:
        title = SITE_NAME

    return {
        # customizations
        'custom_stylesheets': CUSTOM_STYLESHEETS,

        # page meta
        'nav_links': links,
        'breadcrumbs': crumbs,
        'page_title': title,

        # general stuff
        'admin_username': ADMIN_USERNAME,
        'github_url': GITHUB_URL,
        'phablytics_version': phablytics.__version__,
        'phabricator_instance_base_url': PHABRICATOR_INSTANCE_BASE_URL,
    }
def get_nav_links():
    """Return deep copies of NAV_LINKS, each flagged 'active' when its
    path equals the current request path."""
    links = []
    for entry in NAV_LINKS:
        link = copy.deepcopy(entry)
        link['active'] = link['path'] == request.path
        links.append(link)
    return links
def get_breadcrumbs():
    """Build one breadcrumb entry per segment of the current request path.

    A trailing slash is dropped, names come from the BREADCRUMBS mapping
    (falling back to a title-cased segment), and the last crumb is active.
    """
    segments = request.path.split('/')
    if len(segments) > 1 and segments[-1] == '':
        segments = segments[:-1]

    crumbs = []
    for index, segment in enumerate(segments):
        url = '/'.join(segments[:index + 1]) or '/'
        crumbs.append({
            'name': BREADCRUMBS.get(url, segment.title()),
            'url': url,
            'is_active': index + 1 == len(segments),
        })
    return crumbs
def format_choices(options, include_blank=False):
    """Turn *options* into (value, label) choice pairs, optionally
    prepending a blank ('', '') choice."""
    pairs = [(item, item) for item in options]
    if include_blank:
        return [('', '')] + pairs
    return pairs
| 24.563636 | 71 | 0.646558 |
4bb70f56ac67831e8a3c2afb603fcf635938c5a3 | 18,074 | py | Python | scenegraph/exp-official/taskographyv5tiny5bagslots5_ploi/ploi_test_stats.py | taskography/3dscenegraph-dev | 2c261241230fbea1f1c687ff793478248f25c02c | [
"MIT"
] | 1 | 2022-01-30T22:06:57.000Z | 2022-01-30T22:06:57.000Z | scenegraph/exp-official/taskographyv5tiny5bagslots5_ploi/ploi_test_stats.py | taskography/3dscenegraph-dev | 2c261241230fbea1f1c687ff793478248f25c02c | [
"MIT"
] | null | null | null | scenegraph/exp-official/taskographyv5tiny5bagslots5_ploi/ploi_test_stats.py | taskography/3dscenegraph-dev | 2c261241230fbea1f1c687ff793478248f25c02c | [
"MIT"
] | null | null | null | STATS = [
{
"num_node_expansions": 0,
"search_time": 0.0209646,
"total_time": 0.12656,
"plan_length": 63,
"plan_cost": 63,
"objects_used": 258,
"objects_total": 379,
"neural_net_time": 0.07623124122619629,
"num_replanning_steps": 8,
"wall_time": 4.065988540649414
},
{
"num_node_expansions": 0,
"search_time": 0.0256551,
"total_time": 0.155697,
"plan_length": 47,
"plan_cost": 47,
"objects_used": 272,
"objects_total": 379,
"neural_net_time": 0.02973008155822754,
"num_replanning_steps": 8,
"wall_time": 4.64400839805603
},
{
"num_node_expansions": 0,
"search_time": 0.230461,
"total_time": 0.419994,
"plan_length": 105,
"plan_cost": 105,
"objects_used": 264,
"objects_total": 379,
"neural_net_time": 0.03020310401916504,
"num_replanning_steps": 6,
"wall_time": 3.95487117767334
},
{
"num_node_expansions": 0,
"search_time": 0.0373201,
"total_time": 0.174235,
"plan_length": 59,
"plan_cost": 59,
"objects_used": 237,
"objects_total": 379,
"neural_net_time": 0.029895305633544922,
"num_replanning_steps": 9,
"wall_time": 5.866229295730591
},
{
"num_node_expansions": 0,
"search_time": 0.609119,
"total_time": 0.787772,
"plan_length": 123,
"plan_cost": 123,
"objects_used": 269,
"objects_total": 379,
"neural_net_time": 0.03049182891845703,
"num_replanning_steps": 8,
"wall_time": 5.864126443862915
},
{
"num_node_expansions": 0,
"search_time": 0.0102327,
"total_time": 0.0652236,
"plan_length": 50,
"plan_cost": 50,
"objects_used": 133,
"objects_total": 217,
"neural_net_time": 0.017711877822875977,
"num_replanning_steps": 10,
"wall_time": 3.011147975921631
},
{
"num_node_expansions": 0,
"search_time": 0.0117884,
"total_time": 0.0989192,
"plan_length": 46,
"plan_cost": 46,
"objects_used": 141,
"objects_total": 217,
"neural_net_time": 0.018294095993041992,
"num_replanning_steps": 11,
"wall_time": 3.5662343502044678
},
{
"num_node_expansions": 0,
"search_time": 0.0269455,
"total_time": 0.10313,
"plan_length": 46,
"plan_cost": 46,
"objects_used": 140,
"objects_total": 217,
"neural_net_time": 0.01698470115661621,
"num_replanning_steps": 7,
"wall_time": 4.087600946426392
},
{
"num_node_expansions": 0,
"search_time": 0.609972,
"total_time": 0.736255,
"plan_length": 111,
"plan_cost": 111,
"objects_used": 154,
"objects_total": 217,
"neural_net_time": 0.0169219970703125,
"num_replanning_steps": 23,
"wall_time": 14.97317910194397
},
{
"num_node_expansions": 0,
"search_time": 0.00625033,
"total_time": 0.0404249,
"plan_length": 48,
"plan_cost": 48,
"objects_used": 125,
"objects_total": 217,
"neural_net_time": 0.01748800277709961,
"num_replanning_steps": 4,
"wall_time": 1.2836649417877197
},
{
"num_node_expansions": 0,
"search_time": 0.0260811,
"total_time": 0.147736,
"plan_length": 48,
"plan_cost": 48,
"objects_used": 208,
"objects_total": 320,
"neural_net_time": 0.02480316162109375,
"num_replanning_steps": 7,
"wall_time": 3.9755640029907227
},
{
"num_node_expansions": 0,
"search_time": 3.91758,
"total_time": 3.95436,
"plan_length": 90,
"plan_cost": 90,
"objects_used": 182,
"objects_total": 320,
"neural_net_time": 0.02401447296142578,
"num_replanning_steps": 4,
"wall_time": 9.07741641998291
},
{
"num_node_expansions": 0,
"search_time": 0.00889858,
"total_time": 0.0452188,
"plan_length": 63,
"plan_cost": 63,
"objects_used": 155,
"objects_total": 320,
"neural_net_time": 0.024259090423583984,
"num_replanning_steps": 2,
"wall_time": 0.92185378074646
},
{
"num_node_expansions": 0,
"search_time": 0.0180282,
"total_time": 0.0763766,
"plan_length": 67,
"plan_cost": 67,
"objects_used": 179,
"objects_total": 320,
"neural_net_time": 0.024612903594970703,
"num_replanning_steps": 7,
"wall_time": 2.577061653137207
},
{
"num_node_expansions": 0,
"search_time": 0.0203576,
"total_time": 0.0756848,
"plan_length": 83,
"plan_cost": 83,
"objects_used": 176,
"objects_total": 320,
"neural_net_time": 0.023926973342895508,
"num_replanning_steps": 4,
"wall_time": 1.5595667362213135
},
{
"num_node_expansions": 0,
"search_time": 0.012372,
"total_time": 0.0982573,
"plan_length": 60,
"plan_cost": 60,
"objects_used": 180,
"objects_total": 305,
"neural_net_time": 0.02280902862548828,
"num_replanning_steps": 5,
"wall_time": 2.042884349822998
},
{
"num_node_expansions": 0,
"search_time": 0.0113107,
"total_time": 0.0504191,
"plan_length": 80,
"plan_cost": 80,
"objects_used": 172,
"objects_total": 305,
"neural_net_time": 0.021673917770385742,
"num_replanning_steps": 6,
"wall_time": 1.794670820236206
},
{
"num_node_expansions": 0,
"search_time": 0.0449317,
"total_time": 0.118402,
"plan_length": 74,
"plan_cost": 74,
"objects_used": 199,
"objects_total": 305,
"neural_net_time": 0.022764205932617188,
"num_replanning_steps": 4,
"wall_time": 2.6170334815979004
},
{
"num_node_expansions": 0,
"search_time": 0.029719,
"total_time": 0.0637733,
"plan_length": 114,
"plan_cost": 114,
"objects_used": 126,
"objects_total": 212,
"neural_net_time": 0.015989303588867188,
"num_replanning_steps": 3,
"wall_time": 1.119013786315918
},
{
"num_node_expansions": 0,
"search_time": 0.0120378,
"total_time": 0.0433926,
"plan_length": 87,
"plan_cost": 87,
"objects_used": 124,
"objects_total": 212,
"neural_net_time": 0.01587510108947754,
"num_replanning_steps": 3,
"wall_time": 1.0580189228057861
},
{
"num_node_expansions": 0,
"search_time": 0.00978446,
"total_time": 0.057194,
"plan_length": 61,
"plan_cost": 61,
"objects_used": 192,
"objects_total": 365,
"neural_net_time": 0.027842044830322266,
"num_replanning_steps": 3,
"wall_time": 1.1946721076965332
},
{
"num_node_expansions": 0,
"search_time": 0.0628213,
"total_time": 0.171589,
"plan_length": 102,
"plan_cost": 102,
"objects_used": 227,
"objects_total": 365,
"neural_net_time": 0.02844071388244629,
"num_replanning_steps": 4,
"wall_time": 2.3115644454956055
},
{
"num_node_expansions": 0,
"search_time": 0.00812991,
"total_time": 0.0457111,
"plan_length": 67,
"plan_cost": 67,
"objects_used": 191,
"objects_total": 365,
"neural_net_time": 0.028986692428588867,
"num_replanning_steps": 3,
"wall_time": 1.1468777656555176
},
{
"num_node_expansions": 0,
"search_time": 0.0196922,
"total_time": 0.0798287,
"plan_length": 60,
"plan_cost": 60,
"objects_used": 208,
"objects_total": 365,
"neural_net_time": 0.029392004013061523,
"num_replanning_steps": 6,
"wall_time": 2.3833539485931396
},
{
"num_node_expansions": 0,
"search_time": 0.079238,
"total_time": 0.156227,
"plan_length": 72,
"plan_cost": 72,
"objects_used": 181,
"objects_total": 302,
"neural_net_time": 0.023373126983642578,
"num_replanning_steps": 5,
"wall_time": 1.825868844985962
},
{
"num_node_expansions": 0,
"search_time": 0.715652,
"total_time": 0.79719,
"plan_length": 51,
"plan_cost": 51,
"objects_used": 191,
"objects_total": 302,
"neural_net_time": 0.02440166473388672,
"num_replanning_steps": 12,
"wall_time": 5.24399471282959
},
{
"num_node_expansions": 0,
"search_time": 0.0202444,
"total_time": 0.0939475,
"plan_length": 65,
"plan_cost": 65,
"objects_used": 211,
"objects_total": 365,
"neural_net_time": 0.028452634811401367,
"num_replanning_steps": 8,
"wall_time": 3.0245115756988525
},
{
"num_node_expansions": 0,
"search_time": 0.102789,
"total_time": 0.198247,
"plan_length": 79,
"plan_cost": 79,
"objects_used": 218,
"objects_total": 365,
"neural_net_time": 0.029065608978271484,
"num_replanning_steps": 11,
"wall_time": 4.55614972114563
},
{
"num_node_expansions": 0,
"search_time": 0.0154962,
"total_time": 0.109251,
"plan_length": 60,
"plan_cost": 60,
"objects_used": 210,
"objects_total": 365,
"neural_net_time": 0.02926778793334961,
"num_replanning_steps": 13,
"wall_time": 4.741302251815796
},
{
"num_node_expansions": 0,
"search_time": 0.0153721,
"total_time": 0.0608046,
"plan_length": 74,
"plan_cost": 74,
"objects_used": 186,
"objects_total": 365,
"neural_net_time": 0.02894759178161621,
"num_replanning_steps": 4,
"wall_time": 1.6460025310516357
},
{
"num_node_expansions": 0,
"search_time": 0.0954243,
"total_time": 0.194576,
"plan_length": 82,
"plan_cost": 82,
"objects_used": 223,
"objects_total": 362,
"neural_net_time": 0.027848005294799805,
"num_replanning_steps": 12,
"wall_time": 4.614710807800293
},
{
"num_node_expansions": 0,
"search_time": 0.0216686,
"total_time": 0.0933824,
"plan_length": 82,
"plan_cost": 82,
"objects_used": 203,
"objects_total": 362,
"neural_net_time": 0.028284072875976562,
"num_replanning_steps": 12,
"wall_time": 3.650705337524414
},
{
"num_node_expansions": 0,
"search_time": 0.00883323,
"total_time": 0.0628755,
"plan_length": 68,
"plan_cost": 68,
"objects_used": 201,
"objects_total": 362,
"neural_net_time": 0.02834320068359375,
"num_replanning_steps": 7,
"wall_time": 2.234596014022827
},
{
"num_node_expansions": 0,
"search_time": 0.0396403,
"total_time": 0.178167,
"plan_length": 65,
"plan_cost": 65,
"objects_used": 218,
"objects_total": 322,
"neural_net_time": 0.023160934448242188,
"num_replanning_steps": 6,
"wall_time": 2.6842222213745117
},
{
"num_node_expansions": 0,
"search_time": 0.0305726,
"total_time": 0.188371,
"plan_length": 85,
"plan_cost": 85,
"objects_used": 258,
"objects_total": 441,
"neural_net_time": 0.03657793998718262,
"num_replanning_steps": 8,
"wall_time": 4.270561456680298
},
{
"num_node_expansions": 0,
"search_time": 1.3577,
"total_time": 1.44762,
"plan_length": 99,
"plan_cost": 99,
"objects_used": 248,
"objects_total": 441,
"neural_net_time": 0.03699207305908203,
"num_replanning_steps": 8,
"wall_time": 5.421973466873169
},
{
"num_node_expansions": 0,
"search_time": 0.0418557,
"total_time": 0.230808,
"plan_length": 71,
"plan_cost": 71,
"objects_used": 263,
"objects_total": 441,
"neural_net_time": 0.03700876235961914,
"num_replanning_steps": 13,
"wall_time": 6.234536409378052
},
{
"num_node_expansions": 0,
"search_time": 0.0796853,
"total_time": 0.759598,
"plan_length": 75,
"plan_cost": 75,
"objects_used": 294,
"objects_total": 441,
"neural_net_time": 0.037972450256347656,
"num_replanning_steps": 13,
"wall_time": 11.729289054870605
},
{
"num_node_expansions": 0,
"search_time": 0.0470408,
"total_time": 0.197173,
"plan_length": 91,
"plan_cost": 91,
"objects_used": 251,
"objects_total": 441,
"neural_net_time": 0.03815460205078125,
"num_replanning_steps": 8,
"wall_time": 4.103215932846069
},
{
"num_node_expansions": 0,
"search_time": 0.0188923,
"total_time": 0.0830278,
"plan_length": 76,
"plan_cost": 76,
"objects_used": 222,
"objects_total": 417,
"neural_net_time": 0.03529095649719238,
"num_replanning_steps": 7,
"wall_time": 2.737236738204956
},
{
"num_node_expansions": 0,
"search_time": 0.0161271,
"total_time": 0.08138,
"plan_length": 93,
"plan_cost": 93,
"objects_used": 226,
"objects_total": 417,
"neural_net_time": 0.03523993492126465,
"num_replanning_steps": 5,
"wall_time": 1.9196949005126953
},
{
"num_node_expansions": 0,
"search_time": 0.0151709,
"total_time": 0.109928,
"plan_length": 44,
"plan_cost": 44,
"objects_used": 245,
"objects_total": 417,
"neural_net_time": 0.03458762168884277,
"num_replanning_steps": 13,
"wall_time": 5.150935411453247
},
{
"num_node_expansions": 0,
"search_time": 0.109273,
"total_time": 0.253933,
"plan_length": 79,
"plan_cost": 79,
"objects_used": 231,
"objects_total": 417,
"neural_net_time": 0.034616708755493164,
"num_replanning_steps": 9,
"wall_time": 4.458430290222168
},
{
"num_node_expansions": 0,
"search_time": 0.0344792,
"total_time": 0.153338,
"plan_length": 83,
"plan_cost": 83,
"objects_used": 240,
"objects_total": 417,
"neural_net_time": 0.03422355651855469,
"num_replanning_steps": 8,
"wall_time": 3.5642058849334717
},
{
"num_node_expansions": 0,
"search_time": 0.0115926,
"total_time": 0.0381584,
"plan_length": 60,
"plan_cost": 60,
"objects_used": 138,
"objects_total": 232,
"neural_net_time": 0.018133878707885742,
"num_replanning_steps": 8,
"wall_time": 2.1332857608795166
},
{
"num_node_expansions": 0,
"search_time": 0.00655491,
"total_time": 0.0322429,
"plan_length": 61,
"plan_cost": 61,
"objects_used": 108,
"objects_total": 232,
"neural_net_time": 0.01815032958984375,
"num_replanning_steps": 2,
"wall_time": 0.7770214080810547
},
{
"num_node_expansions": 0,
"search_time": 0.00966664,
"total_time": 0.0623213,
"plan_length": 39,
"plan_cost": 39,
"objects_used": 127,
"objects_total": 232,
"neural_net_time": 0.018232107162475586,
"num_replanning_steps": 4,
"wall_time": 1.5998196601867676
},
{
"num_node_expansions": 0,
"search_time": 0.00663378,
"total_time": 0.0357445,
"plan_length": 59,
"plan_cost": 59,
"objects_used": 121,
"objects_total": 232,
"neural_net_time": 0.019855737686157227,
"num_replanning_steps": 4,
"wall_time": 1.3765571117401123
},
{
"num_node_expansions": 0,
"search_time": 0.0105762,
"total_time": 0.061455,
"plan_length": 55,
"plan_cost": 55,
"objects_used": 135,
"objects_total": 212,
"neural_net_time": 0.01603841781616211,
"num_replanning_steps": 3,
"wall_time": 1.1647632122039795
},
{
"num_node_expansions": 0,
"search_time": 0.0143381,
"total_time": 0.0567002,
"plan_length": 74,
"plan_cost": 74,
"objects_used": 136,
"objects_total": 212,
"neural_net_time": 0.016623735427856445,
"num_replanning_steps": 3,
"wall_time": 1.1414904594421387
},
{
"num_node_expansions": 0,
"search_time": 0.0197977,
"total_time": 0.100056,
"plan_length": 74,
"plan_cost": 74,
"objects_used": 138,
"objects_total": 212,
"neural_net_time": 0.01637434959411621,
"num_replanning_steps": 4,
"wall_time": 1.9099130630493164
},
{
"num_node_expansions": 0,
"search_time": 0.0170863,
"total_time": 0.10028,
"plan_length": 59,
"plan_cost": 59,
"objects_used": 146,
"objects_total": 212,
"neural_net_time": 0.016454219818115234,
"num_replanning_steps": 15,
"wall_time": 7.527631998062134
}
] | 28.872204 | 48 | 0.550902 |
cf8569dd45e37b36d5a957509a53c680284aa042 | 3,074 | py | Python | Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/single_straight3d.py | linuxonly801/awesome-DeepLearning | b063757fa130c4d56aea5cce2e592610f1e169f9 | [
"Apache-2.0"
] | 5 | 2022-01-30T07:35:58.000Z | 2022-02-08T05:45:20.000Z | Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/single_straight3d.py | linuxonly801/awesome-DeepLearning | b063757fa130c4d56aea5cce2e592610f1e169f9 | [
"Apache-2.0"
] | 1 | 2022-01-14T02:33:28.000Z | 2022-01-14T02:33:28.000Z | Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/heads/single_straight3d.py | linuxonly801/awesome-DeepLearning | b063757fa130c4d56aea5cce2e592610f1e169f9 | [
"Apache-2.0"
] | 1 | 2022-01-24T16:27:01.000Z | 2022-01-24T16:27:01.000Z | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import numpy as np
from ..registry import ROI_EXTRACTORS
from .roi_extractor import RoIAlign
@ROI_EXTRACTORS.register()
class SingleRoIExtractor3D(nn.Layer):
    """Extract RoI features from a single level feature map. """

    def __init__(self,
                 roi_layer_type='RoIAlign',
                 featmap_stride=16,
                 output_size=16,
                 sampling_ratio=0,
                 pool_mode='avg',
                 aligned=True,
                 with_temporal_pool=True,
                 with_global=False):
        """Configure the RoI pooling layer.

        Args:
            roi_layer_type: 'RoIAlign' or 'RoIPool' (asserted below).
            featmap_stride: downsampling factor of the feature map; its
                inverse becomes the RoIAlign spatial scale.
            output_size: spatial resolution of the pooled RoI features.
            sampling_ratio: RoIAlign sampling ratio.
            pool_mode: stored pooling mode ('avg'/'max'); not read here.
            aligned: whether RoIAlign uses pixel-aligned sampling.
            with_temporal_pool: average over the temporal axis in forward().
            with_global: stored flag; not used by this class's forward().
        """
        super().__init__()
        self.roi_layer_type = roi_layer_type
        assert self.roi_layer_type in ['RoIPool', 'RoIAlign']
        self.featmap_stride = featmap_stride
        self.spatial_scale = 1. / self.featmap_stride

        self.output_size = output_size
        self.sampling_ratio = sampling_ratio
        self.pool_mode = pool_mode
        self.aligned = aligned

        self.with_temporal_pool = with_temporal_pool
        self.with_global = with_global

        self.roi_layer = RoIAlign(resolution=self.output_size,
                                  spatial_scale=self.spatial_scale,
                                  sampling_ratio=self.sampling_ratio,
                                  aligned=self.aligned)

    def init_weights(self):
        # No weights to initialize; kept for interface compatibility.
        pass

    # The shape of feat is N, C, T, H, W
    def forward(self, feat, rois, rois_num):
        """Pool RoI features for every frame and stack them along time.

        Args:
            feat: list of N,C,T,H,W feature maps (e.g. slow/fast pathways).
            rois, rois_num: region proposals passed through to the RoI layer.

        Returns:
            RoI features stacked along a temporal axis (axis=2).
        """
        if len(feat) >= 2:
            # Multiple pathways can only be concatenated after their
            # temporal dimensions are collapsed to the same length.
            assert self.with_temporal_pool
        if self.with_temporal_pool:
            # Average over time, keepdim=True so axis numbering is kept.
            # (Removed dead code: the original also ran a loop computing
            # unused `xi` and `y = paddle.mean(...)` whose results were
            # discarded before this identical list comprehension.)
            feat = [paddle.mean(x, 2, keepdim=True) for x in feat]
        feat = paddle.concat(feat, axis=1)  # merge slow and fast

        roi_feats = []
        for t in range(feat.shape[2]):
            # feat.shape[2] may be a static-graph Variable - TODO confirm;
            # the branch keeps both the dynamic and static cases working.
            if type(t) == paddle.fluid.framework.Variable:
                index = paddle.to_tensor(t)
            else:
                data_index = np.array([t]).astype('int32')
                index = paddle.to_tensor(data_index)

            frame_feat = paddle.index_select(feat, index, axis=2)
            # axis=2: avoid dropping the first dimension when N=1.
            frame_feat = paddle.squeeze(frame_feat, axis=2)
            roi_feat = self.roi_layer(frame_feat, rois, rois_num)
            roi_feats.append(roi_feat)

        return paddle.stack(roi_feats, axis=2)
d9e04d4b1725b0afaf1b2ac3ec10fb6b68018a48 | 4,792 | py | Python | unet.py | antares511/eye-in-the-sky | 15aae12e787c2b884bf34103c94f6091d8637b80 | [
"Apache-2.0"
] | 236 | 2018-12-26T12:19:48.000Z | 2022-03-30T11:59:21.000Z | unet.py | antares511/eye-in-the-sky | 15aae12e787c2b884bf34103c94f6091d8637b80 | [
"Apache-2.0"
] | 20 | 2018-12-27T05:02:24.000Z | 2022-01-25T05:58:55.000Z | unet.py | antares511/eye-in-the-sky | 15aae12e787c2b884bf34103c94f6091d8637b80 | [
"Apache-2.0"
] | 84 | 2018-12-27T17:21:43.000Z | 2022-02-27T07:13:34.000Z | import PIL
from PIL import Image
import matplotlib.pyplot as plt
from libtiff import TIFF
from libtiff import TIFFfile, TIFFimage
from scipy.misc import imresize
import numpy as np
import glob
import cv2
import os
import math
import skimage.io as io
import skimage.transform as trans
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.preprocessing.image import ImageDataGenerator
from keras import backend as K
from iou import iou
#%matplotlib inline
def UNet(shape = (None,None,4)):
    """Build and compile a U-Net mapping a 4-channel image to a 9-class
    per-pixel softmax segmentation.

    Args:
        shape: input shape (H, W, channels); H and W may be None so the
            network accepts variable-sized inputs.

    Returns:
        The compiled Keras model (Adam optimizer, categorical
        cross-entropy loss, accuracy and IoU metrics).
    """
    # Left side of the U-Net
    inputs = Input(shape)
    # in_shape = inputs.shape
    # print(in_shape)
    # Encoder: four conv-conv-BN-pool stages doubling the filter count
    # (64 -> 128 -> 256 -> 512), halving spatial resolution each time.
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(inputs)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(conv1)
    conv1 = BatchNormalization()(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(pool1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(conv2)
    conv2 = BatchNormalization()(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(pool2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(conv3)
    conv3 = BatchNormalization()(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(pool3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(conv4)
    conv4 = BatchNormalization()(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # Bottom of the U-Net
    conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(pool4)
    conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(conv5)
    conv5 = BatchNormalization()(conv5)
    drop5 = Dropout(0.5)(conv5)

    # Upsampling Starts, right side of the U-Net
    # Each decoder stage upsamples, concatenates the matching encoder
    # feature map (skip connection), then applies two convolutions.
    up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(UpSampling2D(size = (2,2))(drop5))
    merge6 = concatenate([drop4,up6], axis = 3)
    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(merge6)
    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(conv6)
    conv6 = BatchNormalization()(conv6)

    up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(UpSampling2D(size = (2,2))(conv6))
    merge7 = concatenate([conv3,up7], axis = 3)
    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(merge7)
    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(conv7)
    conv7 = BatchNormalization()(conv7)

    up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(UpSampling2D(size = (2,2))(conv7))
    merge8 = concatenate([conv2,up8], axis = 3)
    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(merge8)
    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(conv8)
    conv8 = BatchNormalization()(conv8)

    up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(UpSampling2D(size = (2,2))(conv8))
    merge9 = concatenate([conv1,up9], axis = 3)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(merge9)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(conv9)
    conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'random_normal')(conv9)
    conv9 = BatchNormalization()(conv9)

    # Output layer of the U-Net with a softmax activation
    conv10 = Conv2D(9, 1, activation = 'softmax')(conv9)

    model = Model(input = inputs, output = conv10)
    model.compile(optimizer = Adam(lr = 0.000001), loss = 'categorical_crossentropy', metrics = ['accuracy', iou])
    model.summary()

    #filelist_modelweights = sorted(glob.glob('*.h5'), key=numericalSort)
    #if 'model_nocropping.h5' in filelist_modelweights:
    #    model.load_weights('model_nocropping.h5')

    return model
| 50.978723 | 136 | 0.6899 |
2131a44085e2b70fc94726267cf13b2001e3bafd | 825 | py | Python | backend/manage.py | dwightgunning/django-ng-boilerplate | 226a661534f6c02be7f79ea771ffde518bbcf7f7 | [
"MIT"
] | 2 | 2018-05-06T00:42:10.000Z | 2021-07-07T03:48:56.000Z | backend/manage.py | dwightgunning/django-ng-boilerplate | 226a661534f6c02be7f79ea771ffde518bbcf7f7 | [
"MIT"
] | 8 | 2021-04-08T20:17:11.000Z | 2022-03-02T09:55:01.000Z | backend/manage.py | dwightgunning/django-ng-boilerplate | 226a661534f6c02be7f79ea771ffde518bbcf7f7 | [
"MIT"
] | 2 | 2021-09-06T11:55:40.000Z | 2021-12-23T04:03:44.000Z | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before anything imports it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangongboilerplate.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django # NOQA
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Dispatch the requested management command (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| 35.869565 | 83 | 0.646061 |
37877aa2956ed021c05f013753932ae6ff9b0fcb | 276 | py | Python | database/schemes/437/script/runTest.py | TonnaMajesty/test | 68b24d1f3e8b4d6154c9d896a7fa3e2f99b49a6f | [
"MIT"
] | 1 | 2022-03-23T03:14:41.000Z | 2022-03-23T03:14:41.000Z | database/schemes/437/script/runTest.py | TonnaMajesty/test | 68b24d1f3e8b4d6154c9d896a7fa3e2f99b49a6f | [
"MIT"
] | null | null | null | database/schemes/437/script/runTest.py | TonnaMajesty/test | 68b24d1f3e8b4d6154c9d896a7fa3e2f99b49a6f | [
"MIT"
] | null | null | null | # coding:utf-8
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from script import addPathToPython, initSettings, selectModel
addPathToPython()
initSettings()
selectModel()
from SRC.main import Main
Main('系统日志.xml').run()
| 18.4 | 76 | 0.775362 |
6741a21c9b7ebce01e3a2130edd5cd4efd378b48 | 1,904 | py | Python | python/client/ssdp_protocol.py | PetarZecevic/smart-farm | d118e30cfdaacf75969013c25cd732e31a9345f0 | [
"Apache-2.0"
] | null | null | null | python/client/ssdp_protocol.py | PetarZecevic/smart-farm | d118e30cfdaacf75969013c25cd732e31a9345f0 | [
"Apache-2.0"
] | null | null | null | python/client/ssdp_protocol.py | PetarZecevic/smart-farm | d118e30cfdaacf75969013c25cd732e31a9345f0 | [
"Apache-2.0"
] | null | null | null | import socket
class SSDP_Protocol():
    """
    Sends SSDP search requests over UDP and parses the header rows of
    the responses that come back.
    """
    SSDP_MULTICAST_IP = '239.255.255.250'
    SSDP_PORT = '1900'
    SSDP_MSG_MAX_SIZE = 1024

    # M-SEARCH template; {0} is the request method, {1} the search target (ST).
    search_message = \
        '{0} * HTTP/1.1\r\n' \
        'HOST:' + SSDP_MULTICAST_IP + ':' + SSDP_PORT + '\r\n' \
        'ST: {1}\r\n' \
        'MX: 1\r\n' \
        'MAN:"ssdp:discover"\r\n' \
        '\r\n'

    def __init__(self, method, search_target):
        """Bind a UDP socket on the SSDP port and render the request text."""
        self.method = method
        self.search_target = search_target
        self.protocol_socket = socket.socket(socket.AF_INET,
            socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        self.protocol_socket.bind(('', int(SSDP_Protocol.SSDP_PORT)))
        self.request_message = SSDP_Protocol.search_message.format(
            self.method,
            self.search_target
        )

    def send_request(self):
        """Send the prepared search request to the SSDP multicast group."""
        self.protocol_socket.sendto(self.request_message.encode(),
            (SSDP_Protocol.SSDP_MULTICAST_IP.encode(), int(SSDP_Protocol.SSDP_PORT)))

    def fetch_response(self, timeout_seconds):
        """
        Wait up to *timeout_seconds* for one response.

        Returns the response's header rows as a dict of name:value, or
        None on timeout or a malformed header line.

        BUG FIX: the original ended with ``finally: return data_dict``.
        A ``return`` in ``finally`` overrides any earlier return, so both
        ``except`` clauses' ``return None`` were silently discarded and a
        timeout yielded ``{}``. The ``finally`` return has been removed so
        the documented None-on-failure behavior actually happens.
        """
        self.protocol_socket.settimeout(timeout_seconds)
        data_dict = {}
        try:
            data, addr = self.protocol_socket.recvfrom(
                SSDP_Protocol.SSDP_MSG_MAX_SIZE)
            header = data.decode('utf-8').splitlines()
            if header[0] == 'HTTP/1.1 200 OK':
                for line in header[1:]:
                    # A row without ':' raises IndexError below and is
                    # treated as a malformed response.
                    parts = line.split(':', maxsplit=1)
                    data_dict[parts[0]] = parts[1]
        except socket.timeout:
            return None
        except IndexError:
            return None
        return data_dict
aaa0be1ef0af26e4416b57070f2268f23aadd3d1 | 468 | py | Python | ads/utils.py | GabrielDumbrava/django-ads-filer | b98ca8b18cca69c862693ccf79b727a3a9e4aa24 | [
"Apache-2.0"
] | 2 | 2020-01-03T09:09:24.000Z | 2021-07-13T15:38:46.000Z | ads/utils.py | vcealicu/django-ads-filer | b98ca8b18cca69c862693ccf79b727a3a9e4aa24 | [
"Apache-2.0"
] | null | null | null | ads/utils.py | vcealicu/django-ads-filer | b98ca8b18cca69c862693ccf79b727a3a9e4aa24 | [
"Apache-2.0"
] | 2 | 2018-07-10T15:41:04.000Z | 2018-07-10T15:42:22.000Z | from django.conf import settings
from django.utils.translation import gettext_lazy
def get_zones_choices():
    """Yield a (key, lazily-translated name) pair for every configured
    ad zone, defaulting the name to 'Undefined'."""
    for zone_key, zone_cfg in settings.ADS_ZONES.items():
        yield (zone_key, gettext_lazy(zone_cfg.get('name', 'Undefined')))
def get_client_ip(request):
    """Best-effort client IP: first hop of X-Forwarded-For when present,
    otherwise REMOTE_ADDR (empty string if neither is set)."""
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR', None)
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR', '')
| 27.529412 | 83 | 0.700855 |
3c70f46a8b50d7bbebfc444dbf7ec276df5c8dc3 | 1,470 | py | Python | Chapter10/maxsubarray_v4.py | yoyboy/Software-Architecture-with-Python | c228a9c77e3d1d0e8651146611b34fed0a00bc5e | [
"MIT"
] | 103 | 2017-05-18T13:43:47.000Z | 2022-03-31T09:25:33.000Z | Chapter10/maxsubarray_v4.py | afcarl/Software-Architecture-with-Python | 36bd93017be025c1910d0d3d6ebf9996bc84a9f4 | [
"MIT"
] | 1 | 2021-09-20T03:12:06.000Z | 2021-09-20T03:12:06.000Z | Chapter10/maxsubarray_v4.py | afcarl/Software-Architecture-with-Python | 36bd93017be025c1910d0d3d6ebf9996bc84a9f4 | [
"MIT"
] | 76 | 2017-05-22T14:44:00.000Z | 2022-01-14T19:32:40.000Z | # Code Listing #4
"""
Maximum subarray problem - final version
"""
from contextlib import contextmanager
import random
import time
@contextmanager
def timer():
    """Context manager that prints the wall-clock time, in
    milliseconds, spent inside the managed block."""
    started_at = time.time()
    try:
        yield
    finally:
        elapsed_ms = (time.time() - started_at) * 1000
        print('time taken=> %.2f ms' % elapsed_ms)
def num_array(size):
    """Return a list of *size* pseudo-random integers drawn from the
    fixed range [-25, 30)."""
    return [random.randrange(-25, 30) for _ in range(size)]
def max_subarray1(sequence):
    """Return ``(max_sum, max_subarray)`` for *sequence* by brute force.

    Considers every contiguous sub-sequence, including single
    elements (the original started ``j`` at ``i + 1`` and therefore
    missed them, disagreeing with the optimized ``max_subarray`` on
    inputs such as ``[5]``).  When no sub-sequence has a positive sum
    the result is ``(0, [])`` — the empty sub-array is allowed, as in
    the optimized version.  Runs in O(n^2) by extending a running sum
    instead of re-summing each slice (the original was O(n^3)).
    """
    max_sum, max_sub = 0, []
    n = len(sequence)
    for i in range(n):
        running = 0
        for j in range(i, n):  # j == i covers single-element sub-arrays
            running += sequence[j]
            if running > max_sum:
                max_sum, max_sub = running, sequence[i:j+1]
    return max_sum, max_sub
def max_subarray(sequence):
    """Return the largest sum over contiguous sub-sequences of
    *sequence* (Kadane's algorithm; the empty sub-array counts, so the
    result is never negative)."""
    best = 0
    current = 0
    for value in sequence:
        current += value
        if current < 0:
            current = 0
        if current > best:
            best = current
    return best
if __name__ == "__main__":
    # Benchmark harness (uncomment to time the brute-force version on
    # 10k random values):
    #with timer():
    #    max_subarray1(num_array(10000))
    # Prints 55: the best contiguous run is [20, -10, 30, 15].
    print(max_subarray([-5, 20, -10, 30, 15]))
| 22.615385 | 71 | 0.606803 |
a0db06441a2bfa57eacd8bda4713b26b094c7825 | 3,083 | py | Python | video_demo.py | leonardohaig/yolov3_tensorflow | dcb9ac390849551f139f24782e31a39062a73695 | [
"MIT"
] | 1 | 2019-12-24T08:49:58.000Z | 2019-12-24T08:49:58.000Z | video_demo.py | leonardohaig/yolov3_tensorflow | dcb9ac390849551f139f24782e31a39062a73695 | [
"MIT"
] | null | null | null | video_demo.py | leonardohaig/yolov3_tensorflow | dcb9ac390849551f139f24782e31a39062a73695 | [
"MIT"
] | 1 | 2019-12-24T08:53:31.000Z | 2019-12-24T08:53:31.000Z | #!/usr/bin/env python3
#coding=utf-8
#============================#
#Program:video_demo.py
#Date:2019.08.14
#Author:liheng
#Version:V1.0
#============================#
# Runs a frozen YOLOv3 (COCO) TensorFlow graph over a video file, drawing
# detections on each frame and displaying them (optionally writing a result video).
__author__ = 'liheng'
import cv2
import time
import numpy as np
import core.utils as utils
import tensorflow as tf
# Names of the graph tensors to fetch: the input placeholder plus the
# three YOLOv3 output scales.
return_elements = ["input/input_data:0", "pred_sbbox/concat_2:0", "pred_mbbox/concat_2:0", "pred_lbbox/concat_2:0"]
pb_file = "./yolov3_coco.pb"
video_path = "/media/liheng/0F521CCC0F521CCC/7.29/ADAS_usb4mm-20190729-173452.avi"
bSaveResult = False # whether to save the annotated result video
# video_path = 0
num_classes = 80 # number of detection classes (COCO)
input_size = 416
graph = tf.Graph()
return_tensors = utils.read_pb_return_tensors(graph, pb_file, return_elements)
with tf.Session(graph=graph) as sess:
    vid = cv2.VideoCapture(video_path)
    if bSaveResult:
        # MJPG writer at a fixed 20 fps, same frame size as the source.
        videoWriter = cv2.VideoWriter(video_path+'_res.avi',
                                      cv2.VideoWriter_fourcc(*'MJPG'),
                                      20,
                                      (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))))
    nWaitTime = 1
    nFrameIdx = 0
    #cv2.namedWindow("result", cv2.WINDOW_NORMAL)
    while True:
        return_value, frame = vid.read()
        nFrameIdx += 1
        if not return_value:
            print("No image!")
            break
        frame_size = frame.shape[:2]
        image_data = utils.image_preprocess(np.copy(frame), [input_size, input_size])# preprocess: convert to RGB and resize to the network input size
        image_data = image_data[np.newaxis, ...]# add batch dim: (416,416,3) -> (1,416,416,3)
        prev_time = time.time()
        pred_sbbox, pred_mbbox, pred_lbbox = sess.run(
            [return_tensors[1], return_tensors[2], return_tensors[3]],
            feed_dict={ return_tensors[0]: image_data})
        # pred_sbbox.shape (1,52,52,3,85)
        # pred_mbbox.shape (1,26,26,3,85)
        # pred_lbbox.shape (1,13,13,3,85)
        pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)),#shape:(8112,85)
                                    np.reshape(pred_mbbox, (-1, 5 + num_classes)),#shape:(2028,85)
                                    np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0)#shape:(507,85),pred_bbox.shape:(10647,85)
        bboxes = utils.postprocess_boxes(pred_bbox, frame_size, input_size, 0.3)# 0.3 presumably the score threshold -- confirm in core.utils
        bboxes = utils.nms(bboxes, 0.45, method='nms')# 0.45 presumably the NMS IoU threshold -- confirm in core.utils
        image = utils.draw_bbox(frame, bboxes)
        curr_time = time.time()
        exec_time = curr_time - prev_time
        result = np.asarray(image)
        info = "Frame:%d Fps: %.2f time: %.2f ms" %(nFrameIdx,1.0/exec_time,1000*exec_time)
        cv2.putText(result, info, (0,25), cv2.FONT_HERSHEY_SIMPLEX,1.0, (0, 0, 255), 2, lineType=cv2.LINE_AA)
        cv2.imshow("result", result)
        if bSaveResult:
            videoWriter.write(result)
        key = cv2.waitKey(nWaitTime)
        if 27==key:# ESC quits
            break
        elif 32==key:# space toggles pause (waitKey(0) blocks until the next key)
            nWaitTime = not nWaitTime
    cv2.destroyAllWindows()
| 34.640449 | 133 | 0.593902 |
2420e8a1c52fb3a0fc54eba39942de076acdc9f2 | 5,900 | py | Python | phdfellows/models.py | sirkp/phdadmission | 80beae033e6a3a87435ea5fded107bb0dec116fd | [
"MIT"
] | 1 | 2020-01-26T12:42:12.000Z | 2020-01-26T12:42:12.000Z | phdfellows/models.py | sirkp/phdadmission | 80beae033e6a3a87435ea5fded107bb0dec116fd | [
"MIT"
] | null | null | null | phdfellows/models.py | sirkp/phdadmission | 80beae033e6a3a87435ea5fded107bb0dec116fd | [
"MIT"
] | null | null | null | from django.db import models
from accounts.models import User
from datetime import date
from django.utils.translation import gettext as _
from django.core.validators import RegexValidator
class Application(models.Model):
    """Admission application for the PhD/MS programme.

    One row per application, linked to the submitting ``User`` via a
    ForeignKey.  Each submitted application carries a unique
    ``application_no``.  ``'Select'`` defaults act as "not chosen yet"
    sentinels for the choice fields.
    """
    user = models.ForeignKey(User,on_delete=models.DO_NOTHING)
    # --- programme / research details ---
    category = models.CharField(default='Select',max_length=100)
    is_category_other = models.BooleanField(default=False)
    having_disability = models.BooleanField("Disabled",default=False)
    research_area = models.CharField(default='Select',max_length=50)
    is_research_area_other = models.BooleanField(default=False)
    applying_for_list = [
        ('','Select'),
        ('PhD','PhD'),
        ('MS','MS'),
    ]
    applying_for = models.CharField(choices=applying_for_list,default='Select',max_length=5)
    enrollment_type_list = [
        ('','Select'),
        ('Full time','Full time'),
        ('Part time','Part time'),
    ]
    enrollment_type = models.CharField(default='Select',choices=enrollment_type_list,max_length=50)
    # --- personal details ---
    first_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    email = models.EmailField(max_length=200)
    # NOTE(review): stored as free text rather than a DateField -- verify the
    # expected format with the form that populates it.
    date_of_birth = models.CharField("Date of Birth",null=True,max_length=10)
    married = models.BooleanField(default=False)
    gender_list = [
        ('','Select'),
        ('Male','Male'),
        ('Female','Female'),
        ('Other','Other'),
    ]
    gender = models.CharField(default='Select',choices=gender_list,max_length=20)
    phone_no = models.CharField("Phone Number(With Country Code)",max_length=13)
    address = models.TextField()
    country = models.CharField(max_length=200)
    state = models.CharField(null=True,max_length=200)
    city = models.CharField(max_length=200)
    pin_code = models.CharField(validators=[RegexValidator(regex='^.{6}$',message='Length has to be 6', code='nomatch')],max_length=6)
    # --- undergraduate record ---
    score_in_ug = models.FloatField("Score in UG",default=None ,null=True)
    scale_score_list = [
        ('','Select'),
        ('0-5 CGPA','0-5 CGPA',),
        ('0-10 CGPA','0-10 CGPA',),
        ('0-100%','0-100%',),
    ]
    scale_of_score_ug = models.CharField("Scale of Score",default='Select',choices=scale_score_list,max_length=20)
    ug_discipline = models.CharField("UG Discipline",default='Select',max_length=100)
    is_ug_discipline_other = models.BooleanField(default=False)
    ug_college_or_university = models.CharField('UG College/University',max_length=200)
    # --- postgraduate record ---
    pg_passed_or_expected_to_pass_in_year = models.IntegerField("PG Passed Year or Expected to Pass in Year",validators=[RegexValidator(regex='^.{4}$',message='Length has to be 4',code='nomatch')],default=None ,null=True)
    score_in_pg = models.FloatField("Score in PG", default=None ,null=True)
    scale_of_score_pg = models.CharField("Scale of Score",default='Select',choices=scale_score_list, max_length=20)
    pg_discipline = models.CharField("PG Discipline",default='Select',max_length=100)
    is_pg_discipline_other = models.BooleanField(default=False)
    pg_college_or_university = models.CharField('PG College/University', max_length=200)
    # --- qualifying examination (GATE/NET) ---
    qualifying_examination_list = [
        ('','Select'),
        ("GATE","GATE"),
        ("NET","NET"),
    ]
    qualifying_examination = models.CharField(default='Select',choices=qualifying_examination_list,max_length=20)
    branch_code_for_qualifying_exam = models.CharField(default='Select', max_length=10)
    is_branch_code_for_qualifying_exam_other = models.BooleanField(default=False)
    qualifying_exam_score_valid_upto = models.IntegerField(validators=[RegexValidator(regex='^.{4}$',message='Length has to be 4', code='nomatch')],default=None ,null=True)
    all_india_rank_in_qualifying_exam = models.IntegerField("All India rank in Qualifying Exam",default=None ,null=True)
    score_in_qualifying_exam = models.FloatField(default=None ,null=True)
    # --- experience & publications ---
    # NOTE(review): verbose name below contains a typo ("Experpience"); fixing
    # it changes a user-visible label/migration, so it is only flagged here.
    work_experience_in_year = models.IntegerField("Work Experpience(in years)",default=0 ,null=True)
    type_of_work = models.CharField(default='Select',max_length=100)
    is_type_of_work_other = models.BooleanField(default=False)
    no_of_peer_reviewed_publications = models.IntegerField(default=0 ,null=True)
    no_of_patents_granted = models.IntegerField(default=0 ,null=True)
    # --- guide preferences & workflow state ---
    guide_preference_1 = models.CharField(max_length=200)
    guide_preference_2 = models.CharField(max_length=200)
    guide_preference_3 = models.CharField(max_length=200)
    status_list = [
        ('Draft','Draft'),
        ('Submitted','Submitted'),
        ('Shortlisted for Test','Shortlisted for Test'),
        ('Shortlisted for Interview','Shortlisted for Interview'),
        ('Selected','Selected'),
        ('Rejected','Rejected'),
    ]
    current_status = models.CharField(default='Draft',choices=status_list,max_length=30)
    submitted_at = models.DateField(blank=True,null=True)
    submitted_year = models.CharField(max_length=4)
    application_no = models.IntegerField(unique=True,default=None ,null=True)
    was_selected_for_interview = models.BooleanField(default=False)
    def __str__(self):
        return str(self.application_no)
    class Meta:
        ordering = ('-submitted_at',)
        # unique_together = ('submitted_year','applying_for','email')
class WrittenTestScore(models.Model):
    """Written/programming test assessment for one application.

    One-to-one with ``Application``; rows are deleted together with
    their application (CASCADE).
    """
    application_no = models.OneToOneField(Application,on_delete=models.CASCADE)
    # Scores stay NULL until the corresponding test has been evaluated.
    written_test_score = models.IntegerField(null=True)
    programming_test_score = models.IntegerField(null=True)
    def __str__(self):
        return str(self.application_no)
| 37.106918 | 221 | 0.714576 |
a9cf15e773112986cf0fc0360b1623d5cb928063 | 10,134 | py | Python | Generators/ORM/EolCohortGenerator.py | neilmdixit/omop-learn | c9ef64d1af379b9e7d99d60995f2c50982e1f521 | [
"MIT"
] | 56 | 2020-07-21T04:50:41.000Z | 2022-03-30T17:10:18.000Z | Generators/ORM/EolCohortGenerator.py | neilmdixit/omop-learn | c9ef64d1af379b9e7d99d60995f2c50982e1f521 | [
"MIT"
] | 6 | 2021-03-05T21:11:25.000Z | 2021-06-10T17:11:41.000Z | Generators/ORM/EolCohortGenerator.py | neilmdixit/omop-learn | c9ef64d1af379b9e7d99d60995f2c50982e1f521 | [
"MIT"
] | 16 | 2020-07-09T03:48:25.000Z | 2022-01-22T02:25:44.000Z | import pandas as pd
from config import omop_schema, user_schema
from sqlalchemy import Column, BigInteger, Integer, String
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from sqlalchemy import func, and_, or_
from sqlalchemy.sql.expression import select, literal, case
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class EolCohortTable(Base):
    """End of Life (EOL) OMOP cohort.
    SqlAlchemy ORM class definition that can be used to define an EOL cohort.
    Keyword arguments
    ----------
    training_start_date -- datetime.datetime, The earliest date from which to look for eligible people for the cohort.
    training_end_date -- datetime.datetime, The latest date from which to look for eligible people for the cohort.
    gap_months -- int, The number of months between training_end_date and the start of the prediction date range.
    outcome_months -- int, The number of months between the start and end of the prediction date range.
    min_enroll_proportion -- float, A number between 0 and 1. The minimum proportion of days a member needs to be enrolled during training and prediction date ranges.
    """
    __tablename__ = 'eol_cohort'
    __table_args__ = {
        'schema': user_schema
    }
    # Cohort parameters: callers are expected to set these class attributes
    # before invoking build() (they default to None / 0.95 here).
    training_start_date = None
    training_end_date = None
    gap_months = None
    outcome_months = None
    min_enroll_proportion = 0.95
    # For backward compatibility with original FeatureGenerator class
    _cohort = None
    _schema_name = user_schema
    _cohort_table_name = 'eol_cohort'
    # Column name -> pandas dtype used when casting the cached dataframe.
    _dtype = {}
    _dtype['example_id'] = int
    _dtype['person_id'] = int
    _dtype['person_source_value'] = str
    _dtype['start_date'] = str
    _dtype['end_date'] = str
    # Table columns
    example_id = Column(Integer, primary_key=True, nullable=True)
    person_id = Column(BigInteger)
    start_date = Column(String(length=20))
    end_date = Column(String(length=20))
    outcome_date = Column(String(length=20))
    y = Column(Integer)
    def __repr__(self):
        # Debug-friendly one-line rendering of a cohort row.
        return (
            "<EolCohort(example_id='%s', person_id='%s', start_date='%s', end_date='%s', outcome_date='%s', y='%s')>" % (
                self.example_id, self.person_id, self.start_date, self.end_date, self.outcome_date, self.y
            )
        )
    def build(self, db, replace=False):
        """Build an End of Life (EOL) OMOP cohort.
        Contains the logic to build an EOL cohort, including:
        1. Find members > 70 years old with data from the training and prediction windows,
        2. Mark members who have a death date during the prediction window as 1, otherwise 0.
        Parameters
        ----------
        db: One of the database classes defined in Utils.ORM, such as PostgresDatabase.
        Notes
        -----
        Does not return anything. However, populates a table, eol_cohort, in a schema based on the value set in config.user_schema. Also stores a pandas dataframe of the results in the _cohort class variable.
        """
        if replace:
            with db.session.session_manager() as session:
                try:
                    self.__table__.drop(session.get_bind()) # Drop the table if it already exists, otherwise we wouldn't be calling build
                # NOTE(review): bare except silently ignores every failure from drop().
                except:
                    pass
                self.__table__.create(session.get_bind())
        # NOTE(review): 'session' below is the name bound inside the 'with'
        # above (Python 'with' does not create a scope). When replace=False it
        # is undefined here, and even when replace=True the session manager has
        # already exited -- verify against upstream.
        # Step 1: Add table references from db parameter (db contains an 'inspector', as referenced in InspectOMOP docs).
        Person = db.inspector.tables['person']
        ObservationPeriod = db.inspector.tables['observation_period']
        # Step 1 (cont.): Get death dates of members
        death_dates = session.query(
            Person.person_id,
            Person.death_datetime
        )\
        .subquery('death_dates')
        # Step 2: Find members greater than 70 years of age
        eligible_people = session.query(
            Person.person_id
        )\
        .filter(
            (self.training_end_date.year - Person.year_of_birth) > 70
        )\
        .subquery('eligible_people')
        # Step 3: Calculate prediction window
        prediction_start_date = self.training_end_date + relativedelta(months=self.gap_months)
        prediction_end_date = self.training_end_date + relativedelta(months=self.gap_months+self.outcome_months)
        # Step 4: Identify person IDs that occur during our training timeframe
        # Overlap of each observation period with the training window, clamped
        # so that non-overlapping periods contribute 0 days.
        least_end_date = case(
            [(ObservationPeriod.observation_period_end_date < self.training_end_date, ObservationPeriod.observation_period_end_date)],
            else_ = self.training_end_date
        )
        greatest_start_date = case(
            [(ObservationPeriod.observation_period_start_date > self.training_start_date, ObservationPeriod.observation_period_start_date)],
            else_ = self.training_start_date
        )
        num_days_expr = case(
            [((least_end_date - greatest_start_date) > timedelta(0), least_end_date - greatest_start_date)],
            else_ = timedelta(0)
        )
        training_elig_counts = session\
            .query(
                ObservationPeriod.person_id,
                num_days_expr.label('num_days')
            )\
            .join(eligible_people, ObservationPeriod.person_id == eligible_people.c.person_id)\
            .subquery('training_elig_counts')
        # Keep only people enrolled for at least min_enroll_proportion of the
        # training window.
        training_window_elig_percent = session\
            .query(
                training_elig_counts.c.person_id
            )\
            .group_by(training_elig_counts.c.person_id)\
            .having(func.sum(training_elig_counts.c.num_days) >= (self.min_enroll_proportion * (self.training_end_date - self.training_start_date)))\
            .subquery('training_window_elig_percent')
        # Same overlap computation, now against the prediction window.
        least_end_date = case(
            [(ObservationPeriod.observation_period_end_date < prediction_end_date, ObservationPeriod.observation_period_end_date)],
            else_ = prediction_end_date
        )
        greatest_start_date = case(
            [(ObservationPeriod.observation_period_start_date > self.training_end_date, ObservationPeriod.observation_period_start_date)],
            else_ = self.training_end_date
        )
        num_days_expr = case(
            [((least_end_date - greatest_start_date) > timedelta(0), least_end_date - greatest_start_date)],
            else_ = timedelta(0)
        )
        test_period_elig_counts = session\
            .query(ObservationPeriod.person_id, num_days_expr.label('num_days'))\
            .join(training_window_elig_percent, training_window_elig_percent.c.person_id == ObservationPeriod.person_id)\
            .filter(ObservationPeriod.person_id > 0)\
            .subquery('test_period_elig_counts')
        # A person stays eligible if they died inside the prediction window OR
        # were enrolled for enough of it.
        test_window_elig_percent = session\
            .query(test_period_elig_counts.c.person_id)\
            .join(death_dates, test_period_elig_counts.c.person_id == death_dates.c.person_id)\
            .group_by(test_period_elig_counts.c.person_id, death_dates.c.death_datetime)\
            .having(
                or_(
                    and_(death_dates.c.death_datetime >= prediction_start_date, death_dates.c.death_datetime <= prediction_end_date)
                    , func.sum(test_period_elig_counts.c.num_days) >=
                    (self.min_enroll_proportion * (prediction_end_date - self.training_end_date))
                )
            )\
            .subquery('test_window_elig_percent')
        # Step 5: Define our final table of person_id, start_date, end_date, and y
        # IMPORTANT: MIT's feature creation process relies on this table being sorted in ascending order by
        # person id and example id. example id is auto populated based on our table definition.
        labeler = case(
            [(death_dates.c.death_datetime.between(prediction_start_date, prediction_end_date), 1)], else_ = 0
        ).label('y')
        cohort = session\
            .query(test_window_elig_percent.c.person_id,
                   literal(self.training_start_date).label('start_date'),
                   literal(self.training_end_date).label('end_date'),
                   death_dates.c.death_datetime.label('outcome_date'),
                   labeler)\
            .join(death_dates, death_dates.c.person_id == test_window_elig_percent.c.person_id, isouter=True)\
            .filter(or_(death_dates.c.death_datetime == None, death_dates.c.death_datetime >= prediction_start_date))\
            .order_by(test_window_elig_percent.c.person_id)\
            .subquery('cohort')
        # Step 6: Insert the cohort to our current table
        sel = select([cohort])
        ins = self.__table__.insert().from_select(['person_id', 'start_date', 'end_date', 'outcome_date', 'y'], sel)
        session.execute(ins)
        # Save a representation of our cohort into a local pandas data frame for use in feature generation code
        self._cohort = pd.read_sql("SELECT * FROM " + user_schema + "." + self._cohort_table_name, db.engine)
        for date_col in ['start_date', 'end_date']:
            self._cohort[date_col] = pd.to_datetime(self._cohort[date_col])
        self._cohort = self._cohort.astype(
            {k:v for k,v in self._dtype.items() if k in self._cohort.columns}
        )
| 49.676471 | 208 | 0.607559 |
d807ac09a60fa3662cdffae1a5764b31d8241078 | 1,118 | py | Python | setup/terraform/resources/labs/utils/kafka.py | rch/edge2ai-workshop | 32af34e39439d17c2a73ba85e69248e88928a187 | [
"Apache-2.0"
] | 61 | 2019-08-21T12:33:34.000Z | 2022-02-11T14:52:46.000Z | setup/terraform/resources/labs/utils/kafka.py | rch/edge2ai-workshop | 32af34e39439d17c2a73ba85e69248e88928a187 | [
"Apache-2.0"
] | 20 | 2019-10-16T15:11:01.000Z | 2021-03-31T05:34:56.000Z | setup/terraform/resources/labs/utils/kafka.py | rch/edge2ai-workshop | 32af34e39439d17c2a73ba85e69248e88928a187 | [
"Apache-2.0"
] | 74 | 2019-08-21T12:33:52.000Z | 2022-02-17T03:28:56.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from . import *
def _get_port():
    """Return the Kafka broker port: 9093 with TLS, 9092 without."""
    if is_tls_enabled():
        return '9093'
    return '9092'
def _get_bootstrap_servers():
    """Return the single-broker bootstrap list as ``host:port``."""
    return '{}:{}'.format(get_hostname(), _get_port())
def get_common_client_properties(env, client_type, consumer_group_id, client_id):
    """Assemble the Kafka client properties shared by our flows.

    Producer and consumer clients receive different transaction and
    identity settings; when TLS is enabled, Kerberos + SSL security
    properties are appended as well.
    """
    props = {'bootstrap.servers': _get_bootstrap_servers()}
    if client_type == 'producer':
        role_props = {
            'use-transactions': 'false',
            'attribute-name-regex': 'schema.*',
            'client.id': client_id,
        }
    else:
        # consumer
        role_props = {
            'honor-transactions': 'false',
            'group.id': consumer_group_id,
            'auto.offset.reset': 'latest',
            'header-name-regex': 'schema.*',
        }
    props.update(role_props)
    if is_tls_enabled():
        security_props = {
            'kerberos-credentials-service': env.keytab_svc.id,
            'sasl.kerberos.service.name': 'kafka',
            'sasl.mechanism': 'GSSAPI',
            'security.protocol': 'SASL_SSL',
            'ssl.context.service': env.ssl_svc.id,
        }
        props.update(security_props)
    return props
| 27.95 | 81 | 0.564401 |
013be7da2e8768a855558a27a8cb9d888f4fecfb | 9,234 | py | Python | src/ebay_rest/api/commerce_charity/models/charity_org.py | matecsaj/ebay_rest | dd23236f39e05636eff222f99df1e3699ce47d4a | [
"MIT"
] | 3 | 2021-12-12T04:28:03.000Z | 2022-03-10T03:29:18.000Z | src/ebay_rest/api/commerce_charity/models/charity_org.py | jdavv/ebay_rest | 20fc88c6aefdae9ab90f9c1330e79abddcd750cd | [
"MIT"
] | 33 | 2021-06-16T20:44:36.000Z | 2022-03-30T14:55:06.000Z | src/ebay_rest/api/commerce_charity/models/charity_org.py | jdavv/ebay_rest | 20fc88c6aefdae9ab90f9c1330e79abddcd750cd | [
"MIT"
] | 7 | 2021-06-03T09:30:23.000Z | 2022-03-08T19:51:33.000Z | # coding: utf-8
"""
Charity API
The Charity API allows third-party developers to search for and access details on supported charitable organizations. # noqa: E501
OpenAPI spec version: v1.2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CharityOrg(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # attribute name -> swagger-declared type (drives traversal in to_dict)
    swagger_types = {
        'charity_org_id': 'str',
        'description': 'str',
        'location': 'Location',
        'logo_image': 'Image',
        'mission_statement': 'str',
        'name': 'str',
        'registration_id': 'str',
        'website': 'str'
    }
    # attribute name -> JSON key used by the Charity API payloads
    attribute_map = {
        'charity_org_id': 'charityOrgId',
        'description': 'description',
        'location': 'location',
        'logo_image': 'logoImage',
        'mission_statement': 'missionStatement',
        'name': 'name',
        'registration_id': 'registrationId',
        'website': 'website'
    }
    def __init__(self, charity_org_id=None, description=None, location=None, logo_image=None, mission_statement=None, name=None, registration_id=None, website=None):  # noqa: E501
        """CharityOrg - a model defined in Swagger"""  # noqa: E501
        self._charity_org_id = None
        self._description = None
        self._location = None
        self._logo_image = None
        self._mission_statement = None
        self._name = None
        self._registration_id = None
        self._website = None
        self.discriminator = None
        # Only assign through the property setters when a value was provided,
        # leaving unset attributes as None.
        if charity_org_id is not None:
            self.charity_org_id = charity_org_id
        if description is not None:
            self.description = description
        if location is not None:
            self.location = location
        if logo_image is not None:
            self.logo_image = logo_image
        if mission_statement is not None:
            self.mission_statement = mission_statement
        if name is not None:
            self.name = name
        if registration_id is not None:
            self.registration_id = registration_id
        if website is not None:
            self.website = website
    @property
    def charity_org_id(self):
        """Gets the charity_org_id of this CharityOrg.  # noqa: E501
        The ID of the charitable organization.  # noqa: E501
        :return: The charity_org_id of this CharityOrg.  # noqa: E501
        :rtype: str
        """
        return self._charity_org_id
    @charity_org_id.setter
    def charity_org_id(self, charity_org_id):
        """Sets the charity_org_id of this CharityOrg.
        The ID of the charitable organization.  # noqa: E501
        :param charity_org_id: The charity_org_id of this CharityOrg.  # noqa: E501
        :type: str
        """
        self._charity_org_id = charity_org_id
    @property
    def description(self):
        """Gets the description of this CharityOrg.  # noqa: E501
        The description of the charitable organization.  # noqa: E501
        :return: The description of this CharityOrg.  # noqa: E501
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this CharityOrg.
        The description of the charitable organization.  # noqa: E501
        :param description: The description of this CharityOrg.  # noqa: E501
        :type: str
        """
        self._description = description
    @property
    def location(self):
        """Gets the location of this CharityOrg.  # noqa: E501
        :return: The location of this CharityOrg.  # noqa: E501
        :rtype: Location
        """
        return self._location
    @location.setter
    def location(self, location):
        """Sets the location of this CharityOrg.
        :param location: The location of this CharityOrg.  # noqa: E501
        :type: Location
        """
        self._location = location
    @property
    def logo_image(self):
        """Gets the logo_image of this CharityOrg.  # noqa: E501
        :return: The logo_image of this CharityOrg.  # noqa: E501
        :rtype: Image
        """
        return self._logo_image
    @logo_image.setter
    def logo_image(self, logo_image):
        """Sets the logo_image of this CharityOrg.
        :param logo_image: The logo_image of this CharityOrg.  # noqa: E501
        :type: Image
        """
        self._logo_image = logo_image
    @property
    def mission_statement(self):
        """Gets the mission_statement of this CharityOrg.  # noqa: E501
        The mission statement of the charitable organization.  # noqa: E501
        :return: The mission_statement of this CharityOrg.  # noqa: E501
        :rtype: str
        """
        return self._mission_statement
    @mission_statement.setter
    def mission_statement(self, mission_statement):
        """Sets the mission_statement of this CharityOrg.
        The mission statement of the charitable organization.  # noqa: E501
        :param mission_statement: The mission_statement of this CharityOrg.  # noqa: E501
        :type: str
        """
        self._mission_statement = mission_statement
    @property
    def name(self):
        """Gets the name of this CharityOrg.  # noqa: E501
        The name of the charitable organization.  # noqa: E501
        :return: The name of this CharityOrg.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this CharityOrg.
        The name of the charitable organization.  # noqa: E501
        :param name: The name of this CharityOrg.  # noqa: E501
        :type: str
        """
        self._name = name
    @property
    def registration_id(self):
        """Gets the registration_id of this CharityOrg.  # noqa: E501
        The registration ID for the charitable organization.<br /><br /><span class=\"tablenote\"><span style=\"color:#004680\"><strong>Note:</strong></span> For the US marketplace, this is the EIN.</span>  # noqa: E501
        :return: The registration_id of this CharityOrg.  # noqa: E501
        :rtype: str
        """
        return self._registration_id
    @registration_id.setter
    def registration_id(self, registration_id):
        """Sets the registration_id of this CharityOrg.
        The registration ID for the charitable organization.<br /><br /><span class=\"tablenote\"><span style=\"color:#004680\"><strong>Note:</strong></span> For the US marketplace, this is the EIN.</span>  # noqa: E501
        :param registration_id: The registration_id of this CharityOrg.  # noqa: E501
        :type: str
        """
        self._registration_id = registration_id
    @property
    def website(self):
        """Gets the website of this CharityOrg.  # noqa: E501
        The link to the website for the charitable organization.  # noqa: E501
        :return: The website of this CharityOrg.  # noqa: E501
        :rtype: str
        """
        return self._website
    @website.setter
    def website(self, website):
        """Sets the website of this CharityOrg.
        The link to the website for the charitable organization.  # noqa: E501
        :param website: The website of this CharityOrg.  # noqa: E501
        :type: str
        """
        self._website = website
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # If the model subclasses dict, fold its own items in as well.
        if issubclass(CharityOrg, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CharityOrg):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 30.27541 | 219 | 0.600606 |
1091fa8a36d0a3856b423bc7d8d3a8364611bf17 | 2,585 | py | Python | src/spring-cloud/azext_spring_cloud/_client_factory.py | ravithanneeru/azure-cli-extensions | e0de87f3563ae39525370e9912589aac33e7bded | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/spring-cloud/azext_spring_cloud/_client_factory.py | ravithanneeru/azure-cli-extensions | e0de87f3563ae39525370e9912589aac33e7bded | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/spring-cloud/azext_spring_cloud/_client_factory.py | ravithanneeru/azure-cli-extensions | e0de87f3563ae39525370e9912589aac33e7bded | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType
from .vendored_sdks.appplatform.v2020_07_01 import AppPlatformManagementClient
from .vendored_sdks.appplatform.v2020_11_01_preview import (
AppPlatformManagementClient as AppPlatformManagementClient_20201101preview
)
from .vendored_sdks.appplatform.v2022_01_01_preview import (
AppPlatformManagementClient as AppPlatformManagementClient_20220101preview
)
from .vendored_sdks.appplatform.v2021_06_01_preview import (
AppPlatformManagementClient as AppPlatformManagementClient_20210601preview
)
from .vendored_sdks.appplatform.v2021_09_01_preview import (
AppPlatformManagementClient as AppPlatformManagementClient_20210901preview
)
def cf_spring_cloud_20220101preview(cli_ctx, *_):
    """Client factory pinned to the 2022-01-01-preview AppPlatform API."""
    return get_mgmt_service_client(
        cli_ctx, AppPlatformManagementClient_20220101preview)
def cf_spring_cloud(cli_ctx, *_):
    """Client factory for the default (2020-07-01) AppPlatform API."""
    return get_mgmt_service_client(
        cli_ctx, AppPlatformManagementClient)
def cf_spring_cloud_20201101preview(cli_ctx, *_):
    """Client factory pinned to the 2020-11-01-preview AppPlatform API."""
    return get_mgmt_service_client(
        cli_ctx, AppPlatformManagementClient_20201101preview)
def cf_spring_cloud_20210601preview(cli_ctx, *_):
    """Client factory pinned to the 2021-06-01-preview AppPlatform API."""
    return get_mgmt_service_client(
        cli_ctx, AppPlatformManagementClient_20210601preview)
def cf_spring_cloud_20210901preview(cli_ctx, *_):
    """Client factory pinned to the 2021-09-01-preview AppPlatform API."""
    return get_mgmt_service_client(
        cli_ctx, AppPlatformManagementClient_20210901preview)
def cf_resource_groups(cli_ctx, subscription_id=None):
    """Client factory for ARM resource groups, optionally scoped to a
    specific subscription."""
    resource_client = get_mgmt_service_client(
        cli_ctx,
        ResourceType.MGMT_RESOURCE_RESOURCES,
        subscription_id=subscription_id)
    return resource_client.resource_groups
def cf_app_services(cli_ctx, *_):
    """Client factory for the `services` operation group."""
    client = cf_spring_cloud(cli_ctx)
    return client.services
def cf_apps(cli_ctx, *_):
    """Client factory for the `apps` operation group."""
    client = cf_spring_cloud(cli_ctx)
    return client.apps
def cf_deployments(cli_ctx, *_):
    """Client factory for the `deployments` operation group."""
    client = cf_spring_cloud(cli_ctx)
    return client.deployments
def cf_bindings(cli_ctx, *_):
    """Client factory for the `bindings` operation group."""
    client = cf_spring_cloud(cli_ctx)
    return client.bindings
def cf_config_servers(cli_ctx, *_):
    """Client factory for the `config_servers` operation group."""
    client = cf_spring_cloud(cli_ctx)
    return client.config_servers
def cf_certificates(cli_ctx, *_):
    """Client factory for the `certificates` operation group."""
    client = cf_spring_cloud(cli_ctx)
    return client.certificates
def cf_custom_domains(cli_ctx, *_):
    """Return the custom-domains operations group of the default Spring Cloud client."""
    client = cf_spring_cloud(cli_ctx)
    return client.custom_domains
| 35.410959 | 94 | 0.765957 |
4f0cde93cda0f8c6aa10d7d94c84e58a2100240b | 3,206 | py | Python | backtoshops/countries/views.py | RaphaelPrevost/Back2Shops | 5f2d369e82fe2a7b9b3a6c55782319b23d142dfd | [
"CECILL-B"
] | null | null | null | backtoshops/countries/views.py | RaphaelPrevost/Back2Shops | 5f2d369e82fe2a7b9b3a6c55782319b23d142dfd | [
"CECILL-B"
] | 6 | 2021-03-31T19:21:50.000Z | 2022-01-13T01:46:09.000Z | backtoshops/countries/views.py | RaphaelPrevost/Back2Shops | 5f2d369e82fe2a7b9b3a6c55782319b23d142dfd | [
"CECILL-B"
] | null | null | null | # -*- coding: utf-8 -*-
#############################################################################
#
# Copyright © Dragon Dollar Limited
# contact: contact@dragondollar.com
#
# This software is a collection of webservices designed to provide a secure
# and scalable framework to build e-commerce websites.
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-B
# license as circulated by CEA, CNRS and INRIA at the following URL
# " http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
#
#############################################################################
import json
from django.http import HttpResponse
from countries.models import CaProvince
from countries.models import CnProvince
from countries.models import CountryXCurrency
from countries.models import UsState
from sales.models import ProductCurrency
# Maps an ISO 3166 country code to the model holding that country's
# states/provinces; countries absent from this map have no state list.
COUNTRY_STATE_MODEL_MAP = {
    'US': UsState,
    'CA': CaProvince,
    'CN': CnProvince,
}
def get_country_states(request, *args, **kwargs):
    """Return the states/provinces of the country given by the 'cid' kwarg.

    The response is a JSON list of {'label': <name>, 'value': <abbrev>}
    objects; it is empty when the country is unknown or has no state model.
    """
    country_iso = kwargs.get('cid', None)
    options = []
    if country_iso:
        state_model = COUNTRY_STATE_MODEL_MAP.get(country_iso, None)
        if state_model:
            options = [{'label': state.name, 'value': state.abbrev}
                       for state in state_model.objects.all()]
    return HttpResponse(json.dumps(options), mimetype='application/json')
def get_country_x_currency(request, *args, **kwargs):
    """Return a JSON object mapping country id -> currency code.

    Only currencies present in ProductCurrency are considered; when a country
    has several eligible currencies, the first one encountered is kept.
    """
    # Materialize once as a set: repeated 'in' tests against a lazy QuerySet
    # are O(n) each (and may hit the database again), set membership is O(1).
    product_currencies = set(
        ProductCurrency.objects.all().values_list('code', flat=True))
    mapping = {}
    for country, currency in CountryXCurrency.objects.all().values_list(
            'country_id', 'currency'):
        if currency in product_currencies and country not in mapping:
            mapping[country] = currency
    return HttpResponse(json.dumps(mapping), mimetype='application/json')
| 41.636364 | 80 | 0.687461 |
b17d477e71c0314f3548bda2e1fcd538fe474bf2 | 1,162 | py | Python | gradient_text.py | crealu/py-writing | 7147063bbc7f5f133946f5f4627e960fd3da2a55 | [
"MIT"
] | null | null | null | gradient_text.py | crealu/py-writing | 7147063bbc7f5f133946f5f4627e960fd3da2a55 | [
"MIT"
] | null | null | null | gradient_text.py | crealu/py-writing | 7147063bbc7f5f133946f5f4627e960fd3da2a55 | [
"MIT"
] | null | null | null | from PIL import Image, ImageFilter
from PIL import ImageFont, ImageDraw
from gradient_math import get_gradation_3d as gg3
import numpy as np
import os
import random
currentDir = os.getcwd()
img1 = Image.open(currentDir + '/img/foliage.jpg')
width = img1.size[0]
height = img1.size[1]
eachPixel = []
widthRange = range(0, width)
for w in widthRange:
pixel = img1.getpixel((w, 10))
eachPixel.append(pixel)
# gradient number
gn = 0
while gn < 5:
random1 = eachPixel[random.randint(0, len(eachPixel))]
random2 = eachPixel[random.randint(0, len(eachPixel))]
array = gg3(512, 256, random1, random2, (True, True, True))
newGradient = Image.fromarray(np.uint8(array))
draw = ImageDraw.Draw(newGradient)
font = ImageFont.truetype("Comfortaa[wght].ttf", 18)
draw.text((10, 10), "Color 1: \n" + str(random1), (255, 255, 255), font=font)
draw.text((380, 10), "Color 2: \n" + str(random2), (255, 255, 255), font=font)
newGradient.show()
keep = input('Keep this gradient? (y/n) ')
if (keep is 'y'):
newGradient.save(currentDir + '/gradients/random_gradient' + str(gn) + '.jpg', quality=95)
gn += 1
| 25.822222 | 99 | 0.666093 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.