hexsha
stringlengths
40
40
size
int64
4
1.02M
ext
stringclasses
8 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
209
max_stars_repo_name
stringlengths
5
121
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
209
max_issues_repo_name
stringlengths
5
121
max_issues_repo_head_hexsha
stringlengths
40
40
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
209
max_forks_repo_name
stringlengths
5
121
max_forks_repo_head_hexsha
stringlengths
40
40
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
4
1.02M
avg_line_length
float64
1.07
66.1k
max_line_length
int64
4
266k
alphanum_fraction
float64
0.01
1
cfd7a9144a8f14bba80673d9aa02999583a7a780
2,774
py
Python
oldtest/4tf.py
han20192019/contextmbo
d188f0fd0043cdfd37d9660f37682eef053044fd
[ "MIT" ]
null
null
null
oldtest/4tf.py
han20192019/contextmbo
d188f0fd0043cdfd37d9660f37682eef053044fd
[ "MIT" ]
null
null
null
oldtest/4tf.py
han20192019/contextmbo
d188f0fd0043cdfd37d9660f37682eef053044fd
[ "MIT" ]
null
null
null
from design_baselines.cbas import cbas from design_baselines.rep import rep from design_baselines.rep_coms_cleaned import coms_cleaned seed = [1] mmd_param = [100, 500, 1000] #[0.1, 0, 10, 100, 500, 1000] dim = 32 #16,32,64,128,256 for s in seed: for m in mmd_param: coms_cleaned(logging_dir = "/nfs/kun2/users/hanqi2019/1207discrete/tf8/tfrep"+str(dim)+"seed"+str(s)+"mmd"+str(m), task='TFBind8-Exact-v0', #ToyContinuous-Exact-V0,#'AntMorphology-Exact-v0', #HopperController-Exact-v0 #Discrete: TFBind8-Exact-v0 task_relabel=True, normalize_ys=True, normalize_xs=True, in_latent_space=False, vae_hidden_size=64, vae_latent_size=256, vae_activation='relu', vae_kernel_size=3, vae_num_blocks=4, vae_lr=0.0003, vae_beta=1.0, vae_batch_size=32, vae_val_size=200, vae_epochs=10, particle_lr=0.05, particle_train_gradient_steps=50, particle_evaluate_gradient_steps=50, particle_entropy_coefficient=0.0, forward_model_activations=['relu', 'relu'], forward_model_hidden_size=2048, forward_model_final_tanh=False, forward_model_lr=0.0003, forward_model_alpha=0.1, forward_model_alpha_lr=0.01, forward_model_overestimation_limit=0.5, forward_model_noise_std=0.0, forward_model_batch_size=128, forward_model_val_size=200, forward_model_epochs=500, evaluation_samples=128, fast=False, latent_space_size=[dim,1], rep_model_activations=['relu', 'relu'], rep_model_lr=0.0003, rep_model_hidden_size=2048, policy_model_lr=0.0003, noise_input = [1, 10], mmd_param = m, seed = s )
55.48
162
0.425018
c6491a4494950ee46f39d64d0518cb83e710b624
1,121
py
Python
solutions/server/server-15-crud-collaborator/server/database/upload.py
FroeMic/CDTM-Backend-Workshop
de3ef16dc89dfd1217565ab2dd4aec753e59cda0
[ "MIT" ]
null
null
null
solutions/server/server-15-crud-collaborator/server/database/upload.py
FroeMic/CDTM-Backend-Workshop
de3ef16dc89dfd1217565ab2dd4aec753e59cda0
[ "MIT" ]
null
null
null
solutions/server/server-15-crud-collaborator/server/database/upload.py
FroeMic/CDTM-Backend-Workshop
de3ef16dc89dfd1217565ab2dd4aec753e59cda0
[ "MIT" ]
null
null
null
from utils import * from server import app def db_get_filenames_for_task(task_id): ''' Returns a list of all files for a tasks from the database ''' query = ''' SELECT filename FROM Uploads WHERE task = ?; ''' with app.app_context(): db = get_db() cur = db.cursor() cur.execute(query, [task_id]) db.commit() return [dict_from_row(row)['filename'] for row in cur] def db_create_file(task_id, filename): ''' Inserts a new file ''' query = ''' INSERT OR REPLACE INTO Uploads(task, filename) Values (?, ?); ''' with app.app_context(): db = get_db() cur = db.cursor() cur.execute(query, [task_id, filename]) db.commit() def db_delete_file(task_id, filename): ''' Deletes the file with the task_id and filename ''' query = ''' DELETE FROM Uploads WHERE task = ? AND filename = ?; ''' with app.app_context(): db = get_db() cur = db.cursor() cur.execute(query, [task_id, filename]) db.commit()
23.851064
69
0.55397
2fb37d0fd9a66d6f510127eff0d509763c773055
364
py
Python
train/learningrate/ExponentialLR.py
Beautyya/BenchENA
5f5491614fc2f00ca26dc29f35f44c334db4718c
[ "MIT" ]
1
2022-01-21T07:56:13.000Z
2022-01-21T07:56:13.000Z
train/learningrate/ExponentialLR.py
Beautyya/BenchENA
5f5491614fc2f00ca26dc29f35f44c334db4718c
[ "MIT" ]
1
2022-02-24T06:07:27.000Z
2022-02-24T06:07:27.000Z
BenchENAS_python_package/train/learningrate/ExponentialLR.py
benchenas/BenchENAS
776cd1dd035d73c4af369d0106d010b932f64782
[ "MIT" ]
1
2022-01-20T07:36:37.000Z
2022-01-20T07:36:37.000Z
# coding=utf-8 from train.learningrate.learningrate import BaseLearningRate import torch class ExponentialLR(BaseLearningRate): """ExponentialLR """ def __init__(self, **kwargs): super(ExponentialLR, self).__init__(**kwargs) def get_learning_rate(self): return torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=0.2)
22.75
80
0.728022
beef756529228c025c6f9f8eed23fe270af48270
100
py
Python
jayson/__main__.py
pyrustic/jayson
31338a6739dc13f574e6fe5df7a2520c973c89d8
[ "MIT" ]
null
null
null
jayson/__main__.py
pyrustic/jayson
31338a6739dc13f574e6fe5df7a2520c973c89d8
[ "MIT" ]
null
null
null
jayson/__main__.py
pyrustic/jayson
31338a6739dc13f574e6fe5df7a2520c973c89d8
[ "MIT" ]
null
null
null
def main(): print("https://github.com/pyrustic/jayson") if __name__ == "__main__": main()
14.285714
47
0.62
fadc699c176bd3c53841ed83e4c8c08cb4054e84
690
py
Python
chap8/mxnet/score_model.py
wang420349864/dlcv_for_beginners
080c7d3bbb4a68e4fb79e33231ccc666ada16dcc
[ "BSD-3-Clause" ]
1,424
2017-01-04T12:08:01.000Z
2022-03-31T02:57:24.000Z
chap8/mxnet/score_model.py
wang420349864/dlcv_for_beginners
080c7d3bbb4a68e4fb79e33231ccc666ada16dcc
[ "BSD-3-Clause" ]
39
2017-03-16T08:48:28.000Z
2021-03-03T11:30:23.000Z
chap8/mxnet/score_model.py
wang420349864/dlcv_for_beginners
080c7d3bbb4a68e4fb79e33231ccc666ada16dcc
[ "BSD-3-Clause" ]
703
2017-02-22T19:35:45.000Z
2022-03-21T01:31:27.000Z
import mxnet as mx test_dataiter = mx.io.ImageRecordIter( path_imgrec="../data/test.rec", data_shape=(1, 28, 28), batch_size=100, mean_r=128, scale=0.00390625, ) mod = mx.mod.Module.load('mnist_lenet', 35, context=mx.gpu(2)) mod.bind( data_shapes=test_dataiter.provide_data, label_shapes=test_dataiter.provide_label, for_training=False) ''' # in case we need to continue to train from epoch 35 mod.fit(..., arg_params=arg_params, aux_params=aux_params, begin_epoch=35) ''' metric = mx.metric.create('acc') mod.score(test_dataiter, metric) for name, val in metric.get_name_value(): print('{}={:.2f}%'.format(name, val*100))
23
62
0.675362
125b524698618e946f499b8040dce7a651124379
33,233
py
Python
python/kserve/kserve/models/v1beta1_paddle_server_spec.py
Suresh-Nakkeran/kserve
d3910e0fc6af4bf73156a53bd912d6e4acc87533
[ "Apache-2.0" ]
null
null
null
python/kserve/kserve/models/v1beta1_paddle_server_spec.py
Suresh-Nakkeran/kserve
d3910e0fc6af4bf73156a53bd912d6e4acc87533
[ "Apache-2.0" ]
null
null
null
python/kserve/kserve/models/v1beta1_paddle_server_spec.py
Suresh-Nakkeran/kserve
d3910e0fc6af4bf73156a53bd912d6e4acc87533
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 kubeflow.org. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # coding: utf-8 """ KServe Python SDK for KServe # noqa: E501 The version of the OpenAPI document: v0.1 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from kserve.configuration import Configuration class V1beta1PaddleServerSpec(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'args': 'list[str]', 'command': 'list[str]', 'env': 'list[V1EnvVar]', 'env_from': 'list[V1EnvFromSource]', 'image': 'str', 'image_pull_policy': 'str', 'lifecycle': 'V1Lifecycle', 'liveness_probe': 'V1Probe', 'name': 'str', 'ports': 'list[V1ContainerPort]', 'protocol_version': 'str', 'readiness_probe': 'V1Probe', 'resources': 'V1ResourceRequirements', 'runtime_version': 'str', 'security_context': 'V1SecurityContext', 'startup_probe': 'V1Probe', 'stdin': 'bool', 'stdin_once': 'bool', 'storage_uri': 'str', 'termination_message_path': 'str', 'termination_message_policy': 'str', 'tty': 'bool', 'volume_devices': 'list[V1VolumeDevice]', 'volume_mounts': 'list[V1VolumeMount]', 'working_dir': 'str' } attribute_map = { 'args': 'args', 'command': 'command', 'env': 'env', 'env_from': 'envFrom', 'image': 'image', 'image_pull_policy': 'imagePullPolicy', 'lifecycle': 'lifecycle', 'liveness_probe': 'livenessProbe', 'name': 'name', 'ports': 'ports', 'protocol_version': 'protocolVersion', 'readiness_probe': 'readinessProbe', 'resources': 'resources', 'runtime_version': 'runtimeVersion', 'security_context': 'securityContext', 'startup_probe': 'startupProbe', 'stdin': 'stdin', 'stdin_once': 'stdinOnce', 'storage_uri': 'storageUri', 'termination_message_path': 'terminationMessagePath', 'termination_message_policy': 'terminationMessagePolicy', 'tty': 'tty', 'volume_devices': 'volumeDevices', 'volume_mounts': 'volumeMounts', 'working_dir': 'workingDir' } def __init__(self, args=None, command=None, env=None, env_from=None, image=None, image_pull_policy=None, lifecycle=None, liveness_probe=None, name=None, ports=None, protocol_version=None, readiness_probe=None, resources=None, runtime_version=None, security_context=None, startup_probe=None, stdin=None, stdin_once=None, storage_uri=None, termination_message_path=None, termination_message_policy=None, tty=None, volume_devices=None, volume_mounts=None, working_dir=None, local_vars_configuration=None): # noqa: E501 
"""V1beta1PaddleServerSpec - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._args = None self._command = None self._env = None self._env_from = None self._image = None self._image_pull_policy = None self._lifecycle = None self._liveness_probe = None self._name = None self._ports = None self._protocol_version = None self._readiness_probe = None self._resources = None self._runtime_version = None self._security_context = None self._startup_probe = None self._stdin = None self._stdin_once = None self._storage_uri = None self._termination_message_path = None self._termination_message_policy = None self._tty = None self._volume_devices = None self._volume_mounts = None self._working_dir = None self.discriminator = None if args is not None: self.args = args if command is not None: self.command = command if env is not None: self.env = env if env_from is not None: self.env_from = env_from if image is not None: self.image = image if image_pull_policy is not None: self.image_pull_policy = image_pull_policy if lifecycle is not None: self.lifecycle = lifecycle if liveness_probe is not None: self.liveness_probe = liveness_probe if name is not None: self.name = name if ports is not None: self.ports = ports if protocol_version is not None: self.protocol_version = protocol_version if readiness_probe is not None: self.readiness_probe = readiness_probe if resources is not None: self.resources = resources if runtime_version is not None: self.runtime_version = runtime_version if security_context is not None: self.security_context = security_context if startup_probe is not None: self.startup_probe = startup_probe if stdin is not None: self.stdin = stdin if stdin_once is not None: self.stdin_once = stdin_once if storage_uri is not None: self.storage_uri = storage_uri if termination_message_path is not None: self.termination_message_path = 
termination_message_path if termination_message_policy is not None: self.termination_message_policy = termination_message_policy if tty is not None: self.tty = tty if volume_devices is not None: self.volume_devices = volume_devices if volume_mounts is not None: self.volume_mounts = volume_mounts if working_dir is not None: self.working_dir = working_dir @property def args(self): """Gets the args of this V1beta1PaddleServerSpec. # noqa: E501 Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501 :return: The args of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: list[str] """ return self._args @args.setter def args(self, args): """Sets the args of this V1beta1PaddleServerSpec. Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501 :param args: The args of this V1beta1PaddleServerSpec. # noqa: E501 :type: list[str] """ self._args = args @property def command(self): """Gets the command of this V1beta1PaddleServerSpec. 
# noqa: E501 Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501 :return: The command of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: list[str] """ return self._command @command.setter def command(self, command): """Sets the command of this V1beta1PaddleServerSpec. Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501 :param command: The command of this V1beta1PaddleServerSpec. # noqa: E501 :type: list[str] """ self._command = command @property def env(self): """Gets the env of this V1beta1PaddleServerSpec. # noqa: E501 List of environment variables to set in the container. Cannot be updated. # noqa: E501 :return: The env of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: list[V1EnvVar] """ return self._env @env.setter def env(self, env): """Sets the env of this V1beta1PaddleServerSpec. List of environment variables to set in the container. Cannot be updated. 
# noqa: E501 :param env: The env of this V1beta1PaddleServerSpec. # noqa: E501 :type: list[V1EnvVar] """ self._env = env @property def env_from(self): """Gets the env_from of this V1beta1PaddleServerSpec. # noqa: E501 List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501 :return: The env_from of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: list[V1EnvFromSource] """ return self._env_from @env_from.setter def env_from(self, env_from): """Sets the env_from of this V1beta1PaddleServerSpec. List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501 :param env_from: The env_from of this V1beta1PaddleServerSpec. # noqa: E501 :type: list[V1EnvFromSource] """ self._env_from = env_from @property def image(self): """Gets the image of this V1beta1PaddleServerSpec. # noqa: E501 Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. # noqa: E501 :return: The image of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: str """ return self._image @image.setter def image(self, image): """Sets the image of this V1beta1PaddleServerSpec. Docker image name. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. # noqa: E501 :param image: The image of this V1beta1PaddleServerSpec. # noqa: E501 :type: str """ self._image = image @property def image_pull_policy(self): """Gets the image_pull_policy of this V1beta1PaddleServerSpec. # noqa: E501 Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501 :return: The image_pull_policy of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: str """ return self._image_pull_policy @image_pull_policy.setter def image_pull_policy(self, image_pull_policy): """Sets the image_pull_policy of this V1beta1PaddleServerSpec. Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501 :param image_pull_policy: The image_pull_policy of this V1beta1PaddleServerSpec. # noqa: E501 :type: str """ self._image_pull_policy = image_pull_policy @property def lifecycle(self): """Gets the lifecycle of this V1beta1PaddleServerSpec. # noqa: E501 :return: The lifecycle of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: V1Lifecycle """ return self._lifecycle @lifecycle.setter def lifecycle(self, lifecycle): """Sets the lifecycle of this V1beta1PaddleServerSpec. :param lifecycle: The lifecycle of this V1beta1PaddleServerSpec. # noqa: E501 :type: V1Lifecycle """ self._lifecycle = lifecycle @property def liveness_probe(self): """Gets the liveness_probe of this V1beta1PaddleServerSpec. # noqa: E501 :return: The liveness_probe of this V1beta1PaddleServerSpec. 
# noqa: E501 :rtype: V1Probe """ return self._liveness_probe @liveness_probe.setter def liveness_probe(self, liveness_probe): """Sets the liveness_probe of this V1beta1PaddleServerSpec. :param liveness_probe: The liveness_probe of this V1beta1PaddleServerSpec. # noqa: E501 :type: V1Probe """ self._liveness_probe = liveness_probe @property def name(self): """Gets the name of this V1beta1PaddleServerSpec. # noqa: E501 Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. # noqa: E501 :return: The name of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this V1beta1PaddleServerSpec. Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. # noqa: E501 :param name: The name of this V1beta1PaddleServerSpec. # noqa: E501 :type: str """ self._name = name @property def ports(self): """Gets the ports of this V1beta1PaddleServerSpec. # noqa: E501 List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated. # noqa: E501 :return: The ports of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: list[V1ContainerPort] """ return self._ports @ports.setter def ports(self, ports): """Sets the ports of this V1beta1PaddleServerSpec. List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. 
Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated. # noqa: E501 :param ports: The ports of this V1beta1PaddleServerSpec. # noqa: E501 :type: list[V1ContainerPort] """ self._ports = ports @property def protocol_version(self): """Gets the protocol_version of this V1beta1PaddleServerSpec. # noqa: E501 Protocol version to use by the predictor (i.e. v1 or v2) # noqa: E501 :return: The protocol_version of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: str """ return self._protocol_version @protocol_version.setter def protocol_version(self, protocol_version): """Sets the protocol_version of this V1beta1PaddleServerSpec. Protocol version to use by the predictor (i.e. v1 or v2) # noqa: E501 :param protocol_version: The protocol_version of this V1beta1PaddleServerSpec. # noqa: E501 :type: str """ self._protocol_version = protocol_version @property def readiness_probe(self): """Gets the readiness_probe of this V1beta1PaddleServerSpec. # noqa: E501 :return: The readiness_probe of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: V1Probe """ return self._readiness_probe @readiness_probe.setter def readiness_probe(self, readiness_probe): """Sets the readiness_probe of this V1beta1PaddleServerSpec. :param readiness_probe: The readiness_probe of this V1beta1PaddleServerSpec. # noqa: E501 :type: V1Probe """ self._readiness_probe = readiness_probe @property def resources(self): """Gets the resources of this V1beta1PaddleServerSpec. # noqa: E501 :return: The resources of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: V1ResourceRequirements """ return self._resources @resources.setter def resources(self, resources): """Sets the resources of this V1beta1PaddleServerSpec. :param resources: The resources of this V1beta1PaddleServerSpec. 
# noqa: E501 :type: V1ResourceRequirements """ self._resources = resources @property def runtime_version(self): """Gets the runtime_version of this V1beta1PaddleServerSpec. # noqa: E501 Runtime version of the predictor docker image # noqa: E501 :return: The runtime_version of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: str """ return self._runtime_version @runtime_version.setter def runtime_version(self, runtime_version): """Sets the runtime_version of this V1beta1PaddleServerSpec. Runtime version of the predictor docker image # noqa: E501 :param runtime_version: The runtime_version of this V1beta1PaddleServerSpec. # noqa: E501 :type: str """ self._runtime_version = runtime_version @property def security_context(self): """Gets the security_context of this V1beta1PaddleServerSpec. # noqa: E501 :return: The security_context of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: V1SecurityContext """ return self._security_context @security_context.setter def security_context(self, security_context): """Sets the security_context of this V1beta1PaddleServerSpec. :param security_context: The security_context of this V1beta1PaddleServerSpec. # noqa: E501 :type: V1SecurityContext """ self._security_context = security_context @property def startup_probe(self): """Gets the startup_probe of this V1beta1PaddleServerSpec. # noqa: E501 :return: The startup_probe of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: V1Probe """ return self._startup_probe @startup_probe.setter def startup_probe(self, startup_probe): """Sets the startup_probe of this V1beta1PaddleServerSpec. :param startup_probe: The startup_probe of this V1beta1PaddleServerSpec. # noqa: E501 :type: V1Probe """ self._startup_probe = startup_probe @property def stdin(self): """Gets the stdin of this V1beta1PaddleServerSpec. # noqa: E501 Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. 
Default is false. # noqa: E501 :return: The stdin of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: bool """ return self._stdin @stdin.setter def stdin(self, stdin): """Sets the stdin of this V1beta1PaddleServerSpec. Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501 :param stdin: The stdin of this V1beta1PaddleServerSpec. # noqa: E501 :type: bool """ self._stdin = stdin @property def stdin_once(self): """Gets the stdin_once of this V1beta1PaddleServerSpec. # noqa: E501 Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false # noqa: E501 :return: The stdin_once of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: bool """ return self._stdin_once @stdin_once.setter def stdin_once(self, stdin_once): """Sets the stdin_once of this V1beta1PaddleServerSpec. Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false # noqa: E501 :param stdin_once: The stdin_once of this V1beta1PaddleServerSpec. # noqa: E501 :type: bool """ self._stdin_once = stdin_once @property def storage_uri(self): """Gets the storage_uri of this V1beta1PaddleServerSpec. # noqa: E501 This field points to the location of the trained model which is mounted onto the pod. # noqa: E501 :return: The storage_uri of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: str """ return self._storage_uri @storage_uri.setter def storage_uri(self, storage_uri): """Sets the storage_uri of this V1beta1PaddleServerSpec. This field points to the location of the trained model which is mounted onto the pod. # noqa: E501 :param storage_uri: The storage_uri of this V1beta1PaddleServerSpec. # noqa: E501 :type: str """ self._storage_uri = storage_uri @property def termination_message_path(self): """Gets the termination_message_path of this V1beta1PaddleServerSpec. # noqa: E501 Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501 :return: The termination_message_path of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: str """ return self._termination_message_path @termination_message_path.setter def termination_message_path(self, termination_message_path): """Sets the termination_message_path of this V1beta1PaddleServerSpec. Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. 
The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501 :param termination_message_path: The termination_message_path of this V1beta1PaddleServerSpec. # noqa: E501 :type: str """ self._termination_message_path = termination_message_path @property def termination_message_policy(self): """Gets the termination_message_policy of this V1beta1PaddleServerSpec. # noqa: E501 Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501 :return: The termination_message_policy of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: str """ return self._termination_message_policy @termination_message_policy.setter def termination_message_policy(self, termination_message_policy): """Sets the termination_message_policy of this V1beta1PaddleServerSpec. Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501 :param termination_message_policy: The termination_message_policy of this V1beta1PaddleServerSpec. # noqa: E501 :type: str """ self._termination_message_policy = termination_message_policy @property def tty(self): """Gets the tty of this V1beta1PaddleServerSpec. 
# noqa: E501 Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501 :return: The tty of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: bool """ return self._tty @tty.setter def tty(self, tty): """Sets the tty of this V1beta1PaddleServerSpec. Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501 :param tty: The tty of this V1beta1PaddleServerSpec. # noqa: E501 :type: bool """ self._tty = tty @property def volume_devices(self): """Gets the volume_devices of this V1beta1PaddleServerSpec. # noqa: E501 volumeDevices is the list of block devices to be used by the container. # noqa: E501 :return: The volume_devices of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: list[V1VolumeDevice] """ return self._volume_devices @volume_devices.setter def volume_devices(self, volume_devices): """Sets the volume_devices of this V1beta1PaddleServerSpec. volumeDevices is the list of block devices to be used by the container. # noqa: E501 :param volume_devices: The volume_devices of this V1beta1PaddleServerSpec. # noqa: E501 :type: list[V1VolumeDevice] """ self._volume_devices = volume_devices @property def volume_mounts(self): """Gets the volume_mounts of this V1beta1PaddleServerSpec. # noqa: E501 Pod volumes to mount into the container's filesystem. Cannot be updated. # noqa: E501 :return: The volume_mounts of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: list[V1VolumeMount] """ return self._volume_mounts @volume_mounts.setter def volume_mounts(self, volume_mounts): """Sets the volume_mounts of this V1beta1PaddleServerSpec. Pod volumes to mount into the container's filesystem. Cannot be updated. # noqa: E501 :param volume_mounts: The volume_mounts of this V1beta1PaddleServerSpec. 
# noqa: E501 :type: list[V1VolumeMount] """ self._volume_mounts = volume_mounts @property def working_dir(self): """Gets the working_dir of this V1beta1PaddleServerSpec. # noqa: E501 Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501 :return: The working_dir of this V1beta1PaddleServerSpec. # noqa: E501 :rtype: str """ return self._working_dir @working_dir.setter def working_dir(self, working_dir): """Sets the working_dir of this V1beta1PaddleServerSpec. Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501 :param working_dir: The working_dir of this V1beta1PaddleServerSpec. # noqa: E501 :type: str """ self._working_dir = working_dir def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1beta1PaddleServerSpec): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1beta1PaddleServerSpec): return True return self.to_dict() != other.to_dict()
41.697616
616
0.678362
bb4c22f167797e4d81f6bce0bc00b0b63457c675
28
py
Python
vis/__init__.py
JessikaSmith/OptimizationAlgorithms
bf0f871f4d6150e1e7533360cfc6f70eb616c870
[ "MIT" ]
15
2018-11-16T04:42:44.000Z
2020-03-20T16:00:47.000Z
vis/__init__.py
JessikaSmith/OptimizationAlgorithms
bf0f871f4d6150e1e7533360cfc6f70eb616c870
[ "MIT" ]
null
null
null
vis/__init__.py
JessikaSmith/OptimizationAlgorithms
bf0f871f4d6150e1e7533360cfc6f70eb616c870
[ "MIT" ]
3
2019-01-17T13:18:56.000Z
2019-12-17T22:22:48.000Z
from vis.tsp_tools import *
14
27
0.785714
8089357fd39fffef32ac991adb7c98ca9d05a2b5
5,227
py
Python
samples/feather_damage/video_detection2.py
clmpng/Mask_RCNN
0a90ce2710284aef29c8198b5e1a0ea5d440f1cd
[ "MIT" ]
null
null
null
samples/feather_damage/video_detection2.py
clmpng/Mask_RCNN
0a90ce2710284aef29c8198b5e1a0ea5d440f1cd
[ "MIT" ]
null
null
null
samples/feather_damage/video_detection2.py
clmpng/Mask_RCNN
0a90ce2710284aef29c8198b5e1a0ea5d440f1cd
[ "MIT" ]
null
null
null
import os
import sys
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
import cv2
import argparse

# Root directory of the project
ROOT_DIR = '../../'
assert os.path.exists(ROOT_DIR), 'ROOT_DIR does not exist. Did you forget to read the instructions above? ;)'

# Import mrcnn libraries
sys.path.append(ROOT_DIR)
from mrcnn.config import Config
import mrcnn.utils as utils
from mrcnn import visualize
import mrcnn.model as modellib


def apply_mask(image, mask, color, alpha=0.5):
    """Alpha-blend ``color`` onto ``image`` wherever ``mask`` == 1.

    Mutates ``image`` channel by channel and returns it.
    """
    for n, c in enumerate(color):
        image[:, :, n] = np.where(
            mask == 1,
            image[:, :, n] * (1 - alpha) + alpha * c,
            image[:, :, n]
        )
    return image


def display_instances(image, boxes, masks, class_ids, class_names, scores=None):
    """Draw detections (mask, bounding box, caption) onto ``image``.

    :param boxes: (N, 4) array of [y1, x1, y2, x2]
    :param masks: (H, W, N) binary masks
    :param class_ids: (N,) indices into ``class_names``
    :param scores: optional (N,) confidences shown in the caption
    :return: the annotated image
    """
    assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
    N = boxes.shape[0]
    # Fix: the original had a duplicated assignment (``colors = colors = ...``).
    colors = [tuple(255 * np.random.rand(3)) for _ in range(N)]
    for i, c in enumerate(colors):
        # Skip padded/empty detections (all-zero box).
        if not np.any(boxes[i]):
            continue
        y1, x1, y2, x2 = boxes[i]
        label = class_names[class_ids[i]]
        score = scores[i] if scores is not None else None
        caption = "{} {:.3f}".format(label, score) if score else label
        # Blend the instance mask, then draw box and caption on top.
        mask = masks[:, :, i]
        image = apply_mask(image, mask, c)
        image = cv2.rectangle(image, (x1, y1), (x2, y2), c, 2)
        image = cv2.putText(image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, c, 2)
    return image


class InferenceConfig(Config):
    """Inference configuration for the feather-damage (hen) dataset.

    Derives from the base Config class and overrides values specific to
    this dataset.
    """
    # Give the configuration a recognizable name
    NAME = "feather__damage"

    # Train on 1 GPU and 1 image per GPU. Batch size is 1 (GPUs * images/GPU).
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

    # Number of classes (including background)
    NUM_CLASSES = 1 + 1  # background + 1 (hen)

    # All of our training images are 512x512
    IMAGE_MIN_DIM = None
    IMAGE_MAX_DIM = 512  # must be dividable by 2 at least 6 times

    # You can experiment with this number to see if it improves training
    STEPS_PER_EPOCH = 50  # 500

    # This is how often validation is run. If you are using too much hard drive space
    # on saved models (in the MODEL_DIR), try making this value larger.
    VALIDATION_STEPS = 10  # 5

    # Matterport originally used resnet101, but I downsized to fit it on my graphics card
    BACKBONE = 'resnet50'

    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)
    TRAIN_ROIS_PER_IMAGE = 32
    MAX_GT_INSTANCES = 20
    POST_NMS_ROIS_INFERENCE = 500
    POST_NMS_ROIS_TRAINING = 1000
    USE_MINI_MASK = False


inference_config = InferenceConfig()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='MaskRCNN Video Object Detection/Instance Segmentation')
    parser.add_argument('-v', '--video_path', type=str, default='', help='Path to video. If None camera will be used')
    parser.add_argument('-sp', '--save_path', type=str, default='', help='Path to save the output. If None output won\'t be saved')
    parser.add_argument('-s', '--show', default=True, action="store_false", help='Show output')
    args = parser.parse_args()

    # Directory to save logs and trained model
    MODEL_DIR = "/media/christian/SamsungSSD/tensorflow_logs/"

    # Local path to trained weights file
    COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")

    # Download COCO trained weights from Releases if needed
    if not os.path.exists(COCO_MODEL_PATH):
        utils.download_trained_weights(COCO_MODEL_PATH)

    # NOTE(review): Mask R-CNN class ids conventionally start at 1 with id 0 =
    # background; if r['class_ids'] contains values >= 1 this list may need a
    # leading 'BG' entry -- confirm against the training dataset.
    class_names = ['hen']

    config = InferenceConfig()

    # Create model object in inference mode.
    model = modellib.MaskRCNN(mode="inference", config=inference_config, model_dir=MODEL_DIR)

    # Load weights
    model_path = os.path.join(ROOT_DIR, "/media/christian/SamsungSSD/tensorflow_logs/score_training_700_epochs_3_scores_extra_fc_layer/mask_rcnn_feather__damage_0699.h5")
    print("Loading weights from ", model_path)
    model.load_weights(model_path, by_name=True)

    if args.video_path != '':
        cap = cv2.VideoCapture(args.video_path)
    else:
        cap = cv2.VideoCapture(0)

    if args.save_path:
        width = int(cap.get(3))
        height = int(cap.get(4))
        fps = cap.get(cv2.CAP_PROP_FPS)
        out = cv2.VideoWriter(args.save_path, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, (width, height))

    while cap.isOpened():
        ret, image = cap.read()
        # Fix: stop cleanly when the stream ends instead of passing a None
        # frame into the model (cap.read() returns ret=False at end of video).
        if not ret:
            break

        results = model.detect([image], verbose=1)
        r = results[0]
        # Fix: the original passed undefined ``dataset_val.class_names`` plus
        # an extra ``r['class_scores']`` positional argument, which does not
        # match ``display_instances``'s signature (TypeError at runtime).
        image = display_instances(image[..., :3], r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])

        if args.show:
            cv2.imshow('MaskRCNN Object Detection/Instance Segmentation', image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        if args.save_path:
            out.write(image)

    cap.release()
    if args.save_path:
        out.release()
    cv2.destroyAllWindows()
35.80137
170
0.652956
32b7e517851b835c9a99f97b5238e43225a1af4f
2,905
py
Python
httpie/legacy/v3_1_0_session_cookie_format.py
10088/httpie
dc5274e491b48d8294d530eb12186df28bf9d1b7
[ "BSD-3-Clause" ]
2
2022-01-31T18:18:58.000Z
2022-01-31T18:26:35.000Z
httpie/legacy/v3_1_0_session_cookie_format.py
10088/httpie
dc5274e491b48d8294d530eb12186df28bf9d1b7
[ "BSD-3-Clause" ]
2
2022-03-09T06:26:07.000Z
2022-03-09T06:26:17.000Z
httpie/legacy/v3_1_0_session_cookie_format.py
10088/httpie
dc5274e491b48d8294d530eb12186df28bf9d1b7
[ "BSD-3-Clause" ]
null
null
null
import argparse
from typing import Any, Type, List, Dict, TYPE_CHECKING

if TYPE_CHECKING:
    from httpie.sessions import Session

INSECURE_COOKIE_JAR_WARNING = '''\
Outdated layout detected for the current session. Please consider updating it,
in order to not get affected by potential security problems.

For fixing the current session:

    With binding all cookies to the current host (secure):
        $ httpie cli sessions upgrade --bind-cookies {hostname} {session_id}

    Without binding cookies (leaving them as is) (insecure):
        $ httpie cli sessions upgrade {hostname} {session_id}
'''

INSECURE_COOKIE_JAR_WARNING_FOR_NAMED_SESSIONS = '''\

For fixing all named sessions:

    With binding all cookies to the current host (secure):
        $ httpie cli sessions upgrade-all --bind-cookies

    Without binding cookies (leaving them as is) (insecure):
        $ httpie cli sessions upgrade-all
'''

INSECURE_COOKIE_SECURITY_LINK = '\nSee https://pie.co/docs/security for more information.'


def pre_process(session: 'Session', cookies: Any) -> List[Dict[str, Any]]:
    """Normalize cookies into the current list-of-dicts layout.

    Accepts both the legacy dict layout (``{name: attrs}``) and the current
    list layout. When a legacy jar contains a cookie without a domain, a
    one-time security warning is emitted on the session.
    """
    legacy_layout = isinstance(cookies, dict)
    if not legacy_layout:
        normalized_cookies = cookies
    else:
        normalized_cookies = [
            {'name': cookie_name, **attributes}
            for cookie_name, attributes in cookies.items()
        ]

    if legacy_layout and any(
        entry.get('domain', '') == '' for entry in normalized_cookies
    ):
        warning = INSECURE_COOKIE_JAR_WARNING.format(
            hostname=session.bound_host, session_id=session.session_id
        )
        if not session.is_anonymous:
            warning += INSECURE_COOKIE_JAR_WARNING_FOR_NAMED_SESSIONS
        warning += INSECURE_COOKIE_SECURITY_LINK
        session.warn_legacy_usage(warning)

    return normalized_cookies


def post_process(
    normalized_cookies: List[Dict[str, Any]],
    *,
    original_type: Type[Any]
) -> Any:
    """Convert the cookies back to their original container type.

    For dict-style sessions the ``name`` key is popped out of each entry and
    used as the mapping key (mutating the entries, as before).
    """
    if not issubclass(original_type, dict):
        return normalized_cookies
    return {entry.pop('name'): entry for entry in normalized_cookies}


def fix_layout(session: 'Session', hostname: str, args: argparse.Namespace) -> None:
    """Upgrade a session file in place from the legacy dict cookie layout."""
    if not isinstance(session['cookies'], dict):
        return None

    session['cookies'] = [
        {'name': cookie_name, **attributes}
        for cookie_name, attributes in session['cookies'].items()
    ]

    for cookie in session.cookies:
        if cookie.domain != '':
            continue
        if args.bind_cookies:
            cookie.domain = hostname
        else:
            cookie._rest['is_explicit_none'] = True
28.762376
112
0.651635
0e5650009acd217e47d867080aac660f80a0f314
1,441
py
Python
Lektion_002_Werte_einlesen.py
f-vdb/Python3-Kurs
10c01d481452165bf802bd1dba4a0e01ff666fb2
[ "CC0-1.0" ]
null
null
null
Lektion_002_Werte_einlesen.py
f-vdb/Python3-Kurs
10c01d481452165bf802bd1dba4a0e01ff666fb2
[ "CC0-1.0" ]
null
null
null
Lektion_002_Werte_einlesen.py
f-vdb/Python3-Kurs
10c01d481452165bf802bd1dba4a0e01ff666fb2
[ "CC0-1.0" ]
null
null
null
zahlA = input("Bitte gib eine ganze Zahl ein: ") zahlB = input("Bitte gib eine zweite ganze Zahl ein: ") summe = zahlA + zahlB print(summe) # Ausgabe: "2342" # Das ist nicht das was ich wollte. Ich wollte doch die Summe der beiden Zahlen ausrechnen. # Eine Idee, was hier schief geht? # Passen vlt. die Datentypen nicht mit dem zusammen, was ich möchte? # Zweiter Versuch zahlA = int(input("Bitte gib eine ganze Zahl ein: ")) zahlB = int(input("Bitte gib eine zweite ganze Zahl ein: ")) summe = zahlA + zahlB print(summe) # Jetzt funktioniert es. Aber was passiert wenn ich anstatt einer ganzen Zahl, # einen String eingebe, z.B. Hallo? # Teste mal das Programm! # Fehlermeldung: ValueError: invalid literal for int() with base 10: 'Hallo' # Ok, invalid literal for int. "Hallo" ist kein int, sondern ein String. # Aber das Programm soll auf keinen Fall abstürzen. # Es wird es etwas kompliziert: while(True): try: zahlA = int(input("Bitte eine Zahl eingeben: ")) break except: print("Du hast keine Zahl eingeben! Versuche es nochmal.\n") while(True): try: zahlB = int(input("Bitte eine zweite Zahl eingeben: ")) break except: print("Du hast keine Zahle eingeben! Versuche es nochmal.\n") summe = zahlA + zahlB print("Die Summe von " + str(zahlA) + " und " + str(zahlB) + " ist " + str(summe) + ".") # Oha, dass muss man am Anfang auswendig lernen.........
27.711538
91
0.675226
4a9b53ff2a90e1116b242bb35866d5e3c38e7fb2
3,266
py
Python
nwb_conversion_tools/datainterfaces/__init__.py
Saksham20/nwb-conversion-tools
db9e06e584c5bdde89317bfb3146d7b154667036
[ "BSD-3-Clause" ]
19
2020-05-04T18:40:36.000Z
2022-01-24T08:53:14.000Z
nwb_conversion_tools/datainterfaces/__init__.py
Saksham20/nwb-conversion-tools
db9e06e584c5bdde89317bfb3146d7b154667036
[ "BSD-3-Clause" ]
369
2020-04-06T14:20:08.000Z
2022-03-31T16:05:48.000Z
nwb_conversion_tools/datainterfaces/__init__.py
Saksham20/nwb-conversion-tools
db9e06e584c5bdde89317bfb3146d7b154667036
[ "BSD-3-Clause" ]
10
2020-03-31T20:06:00.000Z
2022-03-26T08:25:49.000Z
from .ecephys.tutorial.recordingtutorialdatainterface import RecordingTutorialInterface from .ecephys.tutorial.sortingtutorialdatainterface import SortingTutorialInterface from .ecephys.neuroscope.neuroscopedatainterface import ( NeuroscopeRecordingInterface, NeuroscopeLFPInterface, NeuroscopeMultiRecordingTimeInterface, NeuroscopeSortingInterface, ) from .ecephys.spikeglx.spikeglxdatainterface import SpikeGLXRecordingInterface, SpikeGLXLFPInterface from .ecephys.spikegadgets.spikegadgetsdatainterface import SpikeGadgetsRecordingInterface from .ecephys.spikeinterface.sipickledatainterfaces import ( SIPickleRecordingExtractorInterface, SIPickleSortingExtractorInterface, ) from .ecephys.intan.intandatainterface import IntanRecordingInterface from .ecephys.ced.ceddatainterface import CEDRecordingInterface from .ecephys.cellexplorer.cellexplorerdatainterface import CellExplorerSortingInterface from .ecephys.blackrock.blackrockdatainterface import ( BlackrockRecordingExtractorInterface, BlackrockSortingExtractorInterface, ) from .ecephys.openephys.openephysdatainterface import ( OpenEphysRecordingExtractorInterface, OpenEphysSortingExtractorInterface, ) from .ecephys.axona.axonadatainterface import ( AxonaRecordingExtractorInterface, AxonaPositionDataInterface, AxonaLFPDataInterface, AxonaUnitRecordingExtractorInterface, ) from .ecephys.neuralynx.neuralynxdatainterface import NeuralynxRecordingInterface from .ecephys.phy.phydatainterface import PhySortingInterface from .ophys.caiman.caimandatainterface import CaimanSegmentationInterface from .ophys.cnmfe.cnmfedatainterface import CnmfeSegmentationInterface from .ophys.suite2p.suite2pdatainterface import Suite2pSegmentationInterface from .ophys.extract.extractdatainterface import ExtractSegmentationInterface from .ophys.sima.simadatainterface import SimaSegmentationInterface from .ophys.sbx.sbxdatainterface import SbxImagingInterface from .ophys.tiff.tiffdatainterface import TiffImagingInterface from 
.ophys.hdf5.hdf5datainterface import Hdf5ImagingInterface from .behavior.movie.moviedatainterface import MovieInterface interface_list = [ RecordingTutorialInterface, SortingTutorialInterface, NeuralynxRecordingInterface, NeuroscopeRecordingInterface, NeuroscopeMultiRecordingTimeInterface, NeuroscopeSortingInterface, NeuroscopeLFPInterface, SpikeGLXRecordingInterface, SpikeGLXLFPInterface, SpikeGadgetsRecordingInterface, SIPickleRecordingExtractorInterface, SIPickleSortingExtractorInterface, IntanRecordingInterface, CEDRecordingInterface, CellExplorerSortingInterface, BlackrockRecordingExtractorInterface, BlackrockSortingExtractorInterface, OpenEphysRecordingExtractorInterface, OpenEphysSortingExtractorInterface, PhySortingInterface, AxonaRecordingExtractorInterface, AxonaPositionDataInterface, AxonaLFPDataInterface, AxonaUnitRecordingExtractorInterface, CaimanSegmentationInterface, CnmfeSegmentationInterface, Suite2pSegmentationInterface, ExtractSegmentationInterface, SimaSegmentationInterface, SbxImagingInterface, TiffImagingInterface, Hdf5ImagingInterface, MovieInterface, ]
39.349398
100
0.855481
5db1871ba399619979cc87e2fbc3413f1081e231
3,479
py
Python
apps/dash-pileup-demo/layout_helper.py
JeroenvdSande/dash-sample-apps
106fa24693cfdaf47c06466a0aed78e642344f91
[ "MIT" ]
1
2021-06-04T10:04:55.000Z
2021-06-04T10:04:55.000Z
apps/dash-pileup-demo/layout_helper.py
JeroenvdSande/dash-sample-apps
106fa24693cfdaf47c06466a0aed78e642344f91
[ "MIT" ]
null
null
null
apps/dash-pileup-demo/layout_helper.py
JeroenvdSande/dash-sample-apps
106fa24693cfdaf47c06466a0aed78e642344f91
[ "MIT" ]
1
2021-07-27T11:22:35.000Z
2021-07-27T11:22:35.000Z
import base64
import os

import dash
import dash_core_components as dcc
import dash_html_components as html


def run_standalone_app(layout, callbacks, header_colors, filename):
    """Run demo app (tests/dashbio_demos/*/app.py) as standalone app.

    ``layout`` is called with the app to build the page body, ``callbacks``
    registers the app's callbacks, ``header_colors`` returns keyword args
    forwarded to ``app_page_layout`` (e.g. bg_color/font_color), and
    ``filename`` is the caller's file path, used to derive the app name.
    """
    app = dash.Dash(__name__)
    app.scripts.config.serve_locally = True
    # Handle callback to component with id "fullband-switch"
    app.config["suppress_callback_exceptions"] = True

    # Get all information from filename
    app_name = os.getenv("DASH_APP_NAME", "")
    if app_name == "":
        # Fall back to the containing directory name, with any "dash-" prefix
        # stripped (e.g. ".../dash-pileup-demo/app.py" -> "pileup-demo").
        app_name = os.path.basename(os.path.dirname(filename))
        app_name = app_name.replace("dash-", "")

    # Title-case the app name for display, e.g. "pileup-demo" -> "Pileup Demo".
    app_title = "{}".format(app_name.replace("-", " ").title())

    # Assign layout
    app.layout = app_page_layout(
        page_layout=layout(app),
        app_title=app_title,
        app_name=app_name,
        standalone=True,
        **header_colors()
    )

    # Register all callbacks
    callbacks(app)

    # return app object
    return app


def app_page_layout(
    page_layout,
    app_title="Dash Bio App",
    app_name="",
    light_logo=True,
    standalone=False,
    bg_color="#506784",
    font_color="#F3F6FA",
):
    """Wrap ``page_layout`` in the standard page chrome: a header with the
    Dash Bio logo, the app title, a "View on GitHub" link, and the GitHub
    mark, followed by the page content.

    Logo images are read from ./assets at call time and inlined as base64
    data URIs, so this must run with the app's directory as the CWD.
    """
    return html.Div(
        id="main_page",
        children=[
            dcc.Location(id="url", refresh=False),
            html.Div(
                id="app-page-header",
                children=[
                    # Dash Bio logo, linking to the portal (standalone) or
                    # the gallery page.
                    html.A(
                        id="dashbio-logo",
                        children=[
                            html.Img(
                                src="data:image/png;base64,{}".format(
                                    base64.b64encode(
                                        open(
                                            "./assets/plotly-dash-bio-logo.png", "rb"
                                        ).read()
                                    ).decode()
                                )
                            )
                        ],
                        href="/Portal" if standalone else "/dash-bio",
                    ),
                    html.H2(app_title),
                    # Source link; the two adjacent string literals are
                    # concatenated before .format() is applied.
                    html.A(
                        id="gh-link",
                        children=["View on GitHub"],
                        href="http://github.com/plotly/dash-bio/"
                        "blob/master/tests/dashbio_demos/dash-{}/app.py".format(
                            app_name
                        ),
                        style={
                            "color": "white" if light_logo else "black",
                            "border": "solid 1px white"
                            if light_logo
                            else "solid 1px black",
                        },
                    ),
                    # GitHub mark; a light variant is used on dark headers.
                    html.Img(
                        src="data:image/png;base64,{}".format(
                            base64.b64encode(
                                open(
                                    "./assets/GitHub-Mark-{}64px.png".format(
                                        "Light-" if light_logo else ""
                                    ),
                                    "rb",
                                ).read()
                            ).decode()
                        )
                    ),
                ],
                style={"background": bg_color, "color": font_color,},
            ),
            html.Div(id="app-page-content", children=page_layout),
        ],
    )
33.451923
85
0.415349
cf27aee502253124ee0922c62b9e6f6a04acf0e9
2,807
py
Python
BS-AP-locatioon/apLoc.py
darcy-shimmer/AG-LayCast
818f03e3149086b0766500c177e80af6777a307d
[ "Apache-2.0" ]
1
2019-08-20T01:23:05.000Z
2019-08-20T01:23:05.000Z
BS-AP-locatioon/apLoc.py
darcy-shimmer/AG-LayCast
818f03e3149086b0766500c177e80af6777a307d
[ "Apache-2.0" ]
null
null
null
BS-AP-locatioon/apLoc.py
darcy-shimmer/AG-LayCast
818f03e3149086b0766500c177e80af6777a307d
[ "Apache-2.0" ]
null
null
null
import linecache
import urllib.request
import json
import time
import re

# Stage 1 helper: for one grid point, ask the cellocation "rewifi" endpoint
# for the MACs of nearby wifi APs. Raises on network/HTTP errors (callers
# wrap the call in try/except).
#according to queue of lat&lon, find wifi's mac
def registerUrl(Index,outIndex):
    # Index = latitude, outIndex = longitude; n=10 requests up to 10 APs.
    #try:
    url ="http://api.cellocation.com:81/rewifi/?lat="+str(Index)+"&lon="+str(outIndex)+"&n=10"
    #print(url)
    data = urllib.request.urlopen(url).read()
    return data
    #except Exception as e:
    #    print(e)


# Stage 2: reflow the raw responses appended to 'bjwifi_mac' so that each
# "{...}" AP record lands on its own line of 'bjwifi_mac_ed'.
def extract():
    print('begin extract key values')
    line_cache = linecache.getlines('bjwifi_mac')
    with open('bjwifi_mac_ed', 'a') as ff:
        # Starts at index 1, i.e. the first cached line is skipped --
        # presumably a header/junk line; confirm against the file format.
        for i in range(1, len(line_cache)):
            print(line_cache[i])
            print(len(line_cache[i]))
            if len(line_cache[i]) > 3:
                # Break the "}, {" separators onto new lines.
                c = re.sub('}, {', '}' + '\n' + '{', line_cache[i])
                ff.writelines(c)
    linecache.clearcache()
    print('extracted')


# Stage 3 helper: resolve one AP MAC to an exact location via the "wifi"
# endpoint (JSON response).
#according to wifi's mac, find exact location
def refind(MAC):
    #try:
    url = "http://api.cellocation.com:81/wifi/?mac=" + str(MAC) + "&output=json"
    data = urllib.request.urlopen(url).read()
    return data
    #except Exception as e:
    #    print(e)


def main():
    # Grid of query points in 0.005-degree steps; the coordinates and the
    # later "北京市" filter indicate a patch of central Beijing.
    latrange = [i/10000.0 for i in range(398642,399694,50)]
    lonrange = [i/10000.0 for i in range(1163002,1164570,50)]
    # Stage 1: sweep the grid and append each raw response to 'bjwifi_mac'.
    for outindex in lonrange:
        for index in latrange:
            try:
                data = registerUrl(index, outindex)
            except Exception as e:
                # On failure back off for an hour -- presumably API rate
                # limiting; confirm against the cellocation service terms.
                print(e)
                time.sleep(3600)
                continue
            else:
                value = json.loads(data)
                valuee = str(value)
                with open('bjwifi_mac', 'a') as ff:
                    print(valuee)
                    print(index)
                    print(outindex)
                    ff.writelines(valuee)
                    ff.writelines('\n')
            # Throttle between successful requests.
            time.sleep(1)
    extract()
    # Stage 3: resolve every extracted MAC and keep only Beijing hits.
    lin_c = linecache.getlines('bjwifi_mac_ed')
    print('begin!')
    for i in range(1, len(lin_c)):
        # Pull the colon-separated MAC out of the dict-repr line.
        mac = re.findall(r"mac': '(..:..:..:..:..:..)'", lin_c[i])
        print(mac[0])
        try:
            data = refind(mac[0])
        except Exception as e:
            print(e)
            time.sleep(3600)
            continue
        else:
            value = json.loads(data)
            valuee = str(value)
            print(valuee)
            # '10000'/'10001' appear to be API error codes -- results
            # containing them are dropped; TODO confirm with the API docs.
            if re.findall('10000', valuee) != ['10000']:
                if re.findall('10001', valuee) != ['10001']:
                    # Keep only results whose address field says 北京市.
                    if re.findall(": '北京市", valuee) == [": '北京市"]:
                        with open('ap_location', 'a') as ff:
                            print(valuee)
                            print(mac)
                            ff.writelines(valuee)
                            ff.writelines('\n')
        time.sleep(1)
    linecache.clearcache()


if __name__ == "__main__":
    main()
30.51087
94
0.505166
36cb307ecdead7d0ddf19450df211f3b1346aca1
873
py
Python
docs/jnpr_healthbot_swagger/test/test_tsdb_error.py
Juniper/healthbot-py-client
49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6
[ "Apache-2.0" ]
10
2019-10-23T12:54:37.000Z
2022-02-07T19:24:30.000Z
docs/jnpr_healthbot_swagger/test/test_tsdb_error.py
Juniper/healthbot-py-client
49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6
[ "Apache-2.0" ]
5
2019-09-30T04:29:25.000Z
2022-02-16T12:21:06.000Z
docs/jnpr_healthbot_swagger/test/test_tsdb_error.py
Juniper/healthbot-py-client
49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6
[ "Apache-2.0" ]
4
2019-09-30T01:17:48.000Z
2020-08-25T07:27:54.000Z
# coding: utf-8 """ Healthbot APIs API interface for Healthbot application # noqa: E501 OpenAPI spec version: 3.1.0 Contact: healthbot-feedback@juniper.net Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import swagger_client from swagger_client.models.tsdb_error import TsdbError # noqa: E501 from swagger_client.rest import ApiException class TestTsdbError(unittest.TestCase): """TsdbError unit test stubs""" def setUp(self): pass def tearDown(self): pass def testTsdbError(self): """Test TsdbError""" # FIXME: construct object with mandatory attributes with example values # model = swagger_client.models.tsdb_error.TsdbError() # noqa: E501 pass if __name__ == '__main__': unittest.main()
21.292683
79
0.697595
dcb2a4787deb7fccbb1c7d4e01ee1cc94ce4c852
474
py
Python
plugins/hydra_gridengine_launcher/example/my_app.py
ethanluoyc/submitit_gridengine
c7da16038c9aa8a3b2f296b94365a5787ea49d37
[ "MIT" ]
4
2020-10-21T13:33:02.000Z
2021-03-12T21:17:19.000Z
plugins/hydra_gridengine_launcher/example/my_app.py
ethanluoyc/submitit_gridengine
c7da16038c9aa8a3b2f296b94365a5787ea49d37
[ "MIT" ]
null
null
null
plugins/hydra_gridengine_launcher/example/my_app.py
ethanluoyc/submitit_gridengine
c7da16038c9aa8a3b2f296b94365a5787ea49d37
[ "MIT" ]
null
null
null
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved import logging import os import time import hydra import submitit from omegaconf import DictConfig log = logging.getLogger(__name__) @hydra.main(config_name="config") def my_app(cfg: DictConfig) -> None: # env = submitit.JobEnvironment() # log.info(f"Process ID {os.getpid()} executing task {cfg.task}, with {env}") print(cfg) time.sleep(1) if __name__ == "__main__": my_app()
20.608696
81
0.71097
655bfdf9a317ee6fc20f7ee515233592597f1f2f
1,656
py
Python
lib/datasets/factory.py
tianzhi0549/py-faster-rcnn-resnet-imagenet
1d358cd341fa092a7c2f271a09e5d0385c8d3b74
[ "BSD-2-Clause" ]
27
2017-07-31T03:01:23.000Z
2021-07-12T03:46:16.000Z
lib/datasets/factory.py
PranjaliSaini/py-faster-rcnn-resnet-imagenet
1d358cd341fa092a7c2f271a09e5d0385c8d3b74
[ "BSD-2-Clause" ]
2
2017-09-17T19:21:20.000Z
2019-02-26T11:49:51.000Z
lib/datasets/factory.py
PranjaliSaini/py-faster-rcnn-resnet-imagenet
1d358cd341fa092a7c2f271a09e5d0385c8d3b74
[ "BSD-2-Clause" ]
26
2017-08-08T09:26:33.000Z
2021-12-15T03:42:24.000Z
# -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ross Girshick # -------------------------------------------------------- """Factory method for easily getting imdbs by name.""" __sets = {} from datasets.pascal_voc import pascal_voc from datasets.imagenet import imagenet from datasets.coco import coco import numpy as np # Set up voc_<year>_<split> using selective search "fast" mode for year in ['2007', '2012']: for split in ['train', 'val', 'trainval', 'test']: name = 'voc_{}_{}'.format(year, split) __sets[name] = (lambda split=split, year=year: pascal_voc(split, year)) # Set up coco_2014_<split> for year in ['2014']: for split in ['train', 'val', 'minival', 'valminusminival']: name = 'coco_{}_{}'.format(year, split) __sets[name] = (lambda split=split, year=year: coco(split, year)) # Set up coco_2015_<split> for year in ['2015']: for split in ['test', 'test-dev']: name = 'coco_{}_{}'.format(year, split) __sets[name] = (lambda split=split, year=year: coco(split, year)) for year in ['2015']: for split in ['train', 'val', "trainval1_woextra", 'val1', 'val2']: name = 'imagenet_{}_{}'.format(year, split) __sets[name] = (lambda split=split: imagenet(split, year)) def get_imdb(name): """Get an imdb (image database) by name.""" if not __sets.has_key(name): raise KeyError('Unknown dataset: {}'.format(name)) return __sets[name]() def list_imdbs(): """List all registered imdbs.""" return __sets.keys()
33.795918
79
0.601449
2377c9fd8ea3f99732527760313323b7a3627584
1,347
py
Python
libvcs/exc.py
tony/libvcs
05db3a77b53326502cbb5bc76e8a6985cd271182
[ "MIT" ]
35
2016-07-16T21:39:10.000Z
2017-11-24T02:52:13.000Z
libvcs/exc.py
tony/libvcs
05db3a77b53326502cbb5bc76e8a6985cd271182
[ "MIT" ]
70
2016-06-20T06:45:12.000Z
2018-03-06T14:57:35.000Z
libvcs/exc.py
tony/libvcs
05db3a77b53326502cbb5bc76e8a6985cd271182
[ "MIT" ]
2
2016-06-21T13:59:00.000Z
2017-05-12T17:49:45.000Z
"""Exceptions for libvcs. If you see this, we're publishing to S3 automatically """ class LibVCSException(Exception): """Standard exception raised by libvcs.""" class CommandError(LibVCSException): """This exception is raised on non-zero return codes.""" def __init__(self, output, returncode=None, cmd=None): self.returncode = returncode self.output = output if cmd: if isinstance(cmd, list): cmd = " ".join(cmd) self.cmd = cmd def __str__(self): message = self.message.format(returncode=self.returncode, cmd=self.cmd) if len(self.output.strip()): message += "\n%s" % self.output return message message = "Command failed with code {returncode}: {cmd}" class CommandTimeoutError(CommandError): """CommandError which gets raised when a subprocess exceeds its timeout.""" class InvalidPipURL(LibVCSException): """Invalid pip-style URL.""" def __init__(self, url): self.url = url super().__init__() def __str__(self): return self.message message = ( "Repo URL %s requires a vcs scheme. Prepend the vcs (hg+, git+, svn+)" "to the repo URL. e.g: git+https://github.com/freebsd/freebsd.git" ) class InvalidVCS(LibVCSException): """Invalid VCS."""
24.944444
79
0.628062
13f484acd312456206e6b7494a25280bd5d8a873
342
py
Python
bot/exts/__init__.py
sam-heller/sir-lancebot
a3bd208580ff223cd0120d0c4b5a1bf3c28d03e5
[ "MIT" ]
1
2021-11-07T04:04:13.000Z
2021-11-07T04:04:13.000Z
bot/exts/__init__.py
sam-heller/sir-lancebot
a3bd208580ff223cd0120d0c4b5a1bf3c28d03e5
[ "MIT" ]
3
2021-08-21T07:26:51.000Z
2022-03-12T01:05:58.000Z
bot/exts/__init__.py
sam-heller/sir-lancebot
a3bd208580ff223cd0120d0c4b5a1bf3c28d03e5
[ "MIT" ]
1
2021-02-11T23:44:40.000Z
2021-02-11T23:44:40.000Z
import logging
import pkgutil
from typing import Iterator

__all__ = ("get_package_names",)

log = logging.getLogger(__name__)


def get_package_names() -> Iterator[str]:
    """Yield the name of every package found under /bot/exts/."""
    yield from (
        module_info.name
        for module_info in pkgutil.iter_modules(__path__)
        if module_info.ispkg
    )
22.8
62
0.707602
57e64b0ec499522d2b09b760e8c5d737360d0d5b
7,500
py
Python
finnhub/models/upgrade_downgrade.py
gavinjay/finnhub-python
b5c409dafeda390d14a2b0618ae6f25ab8d76c5b
[ "Apache-2.0" ]
null
null
null
finnhub/models/upgrade_downgrade.py
gavinjay/finnhub-python
b5c409dafeda390d14a2b0618ae6f25ab8d76c5b
[ "Apache-2.0" ]
null
null
null
finnhub/models/upgrade_downgrade.py
gavinjay/finnhub-python
b5c409dafeda390d14a2b0618ae6f25ab8d76c5b
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ Finnhub API No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: 1.0.0 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from finnhub.configuration import Configuration class UpgradeDowngrade(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'symbol': 'str', 'grade_time': 'int', 'from_grade': 'str', 'to_grade': 'str', 'company': 'str', 'action': 'str' } attribute_map = { 'symbol': 'symbol', 'grade_time': 'gradeTime', 'from_grade': 'fromGrade', 'to_grade': 'toGrade', 'company': 'company', 'action': 'action' } def __init__(self, symbol=None, grade_time=None, from_grade=None, to_grade=None, company=None, action=None, local_vars_configuration=None): # noqa: E501 """UpgradeDowngrade - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._symbol = None self._grade_time = None self._from_grade = None self._to_grade = None self._company = None self._action = None self.discriminator = None if symbol is not None: self.symbol = symbol if grade_time is not None: self.grade_time = grade_time if from_grade is not None: self.from_grade = from_grade if to_grade is not None: self.to_grade = to_grade if company is not None: self.company = company if action is not None: self.action = action @property def symbol(self): """Gets the symbol of this UpgradeDowngrade. # noqa: E501 Company symbol. # noqa: E501 :return: The symbol of this UpgradeDowngrade. 
# noqa: E501 :rtype: str """ return self._symbol @symbol.setter def symbol(self, symbol): """Sets the symbol of this UpgradeDowngrade. Company symbol. # noqa: E501 :param symbol: The symbol of this UpgradeDowngrade. # noqa: E501 :type: str """ self._symbol = symbol @property def grade_time(self): """Gets the grade_time of this UpgradeDowngrade. # noqa: E501 Upgrade/downgrade time in UNIX timestamp. # noqa: E501 :return: The grade_time of this UpgradeDowngrade. # noqa: E501 :rtype: int """ return self._grade_time @grade_time.setter def grade_time(self, grade_time): """Sets the grade_time of this UpgradeDowngrade. Upgrade/downgrade time in UNIX timestamp. # noqa: E501 :param grade_time: The grade_time of this UpgradeDowngrade. # noqa: E501 :type: int """ self._grade_time = grade_time @property def from_grade(self): """Gets the from_grade of this UpgradeDowngrade. # noqa: E501 From grade. # noqa: E501 :return: The from_grade of this UpgradeDowngrade. # noqa: E501 :rtype: str """ return self._from_grade @from_grade.setter def from_grade(self, from_grade): """Sets the from_grade of this UpgradeDowngrade. From grade. # noqa: E501 :param from_grade: The from_grade of this UpgradeDowngrade. # noqa: E501 :type: str """ self._from_grade = from_grade @property def to_grade(self): """Gets the to_grade of this UpgradeDowngrade. # noqa: E501 To grade. # noqa: E501 :return: The to_grade of this UpgradeDowngrade. # noqa: E501 :rtype: str """ return self._to_grade @to_grade.setter def to_grade(self, to_grade): """Sets the to_grade of this UpgradeDowngrade. To grade. # noqa: E501 :param to_grade: The to_grade of this UpgradeDowngrade. # noqa: E501 :type: str """ self._to_grade = to_grade @property def company(self): """Gets the company of this UpgradeDowngrade. # noqa: E501 Company/analyst who did the upgrade/downgrade. # noqa: E501 :return: The company of this UpgradeDowngrade. 
# noqa: E501 :rtype: str """ return self._company @company.setter def company(self, company): """Sets the company of this UpgradeDowngrade. Company/analyst who did the upgrade/downgrade. # noqa: E501 :param company: The company of this UpgradeDowngrade. # noqa: E501 :type: str """ self._company = company @property def action(self): """Gets the action of this UpgradeDowngrade. # noqa: E501 Action can take any of the following values: <code>up(upgrade), down(downgrade), main(maintains), init(initiate), reit(reiterate)</code>. # noqa: E501 :return: The action of this UpgradeDowngrade. # noqa: E501 :rtype: str """ return self._action @action.setter def action(self, action): """Sets the action of this UpgradeDowngrade. Action can take any of the following values: <code>up(upgrade), down(downgrade), main(maintains), init(initiate), reit(reiterate)</code>. # noqa: E501 :param action: The action of this UpgradeDowngrade. # noqa: E501 :type: str """ self._action = action def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, UpgradeDowngrade): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, UpgradeDowngrade): return True return 
self.to_dict() != other.to_dict()
28.51711
159
0.587333
f1a53bfda0cf9b28e2d076ae3fe98190f7d2b9b3
25,664
py
Python
experiments/moscow_exp_nodule/data_loader.py
ivanwilliammd/I3DR-Net
356d9a3d821d22c375b0bcc42ae488fe6e520e21
[ "Apache-2.0" ]
12
2020-04-10T07:25:46.000Z
2021-09-28T07:04:53.000Z
experiments/moscow_exp_nodule/data_loader.py
ivanwilliammd/I3DR-Net
356d9a3d821d22c375b0bcc42ae488fe6e520e21
[ "Apache-2.0" ]
null
null
null
experiments/moscow_exp_nodule/data_loader.py
ivanwilliammd/I3DR-Net
356d9a3d821d22c375b0bcc42ae488fe6e520e21
[ "Apache-2.0" ]
2
2020-04-15T12:52:01.000Z
2020-07-19T08:47:39.000Z
#!/usr/bin/env python
# Official implementation code for "Lung Nodule Detection and Classification from Thorax CT-Scan Using RetinaNet with Transfer Learning" and "Lung Nodule Texture Detection and Classification Using 3D CNN."
# Adapted from of [medicaldetectiontoolkit](https://github.com/pfjaeger/medicaldetectiontoolkit) and [kinetics_i3d_pytorch](https://github.com/hassony2/kinetics_i3d_pytorch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

'''
Example Data Loader for the LIDC data set. This dataloader expects preprocessed data in .npy or .npz files per patient and
a pandas dataframe in the same directory containing the meta-info e.g. file paths, labels, foregound slice-ids.
'''


import numpy as np
import os
from collections import OrderedDict
import pandas as pd
import pickle
import time
import subprocess
import utils.dataloader_utils as dutils

# batch generator tools from https://github.com/ivanwilliammd/batchgenerators
from batchgenerators.dataloading.data_loader import SlimDataLoaderBase
from batchgenerators.transforms.spatial_transforms import MirrorTransform as Mirror
from batchgenerators.transforms.abstract_transforms import Compose
from batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter
from batchgenerators.dataloading import SingleThreadedAugmenter
from batchgenerators.transforms.spatial_transforms import SpatialTransform
from batchgenerators.transforms.crop_and_pad_transforms import CenterCropTransform
from batchgenerators.transforms.utility_transforms import ConvertSegToBoundingBoxCoordinates


def get_train_generators(cf, logger):
    """
    wrapper function for creating the training batch generator pipeline. returns the train/val generators.
    selects patients according to cv folds (generated by first run/fold of experiment):
    splits the data into n-folds, where 1 split is used for val, 1 split for testing and the rest for training. (inner loop test set)
    If cf.hold_out_test_set is True, adds the test split to the training data.
    """
    all_data = load_dataset(cf, logger)
    all_pids_list = np.unique([v['pid'] for (k, v) in all_data.items()])

    # Fold assignments are generated once per experiment and cached as a
    # pickle so every fold/run of the experiment sees the same split.
    if not cf.created_fold_id_pickle:
        fg = dutils.fold_generator(seed=cf.seed, n_splits=cf.n_cv_splits, len_data=len(all_pids_list)).get_fold_names()
        with open(os.path.join(cf.exp_dir, 'fold_ids.pickle'), 'wb') as handle:
            pickle.dump(fg, handle)
        cf.created_fold_id_pickle = True
    else:
        with open(os.path.join(cf.exp_dir, 'fold_ids.pickle'), 'rb') as handle:
            fg = pickle.load(handle)

    train_ix, val_ix, test_ix, _ = fg[cf.fold]

    train_pids = [all_pids_list[ix] for ix in train_ix]
    val_pids = [all_pids_list[ix] for ix in val_ix]

    if cf.hold_out_test_set:
        train_pids += [all_pids_list[ix] for ix in test_ix]

    train_data = {k: v for (k, v) in all_data.items() if any(p == v['pid'] for p in train_pids)}
    val_data = {k: v for (k, v) in all_data.items() if any(p == v['pid'] for p in val_pids)}

    logger.info("data set loaded with: {} train / {} val / {} test patients".format(len(train_ix), len(val_ix), len(test_ix)))
    batch_gen = {}
    batch_gen['train'] = create_data_gen_pipeline(train_data, cf=cf, is_training=True)
    batch_gen['val_sampling'] = create_data_gen_pipeline(val_data, cf=cf, is_training=False)
    if cf.val_mode == 'val_patient':
        batch_gen['val_patient'] = PatientBatchIterator(val_data, cf=cf)
        batch_gen['n_val'] = len(val_ix) if cf.max_val_patients is None else cf.max_val_patients
    else:
        batch_gen['n_val'] = cf.num_val_batches

    return batch_gen


def get_test_generator(cf, logger):
    """
    wrapper function for creating the test batch generator pipeline.
    selects patients according to cv folds (generated by first run/fold of experiment)
    If cf.hold_out_test_set is True, gets the data from an external folder instead.
    """
    if cf.hold_out_test_set:
        cf.pp_data_path = cf.pp_test_data_path
        test_ix = None
    else:
        with open(os.path.join(cf.exp_dir, 'fold_ids.pickle'), 'rb') as handle:
            fold_list = pickle.load(handle)
        _, _, test_ix, _ = fold_list[cf.fold]
        # warnings.warn('WARNING: using validation set for testing!!!')
    # NOTE(review): when hold_out_test_set is True, test_ix is None here and
    # len(test_ix) below would raise — confirm that branch is exercised.
    test_data = load_dataset(cf, logger, test_ix)
    logger.info("data set loaded with: {} test patients".format(len(test_ix)))
    batch_gen = {}
    batch_gen['test'] = PatientBatchIterator(test_data, cf=cf)
    batch_gen['n_test'] = len(test_ix)
    return batch_gen


def load_dataset(cf, logger, subset_ixs=None):
    """
    loads the dataset. if deployed in cloud also copies and unpacks the data to the working directory.
    :param subset_ixs: subset indices to be loaded from the dataset. used e.g. for testing to only load the test folds.
    :return: data: dictionary with one entry per patient (in this case per patient-breast, since they are treated as
    individual images for training) each entry is a dictionary containing respective meta-info as well as paths to the preprocessed
    numpy arrays to be loaded during batch-generation
    """
    if cf.server_env:
        copy_data = True
        target_dir = os.path.join('/ssd', cf.slurm_job_id, cf.pp_name, cf.crop_name)
        if not os.path.exists(target_dir):
            cf.data_source_dir = cf.pp_data_path
            os.makedirs(target_dir)
            # Copy only the meta-info dataframe up front; image arrays are
            # rsynced later by copy_and_unpack_data.
            subprocess.call('rsync -av {} {}'.format(
                os.path.join(cf.data_source_dir, cf.input_df_name), os.path.join(target_dir, cf.input_df_name)), shell=True)
            logger.info('created target dir and info df at {}'.format(os.path.join(target_dir, cf.input_df_name)))

        elif subset_ixs is None:
            copy_data = False

        cf.pp_data_path = target_dir

    p_df = pd.read_pickle(os.path.join(cf.pp_data_path, cf.input_df_name))

    if cf.select_prototype_subset is not None:
        prototype_pids = p_df.pid.tolist()[:cf.select_prototype_subset]
        p_df = p_df[p_df.pid.isin(prototype_pids)]
        logger.warning('WARNING: using prototyping data subset!!!')

    if subset_ixs is not None:
        subset_pids = [np.unique(p_df.pid.tolist())[ix] for ix in subset_ixs]
        p_df = p_df[p_df.pid.isin(subset_pids)]
        logger.info('subset: selected {} instances from df'.format(len(p_df)))

    if cf.server_env:
        if copy_data:
            copy_and_unpack_data(logger, p_df.pid.tolist(), cf.fold_dir, cf.data_source_dir, target_dir)

    class_targets = p_df['class_target'].tolist()
    pids = p_df.pid.tolist()
    imgs = [os.path.join(cf.pp_data_path, '{}_img.npy'.format(pid)) for pid in pids]
    segs = [os.path.join(cf.pp_data_path,'{}_rois.npy'.format(pid)) for pid in pids]

    data = OrderedDict()
    for ix, pid in enumerate(pids):
        # for the experiment conducted here, texture scores are converted into 1 --> 1 = true_nodule
        targets = [0 for ii in class_targets[ix]]
        data[pid] = {'data': imgs[ix], 'seg': segs[ix], 'pid': pid, 'class_target': targets}
        data[pid]['fg_slices'] = p_df['fg_slices'].tolist()[ix]

    return data


def create_data_gen_pipeline(patient_data, cf, is_training=True):
    """
    create mutli-threaded train/val/test batch generation and augmentation pipeline.
    :param patient_data: dictionary containing one dictionary per patient in the train/test subset.
    :param is_training: (optional) whether to perform data augmentation (training) or not (validation/testing)
    :return: multithreaded_generator
    """

    # create instance of batch generator as first element in pipeline.
    data_gen = BatchGenerator(patient_data, batch_size=cf.batch_size, cf=cf)

    # add transformations to pipeline.
    my_transforms = []
    if is_training:
        mirror_transform = Mirror(axes=np.arange(cf.dim))
        my_transforms.append(mirror_transform)
        spatial_transform = SpatialTransform(patch_size=cf.patch_size[:cf.dim],
                                             patch_center_dist_from_border=cf.da_kwargs['rand_crop_dist'],
                                             do_elastic_deform=cf.da_kwargs['do_elastic_deform'],
                                             alpha=cf.da_kwargs['alpha'], sigma=cf.da_kwargs['sigma'],
                                             do_rotation=cf.da_kwargs['do_rotation'], angle_x=cf.da_kwargs['angle_x'],
                                             angle_y=cf.da_kwargs['angle_y'], angle_z=cf.da_kwargs['angle_z'],
                                             do_scale=cf.da_kwargs['do_scale'], scale=cf.da_kwargs['scale'],
                                             random_crop=cf.da_kwargs['random_crop'])
        my_transforms.append(spatial_transform)
        # print('debug spatial_transform, my_transforms')
        # import IPython;IPython.embed()
    else:
        my_transforms.append(CenterCropTransform(crop_size=cf.patch_size[:cf.dim]))

    my_transforms.append(ConvertSegToBoundingBoxCoordinates(cf.dim, get_rois_from_seg_flag=False, class_specific_seg_flag=cf.class_specific_seg_flag))
    # my_transforms.append(ConvertSegToBoundingBoxCoordinates(cf.dim, get_rois_from_seg_flag=True, class_specific_seg_flag=cf.class_specific_seg_flag))
    all_transforms = Compose(my_transforms)
    # NOTE(review): single-threaded augmenter is used here; the multi-threaded
    # variant is kept commented out below — confirm this is intentional.
    multithreaded_generator = SingleThreadedAugmenter(data_gen, all_transforms)
    # multithreaded_generator = MultiThreadedAugmenter(data_gen, all_transforms, num_processes=cf.n_workers, seeds=range(cf.n_workers))
    return multithreaded_generator


class BatchGenerator(SlimDataLoaderBase):
    """
    creates the training/validation batch generator. Samples n_batch_size patients (draws a slice from each patient if 2D)
    from the data set while maintaining foreground-class balance. Returned patches are cropped/padded to pre_crop_size.
    Actual patch_size is obtained after data augmentation.
    :param data: data dictionary as provided by 'load_dataset'.
    :param batch_size: number of patients to sample for the batch
    :return dictionary containing the batch data (b, c, x, y, (z)) / seg (b, 1, x, y, (z)) / pids / class_target
    """
    def __init__(self, data, batch_size, cf):
        super(BatchGenerator, self).__init__(data, batch_size)

        self.cf = cf
        self.crop_margin = np.array(self.cf.patch_size)/8. #min distance of ROI center to edge of cropped_patch.
        self.p_fg = 0.5  # probability of oversampling foreground slices/patches.

    def generate_train_batch(self):

        batch_data, batch_segs, batch_pids, batch_targets, batch_patient_labels = [], [], [], [], []
        class_targets_list = [v['class_target'] for (k, v) in self._data.items()]

        # maybe error because at folds1, need one of each class --> 2 lines below this
        #samples patients towards equilibrium of foreground classes on a roi-level (after randomly sampling the ratio "batch_sample_slack).
        # batch_ixs = dutils.get_class_balanced_patients(
        #     class_targets_list, self.batch_size, self.cf.head_classes - 1, slack_factor=self.cf.batch_sample_slack)
        # Rejection-sample patient indices: keep drawing until the candidate
        # has at least one target equal to 0 (all targets are 0 after
        # load_dataset's relabeling, so this accepts any patient with >=1 ROI).
        batch_ixs = []
        batch_size = self.batch_size
        # num_classes = self.cf.head_classes - 1
        class_targets=class_targets_list
        # np.random.seed(0)
        length = len(class_targets)
        for ix in range(batch_size):
            keep_looking = True
            while keep_looking:
                cand = np.random.choice(length, 1)[0]
                if (class_targets[cand].count(0) > 0):
                    keep_looking = False
                    batch_ixs.append(cand)
        # print(len(class_targets))
        # print(batch_ixs)

        patients = list(self._data.items())

        for b in batch_ixs:
            patient = patients[b][1]
            # Arrays on disk are loaded memory-mapped and transposed with
            # axes=(1, 2, 0) — assumes a z-first layout on disk; TODO confirm.
            data = np.transpose(np.load(patient['data'], mmap_mode='r'), axes=(1, 2, 0))[np.newaxis]
            seg = np.transpose(np.load(patient['seg'], mmap_mode='r'), axes=(1, 2, 0))
            batch_pids.append(patient['pid'])
            batch_targets.append(patient['class_target'])

            if self.cf.dim == 2:
                # draw random slice from patient while oversampling slices containing foreground objects with p_fg.
                if len(patient['fg_slices']) > 0:
                    fg_prob = self.p_fg / len(patient['fg_slices'])
                    bg_prob = (1 - self.p_fg) / (data.shape[3] - len(patient['fg_slices']))
                    slices_prob = [fg_prob if ix in patient['fg_slices'] else bg_prob for ix in range(data.shape[3])]
                    slice_id = np.random.choice(data.shape[3], p=slices_prob)
                else:
                    slice_id = np.random.choice(data.shape[3])

                # if set to not None, add neighbouring slices to each selected slice in channel dimension.
                if self.cf.n_3D_context is not None:
                    padded_data = dutils.pad_nd_image(data[0], [(data.shape[-1] + (self.cf.n_3D_context*2))], mode='constant')
                    padded_slice_id = slice_id + self.cf.n_3D_context
                    data = (np.concatenate([padded_data[..., ii][np.newaxis] for ii in range(
                        padded_slice_id - self.cf.n_3D_context, padded_slice_id + self.cf.n_3D_context + 1)], axis=0))
                else:
                    data = data[..., slice_id]
                seg = seg[..., slice_id]

            # pad data if smaller than pre_crop_size.
            if np.any([data.shape[dim + 1] < ps for dim, ps in enumerate(self.cf.pre_crop_size)]):
                new_shape = [np.max([data.shape[dim + 1], ps]) for dim, ps in enumerate(self.cf.pre_crop_size)]
                data = dutils.pad_nd_image(data, new_shape, mode='constant')
                seg = dutils.pad_nd_image(seg, new_shape, mode='constant')

            # crop patches of size pre_crop_size, while sampling patches containing foreground with p_fg.
            crop_dims = [dim for dim, ps in enumerate(self.cf.pre_crop_size) if data.shape[dim + 1] > ps]
            if len(crop_dims) > 0:
                fg_prob_sample = np.random.rand(1)
                # with p_fg: sample random pixel from random ROI and shift center by random value.
                if fg_prob_sample < self.p_fg and np.sum(seg) > 0:
                    seg_ixs = np.argwhere(seg == np.random.choice(np.unique(seg)[1:], 1))
                    roi_anchor_pixel = seg_ixs[np.random.choice(seg_ixs.shape[0], 1)][0]
                    assert seg[tuple(roi_anchor_pixel)] > 0
                    # sample the patch center coords. constrained by edges of images - pre_crop_size /2. And by
                    # distance to the desired ROI < patch_size /2.
                    # (here final patch size to account for center_crop after data augmentation).
                    sample_seg_center = {}
                    for ii in crop_dims:
                        low = np.max((self.cf.pre_crop_size[ii]//2, roi_anchor_pixel[ii] - (self.cf.patch_size[ii]//2 - self.crop_margin[ii])))
                        high = np.min((data.shape[ii + 1] - self.cf.pre_crop_size[ii]//2,
                                       roi_anchor_pixel[ii] + (self.cf.patch_size[ii]//2 - self.crop_margin[ii])))
                        # happens if lesion on the edge of the image. dont care about roi anymore,
                        # just make sure pre-crop is inside image.
                        if low >= high:
                            low = data.shape[ii + 1] // 2 - (data.shape[ii + 1] // 2 - self.cf.pre_crop_size[ii] // 2)
                            high = data.shape[ii + 1] // 2 + (data.shape[ii + 1] // 2 - self.cf.pre_crop_size[ii] // 2)
                        sample_seg_center[ii] = np.random.randint(low=low, high=high)

                else:
                    # not guaranteed to be empty. probability of emptiness depends on the data.
                    sample_seg_center = {ii: np.random.randint(low=self.cf.pre_crop_size[ii]//2,
                                                           high=data.shape[ii + 1] - self.cf.pre_crop_size[ii]//2) for ii in crop_dims}

                for ii in crop_dims:
                    min_crop = int(sample_seg_center[ii] - self.cf.pre_crop_size[ii] // 2)
                    max_crop = int(sample_seg_center[ii] + self.cf.pre_crop_size[ii] // 2)
                    # data has a leading channel axis, seg does not — hence axis offsets.
                    data = np.take(data, indices=range(min_crop, max_crop), axis=ii + 1)
                    seg = np.take(seg, indices=range(min_crop, max_crop), axis=ii)

            batch_data.append(data)
            batch_segs.append(seg[np.newaxis])

        data = np.array(batch_data).astype(np.float16)
        seg = np.array(batch_segs).astype(np.uint8)
        class_target = np.array(batch_targets)
        return {'data': data, 'seg': seg, 'pid': batch_pids, 'class_target': class_target}


class PatientBatchIterator(SlimDataLoaderBase):
    """
    creates a test generator that iterates over entire given dataset returning 1 patient per batch.
    Can be used for monitoring if cf.val_mode = 'patient_val' for a monitoring closer to actualy evaluation (done in 3D),
    if willing to accept speed-loss during training.
    :return: out_batch: dictionary containing one patient with batch_size = n_3D_patches in 3D or
    batch_size = n_2D_patches in 2D .
    """
    def __init__(self, data, cf): #threads in augmenter
        super(PatientBatchIterator, self).__init__(data, 0)
        self.cf = cf
        self.patient_ix = 0
        self.dataset_pids = [v['pid'] for (k, v) in data.items()]
        self.patch_size = cf.patch_size
        if len(self.patch_size) == 2:
            self.patch_size = self.patch_size + [1]

    def generate_train_batch(self):

        pid = self.dataset_pids[self.patient_ix]
        patient = self._data[pid]
        data = np.transpose(np.load(patient['data'], mmap_mode='r'), axes=(1, 2, 0))
        seg = np.transpose(np.load(patient['seg'], mmap_mode='r'), axes=(1, 2, 0))
        batch_class_targets = np.array([patient['class_target']])

        # pad data if smaller than patch_size seen during training.
        if np.any([data.shape[dim] < ps for dim, ps in enumerate(self.patch_size)]):
            new_shape = [np.max([data.shape[dim], self.patch_size[dim]]) for dim, ps in enumerate(self.patch_size)]
            data = dutils.pad_nd_image(data, new_shape) # use 'return_slicer' to crop image back to original shape.
            seg = dutils.pad_nd_image(seg, new_shape)

        # get 3D targets for evaluation, even if network operates in 2D. 2D predictions will be merged to 3D in predictor.
        if self.cf.dim == 3 or self.cf.merge_2D_to_3D_preds:
            out_data = data[np.newaxis, np.newaxis]
            out_seg = seg[np.newaxis, np.newaxis]
            out_targets = batch_class_targets

            batch_3D = {'data': out_data, 'seg': out_seg, 'class_target': out_targets, 'pid': pid}
            converter = ConvertSegToBoundingBoxCoordinates(dim=3, get_rois_from_seg_flag=False, class_specific_seg_flag=self.cf.class_specific_seg_flag)
            # converter = ConvertSegToBoundingBoxCoordinates(dim=3, get_rois_from_seg_flag=True, class_specific_seg_flag=self.cf.class_specific_seg_flag)
            batch_3D = converter(**batch_3D)
            batch_3D.update({'patient_bb_target': batch_3D['bb_target'],
                                  'patient_roi_labels': batch_3D['roi_labels'],
                                  'original_img_shape': out_data.shape})

        if self.cf.dim == 2:
            out_data = np.transpose(data, axes=(2, 0, 1))[:, np.newaxis]  # (z, c, x, y )
            out_seg = np.transpose(seg, axes=(2, 0, 1))[:, np.newaxis]
            out_targets = np.array(np.repeat(batch_class_targets, out_data.shape[0], axis=0))

            # if set to not None, add neighbouring slices to each selected slice in channel dimension.
            if self.cf.n_3D_context is not None:
                slice_range = range(self.cf.n_3D_context, out_data.shape[0] + self.cf.n_3D_context)
                out_data = np.pad(out_data, ((self.cf.n_3D_context, self.cf.n_3D_context), (0, 0), (0, 0), (0, 0)), 'constant', constant_values=0)
                out_data = np.array(
                    [np.concatenate([out_data[ii] for ii in range(
                        slice_id - self.cf.n_3D_context, slice_id + self.cf.n_3D_context + 1)], axis=0) for slice_id in
                     slice_range])

            batch_2D = {'data': out_data, 'seg': out_seg, 'class_target': out_targets, 'pid': pid}
            converter = ConvertSegToBoundingBoxCoordinates(dim=2, get_rois_from_seg_flag=False, class_specific_seg_flag=self.cf.class_specific_seg_flag)
            # converter = ConvertSegToBoundingBoxCoordinates(dim=2, get_rois_from_seg_flag=True, class_specific_seg_flag=self.cf.class_specific_seg_flag)
            batch_2D = converter(**batch_2D)

            if self.cf.merge_2D_to_3D_preds:
                batch_2D.update({'patient_bb_target': batch_3D['patient_bb_target'],
                                      'patient_roi_labels': batch_3D['patient_roi_labels'],
                                      'original_img_shape': out_data.shape})
            else:
                batch_2D.update({'patient_bb_target': batch_2D['bb_target'],
                                 'patient_roi_labels': batch_2D['roi_labels'],
                                 'original_img_shape': out_data.shape})

        out_batch = batch_3D if self.cf.dim == 3 else batch_2D
        patient_batch = out_batch

        # crop patient-volume to patches of patch_size used during training. stack patches up in batch dimension.
        # in this case, 2D is treated as a special case of 3D with patch_size[z] = 1.
        if np.any([data.shape[dim] > self.patch_size[dim] for dim in range(3)]):
            patch_crop_coords_list = dutils.get_patch_crop_coords(data, self.patch_size)
            new_img_batch, new_seg_batch, new_class_targets_batch = [], [], []

            for cix, c in enumerate(patch_crop_coords_list):
                seg_patch = seg[c[0]:c[1], c[2]: c[3], c[4]:c[5]]
                new_seg_batch.append(seg_patch)

                # if set to not None, add neighbouring slices to each selected slice in channel dimension.
                # correct patch_crop coordinates by added slices of 3D context.
                if self.cf.dim == 2 and self.cf.n_3D_context is not None:
                    tmp_c_5 = c[5] + (self.cf.n_3D_context * 2)
                    if cix == 0:
                        data = np.pad(data, ((0, 0), (0, 0), (self.cf.n_3D_context, self.cf.n_3D_context)), 'constant', constant_values=0)
                else:
                    tmp_c_5 = c[5]

                new_img_batch.append(data[c[0]:c[1], c[2]:c[3], c[4]:tmp_c_5])

            data = np.array(new_img_batch)[:, np.newaxis]  # (n_patches, c, x, y, z)
            seg = np.array(new_seg_batch)[:, np.newaxis]  # (n_patches, 1, x, y, z)
            batch_class_targets = np.repeat(batch_class_targets, len(patch_crop_coords_list), axis=0)

            if self.cf.dim == 2:
                if self.cf.n_3D_context is not None:
                    data = np.transpose(data[:, 0], axes=(0, 3, 1, 2))
                else:
                    # all patches have z dimension 1 (slices). discard dimension
                    data = data[..., 0]
                    seg = seg[..., 0]

            patch_batch = {'data': data, 'seg': seg, 'class_target': batch_class_targets, 'pid': pid}
            patch_batch['patch_crop_coords'] = np.array(patch_crop_coords_list)
            patch_batch['patient_bb_target'] = patient_batch['patient_bb_target']
            patch_batch['patient_roi_labels'] = patient_batch['patient_roi_labels']
            patch_batch['original_img_shape'] = patient_batch['original_img_shape']
            converter = ConvertSegToBoundingBoxCoordinates(self.cf.dim, get_rois_from_seg_flag=False, class_specific_seg_flag=self.cf.class_specific_seg_flag)
            # converter = ConvertSegToBoundingBoxCoordinates(self.cf.dim, get_rois_from_seg_flag=True, class_specific_seg_flag=self.cf.class_specific_seg_flag)
            patch_batch = converter(**patch_batch)
            out_batch = patch_batch

        # Wrap around so the iterator cycles endlessly over the patient list.
        self.patient_ix += 1
        if self.patient_ix == len(self.dataset_pids):
            self.patient_ix = 0

        return out_batch


def copy_and_unpack_data(logger, pids, fold_dir, source_dir, target_dir):
    # Rsync the per-patient image/roi archives listed in file_list.txt from
    # the network source to the local target dir, then unpack them.
    start_time = time.time()
    with open(os.path.join(fold_dir, 'file_list.txt'), 'w') as handle:
        for pid in pids:
            handle.write('{}_img.npz\n'.format(pid))
            handle.write('{}_rois.npz\n'.format(pid))

    subprocess.call('rsync -av --files-from {} {} {}'.format(os.path.join(fold_dir, 'file_list.txt'), source_dir, target_dir), shell=True)
    dutils.unpack_dataset(target_dir)
    copied_files = os.listdir(target_dir)
    logger.info("copying and unpacking data set finsihed : {} files in target dir: {}. took {} sec".format(
        len(copied_files), target_dir, np.round(time.time() - start_time, 0)))
53.578288
205
0.643041
e7d2540bcb0bf79650e57e6833a3b305868e168f
285
py
Python
Basic_ML/q_lstsq.py
tjwei/taishin20190827
b9191f24d84b214b08c3482a04796c1c379a9039
[ "Apache-2.0" ]
3
2019-02-16T05:57:09.000Z
2019-09-16T07:07:18.000Z
Basic_ML/q_lstsq.py
tjwei/taishin20190827
b9191f24d84b214b08c3482a04796c1c379a9039
[ "Apache-2.0" ]
null
null
null
Basic_ML/q_lstsq.py
tjwei/taishin20190827
b9191f24d84b214b08c3482a04796c1c379a9039
[ "Apache-2.0" ]
8
2019-02-14T02:51:26.000Z
2019-10-07T07:44:24.000Z
# Least-squares fit with a constant (intercept) term.

# Generate random data: one feature column plus a column of ones for the
# intercept, and noisy targets from the true model y = 3*x + 15.
X = np.random.normal(0, 3, size=(50, 1))
one = np.ones_like(X)
X = np.concatenate([X, one], axis=1)
Y = X @ [3, 15] + np.random.normal(0, size=50)

# Solve with numpy's lstsq. rcond=None opts into the modern singular-value
# cutoff and silences the FutureWarning emitted when rcond is omitted.
a = np.linalg.lstsq(X, Y, rcond=None)[0]
print("a=", a)

# Plot the samples and the fitted line.
plt.plot(X[:, 0], Y, 'o')
plt.plot(X[:, 0], X @ a, 'o');
23.75
46
0.557895
d974acb00650054b4f1f00abad54b0334dcaf298
7,712
py
Python
rplugin/python3/denite/source/gitstatus.py
thecontinium/denite-git
aa2622315ad5ed45c4bb40a95a2182c6a5478540
[ "MIT" ]
null
null
null
rplugin/python3/denite/source/gitstatus.py
thecontinium/denite-git
aa2622315ad5ed45c4bb40a95a2182c6a5478540
[ "MIT" ]
null
null
null
rplugin/python3/denite/source/gitstatus.py
thecontinium/denite-git
aa2622315ad5ed45c4bb40a95a2182c6a5478540
[ "MIT" ]
null
null
null
# ============================================================================
# FILE: gitstatus.py
# AUTHOR: Qiming Zhao <chemzqm@gmail.com>
# License: MIT license
# ============================================================================
# pylint: disable=E0401,C0411
import os
import re
import subprocess
import shlex
from itertools import filterfalse

from .base import Base
from denite import util
from ..kind.file import Kind as File

# Matches blank git-status output lines so they are skipped.
EMPTY_LINE = re.compile(r"^\s*$")

# Maps a `git status --porcelain` status letter to the symbol shown in the
# denite candidate list.
STATUS_MAP = {
    ' ': ' ',
    'M': '~',
    'T': '~',
    'A': '+',
    'D': '-',
    'R': '→',
    'C': 'C',
    'U': 'U',
    '?': '?'}


def _parse_line(line, root, winnr):
    """Turn one porcelain status line into a denite candidate dict."""
    # Porcelain format: columns 0-1 are index/worktree status, path from col 3.
    path = os.path.join(root, line[3:])
    index_symbol = STATUS_MAP[line[0]]
    tree_symbol = STATUS_MAP[line[1]]
    word = "{0}{1} {2}".format(index_symbol, tree_symbol, line[3:])
    return {
        'word': word,
        'action__path': path,
        'source__root': root,
        # NOTE(review): capital-S key is inconsistent with the other
        # 'source__*' keys, but Kind.action_delete reads it back with the
        # same spelling — keep the two in sync.
        'Source__winnr': winnr,
        'source__staged': index_symbol not in [' ', '?'],
        'source__tree': tree_symbol not in [' ', '?']
    }


def run_command(commands, cwd, encoding='utf-8'):
    """Run a command in ``cwd`` and return its output as a list of lines."""
    try:
        p = subprocess.run(commands,
                           cwd=cwd,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
    # NOTE(review): subprocess.run without check=True never raises
    # CalledProcessError, so this handler is effectively dead — confirm
    # whether check=True was intended.
    except subprocess.CalledProcessError:
        return []

    return p.stdout.decode(encoding).split('\n')


class Source(Base):
    """Denite source listing `git status` entries of the current repo."""

    def __init__(self, vim):
        super().__init__(vim)
        self.name = 'gitstatus'
        self.kind = Kind(vim)
        self.is_public_context = True

    def on_init(self, context):
        # Resolve the repo root (parent of .git) and remember the window the
        # source was opened from; both are consumed by gather_candidates.
        winnr = self.vim.call('winnr')
        gitdir = self.vim.call('denite#git#gitdir')
        context['__root'] = '' if not gitdir else os.path.dirname(gitdir)
        context['__winnr'] = winnr

    def highlight(self):
        # Colors for the status symbols produced by _parse_line.
        self.vim.command('highlight deniteGitStatusAdd guifg=#009900 ctermfg=2')
        self.vim.command('highlight deniteGitStatusChange guifg=#bbbb00 ctermfg=3')
        self.vim.command('highlight deniteGitStatusDelete guifg=#ff2222 ctermfg=1')
        self.vim.command('highlight deniteGitStatusUnknown guifg=#5f5f5f ctermfg=59')

    def define_syntax(self):
        # Nest syntax groups so only the leading symbol column is colored.
        self.vim.command(r'syntax match deniteGitStatusHeader /^.*$/ ' +
                         r'containedin=' + self.syntax_name)
        self.vim.command(r'syntax match deniteGitStatusSymbol /^\s*\zs\S\+/ ' +
                         r'contained containedin=deniteGitStatusHeader')
        self.vim.command(r'syntax match deniteGitStatusAdd /+/ ' +
                         r'contained containedin=deniteGitStatusSymbol')
        self.vim.command(r'syntax match deniteGitStatusDelete /-/ ' +
                         r'contained containedin=deniteGitStatusSymbol')
        self.vim.command(r'syntax match deniteGitStatusChange /\~/ ' +
                         r'contained containedin=deniteGitStatusSymbol')
        self.vim.command(r'syntax match deniteGitStatusUnknown /?/ ' +
                         r'contained containedin=deniteGitStatusSymbol')

    def gather_candidates(self, context):
        """Return one candidate per `git status --porcelain -uall` line."""
        root = context['__root']
        winnr = context['__winnr']
        if not root:
            return []

        args = ['git', 'status', '--porcelain', '-uall']
        self.print_message(context, ' '.join(args))
        lines = run_command(args, root)
        candidates = []

        for line in lines:
            if EMPTY_LINE.fullmatch(line):
                continue
            candidates.append(_parse_line(line, root, winnr))

        return candidates


class Kind(File):
    """Actions (add/patch/diff-preview/reset/commit) for gitstatus candidates."""

    def __init__(self, vim):
        super().__init__(vim)
        self.persist_actions += ['reset', 'add', 'delete']  # pylint: disable=E1101
        self.redraw_actions += ['reset', 'add', 'commit']  # pylint: disable=E1101
        self.name = 'gitstatus'
        self._previewed_target = None
        # Pick the best available removal mechanism: :Rm command, rmtrash
        # binary, or vim's built-in delete().
        val = self.vim.call('exists', ':Rm')
        if val == 2:
            self.remove = 'rm'
        elif self.vim.call('executable', 'rmtrash'):
            self.remove = 'rmtrash'
        else:
            self.remove = 'delete'

    def action_patch(self, context):
        # Interactive `git add --patch` in a terminal buffer.
        args = []
        root = context['targets'][0]['source__root']
        for target in context['targets']:
            filepath = target['action__path']
            args.append(os.path.relpath(filepath, root))
        self.vim.command('terminal git add ' + ' '.join(args) + ' --patch')

    def action_add(self, context):
        # Stage all selected files.
        args = ['git', 'add']
        root = context['targets'][0]['source__root']
        for target in context['targets']:
            filepath = target['action__path']
            args.append(os.path.relpath(filepath, root))
        run_command(args, root)

    def __get_preview_window(self):
        # First window with 'previewwindow' set, or None.
        return next(filterfalse(lambda x: not x.options['previewwindow'],
                                self.vim.windows), None)

    # diff-preview action — NOTE(review): registered under the name 'delete',
    # which shadows the inherited file-delete action; confirm this naming is
    # intentional.
    def action_delete(self, context):
        target = context['targets'][0]
        root = target['source__root']
        winnr = target['Source__winnr']
        gitdir = os.path.join(target['source__root'], '.git')
        # Toggle behaviour: close an open preview, and stop if it was already
        # showing this very target.
        preview_window = self.__get_preview_window()
        if preview_window:
            self.vim.command('pclose!')
            if self._previewed_target == target:
                return
        relpath = os.path.relpath(target['action__path'], root)
        prefix = ''
        if target['source__staged']:
            if target['source__tree']:
                if util.input(self.vim, context, 'Diff cached?[y/n]', 'y') == 'y':
                    prefix = '--cached '
            else:
                prefix = '--cached '
        prev_id = self.vim.call('win_getid')
        self.vim.command(str(winnr) + 'wincmd w')
        self.vim.call('denite#git#diffPreview', prefix, relpath, gitdir)
        self.vim.call('win_gotoid', prev_id)
        self._previewed_target = target

    def action_reset(self, context):
        """Unstage/checkout each target, or remove it if untracked."""
        cwd = os.path.normpath(self.vim.eval('expand("%:p:h")'))
        for target in context['targets']:
            filepath = target['action__path']
            root = target['source__root']
            path = os.path.relpath(filepath, root)
            if target['source__tree'] and target['source__staged']:
                # Both staged and modified: ask which of the two to undo.
                res = util.input(self.vim, context, 'Select action reset or checkout [r/c]')
                if res == 'c':
                    args = 'git checkout -- ' + path
                    run_command(shlex.split(args), root)
                elif res == 'r':
                    args = 'git reset HEAD -- ' + path
                    run_command(shlex.split(args), root)
            elif target['source__tree']:
                args = 'git checkout -- ' + path
                run_command(shlex.split(args), root)
            elif target['source__staged']:
                args = 'git reset HEAD -- ' + path
                run_command(shlex.split(args), root)
            else:
                # Untracked file: remove it using the mechanism chosen in
                # __init__ (:Rm, rmtrash, or vim's delete()).
                if self.remove == 'rm':
                    self.vim.command('Rm ' + os.path.relpath(filepath, cwd))
                elif self.remove == 'rmtrash':
                    run_command(['rmtrash', filepath], root)
                else:
                    self.vim.call('delete', filepath)
        self.vim.command('checktime')

    def action_commit(self, context):
        root = context['targets'][0]['source__root']
        files = []
        for target in context['targets']:
            filepath = target['action__path']
            files.append(os.path.relpath(filepath, root))
self.vim.call('denite#git#commit', '-v', files)
36.037383
92
0.548107
5aa641f85a5e9cdbf555e256b1a3ebf272e8dc52
10,697
py
Python
amfeti/parallelization_managers/mpi_manager.py
AppliedMechanics/AMfe
be209dffe4d170aca735f1e912fd5cb448502119
[ "BSD-3-Clause" ]
21
2017-06-01T15:55:33.000Z
2022-03-13T08:43:31.000Z
amfeti/parallelization_managers/mpi_manager.py
AppliedMechanics/AMfeti
be209dffe4d170aca735f1e912fd5cb448502119
[ "BSD-3-Clause" ]
1
2022-01-08T07:20:15.000Z
2022-01-13T23:56:33.000Z
amfeti/parallelization_managers/mpi_manager.py
AppliedMechanics/AMfeti
be209dffe4d170aca735f1e912fd5cb448502119
[ "BSD-3-Clause" ]
10
2018-01-11T23:48:55.000Z
2022-01-12T15:58:54.000Z
# # Copyright (c) 2020 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS, # BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, RIXEN@TUM.DE. # # Distributed under 3-Clause BSD license. See LICENSE file for more information. # """ MPI-manager module for parallelization """ import os import logging import subprocess import shutil import numpy as np from copy import copy, deepcopy from .parallelization_manager_base import ParallelizationManagerBase from amfeti.solver_managers.local_solver_manager import LocalSolverManager from amfeti.tools import invert_dictionary_with_iterables, amfeti_dir from amfeti.config_base import ConfigBase from .tools import load_object, run_shell_script __all__ = ['MPIManager'] class MPIManager(ParallelizationManagerBase): """ MPI-manager, that sets up and configures the MPI-process Attributes ---------- _python_file : str name of Python-module, that shall be executed by each MPI-rank _file_extension : str file-type of serialized objects _path_prefix : str prefix for serialized local files, that are loaded into each MPI-rank _rank_path : dict absolute paths to each seralized file, that belongs to a MPI-rank """ def __init__(self, **kwargs): """ Parameters ---------- kwargs : dict Additional MPI-arguments handed over as keyword-arguments Returns ------- None """ super().__init__() mpi_exec = 'mpiexec' try: mpi_path = os.environ['MPIDIR'] mpi_exec = os.path.join(mpi_path, mpi_exec).replace('"', '') except: logger = logging.getLogger(__name__) logger.warning("Warning! Using mpiexec in global path") try: python_path = os.environ['PYTHON_ENV'] python_exec = os.path.join(python_path, 'python').replace('"', '') except: logger = logging.getLogger(__name__) logger.warning("Warning! 
Using python in global path") python_exec = 'python' self.set_config({'write_log': True, 'mpi_exec': mpi_exec, 'mpi_args': '', 'mpi_size': None, 'mpi_rank2problems': None, 'python_exec': python_exec, 'additional_mpi_args': kwargs, 'solution_path': None}) self._python_file = 'mpi_local_processor.py' self._file_extension = '.pkl' self._path_prefix = 'mpi_rank_' self._rank_path = dict() def read_solutions(self): """ Loads solutions from serialized solution-files Parameters ---------- None Returns ------- solution : SolutionBase solution-object """ logger = logging.getLogger(__name__) solution = dict() for rank_id in self._config_dict['mpi_rank2problems'].keys(): path = os.path.join(self._config_dict['solution_path'], self._path_prefix) + str(rank_id) + '_solution' + \ self._file_extension solution[rank_id] = load_object(path) logger.debug('Solution read') return solution def load_local_problems(self): """ Loads serialized local problems Parameters ---------- None Returns ------- None """ local_problems_dict = dict() for rank_id in self._config_dict['mpi_rank2problems'].keys(): path = os.path.join(self._config_dict['tmp_folder_absolute'], self._path_prefix) + str(rank_id) + \ self._file_extension local_solver_manager = load_object(path) for problem_id, problem in local_solver_manager._local_problems_dict.items(): local_problems_dict[problem_id] = problem return local_problems_dict def set_up_parallel_process(self, local_problems_dict, solver, local_solver_manager_config): """ Sets up the parallel solution-process by serializing local problems and configuring local solver-managers. 
Parameters ---------- local_problems_dict : dict local problems solver : GlobalSolverBase iterative solver for the interface-problem local_solver_manager_config : dict configuration of the local solver-manager Returns ------- None """ self._create_temporary_folder() if self._config_dict['mpi_rank2problems'] is None: rank2problems = dict() rank = 0 for problem_id in local_problems_dict.keys(): if rank not in rank2problems: rank2problems[rank] = np.array([problem_id]) rank += 1 else: rank2problems[rank] = np.append(rank2problems[rank], problem_id) self.set_config({'mpi_rank2problems': rank2problems}) self.set_config({'mpi_size': len(list(self._config_dict['mpi_rank2problems'].keys()))}) problem2rank_map = dict() for rank, problems in self._config_dict['mpi_rank2problems'].items(): for problem_id in problems: problem2rank_map[problem_id] = rank local_solver_manager_config.update({'parallel_processor': 'MPILocalProcessor', 'local_problems_full': list(local_problems_dict.keys())}) command = self._create_command_string() self._create_launcher_script('run_mpi', command) for rank_id, local_problems_in_rank in self._config_dict['mpi_rank2problems'].items(): local_problems_ranksubset = {problem_id: local_problems_dict[problem_id] for problem_id in local_problems_in_rank} curr_local_solver_manager_config = copy(local_solver_manager_config) secondary_problems = () interface2rank_map_subset = dict() for problem_id, local_problem in local_problems_ranksubset.items(): local_interfaces = local_problem.interfaces for other_id, other_problem in local_problems_dict.items(): if other_id not in local_problems_ranksubset.keys(): for interface in local_interfaces: if interface in other_problem.interfaces: interface2rank_map_subset[interface] = problem2rank_map[other_id] break if other_id not in secondary_problems: secondary_problems += (other_id,) local_problem2rank_map = copy(problem2rank_map) for problem_id in local_problems_dict.keys(): if problem_id in local_problems_in_rank: del 
(local_problem2rank_map[problem_id]) curr_local_solver_manager_config['parallel_processor_opt'] = {'interface2rank_map': interface2rank_map_subset, 'problem2rank_map': local_problem2rank_map, 'total_ranks_list': list(self._config_dict['mpi_rank2problems'].keys())} curr_local_solver_manager_config['secondary_problems'] = secondary_problems local_solver_manager = LocalSolverManager(local_problems_ranksubset, solver) local_solver_manager.set_config(copy(curr_local_solver_manager_config)) local_name = self._path_prefix + str(rank_id) + self._file_extension self._rank_path[rank_id] = self._create_serialized_file(local_solver_manager, local_name) def launch_parallel_process(self): """ Run the shell-script Parameters ---------- None Returns ------- None """ logger = logging.getLogger(__name__) if not os.path.exists(self._launcher_script_path): raise FileNotFoundError('File path %s does not exist.' % self._launcher_script_path) # executing script try: os.chdir(self._config_dict['tmp_folder_absolute']) os.chmod(self._launcher_script_path, 0o775) logger.info('Run directory = %s' % os.getcwd()) logger.info('######################################################################') run_shell_script('run_mpi.sh') os.chdir(self._local_folder) except: raise RuntimeError('Error during the simulation.') def _create_command_string(self): """ Creates string, that is written in the shell-script Parameters ---------- None Returns ------- None """ python_file_path = os.path.join(amfeti_dir('parallelization_managers'), self._python_file) command_list = ['"' + self._config_dict['mpi_exec'] + '"', self._config_dict['mpi_args'], '-n', str(self._config_dict['mpi_size']), '"' + self._config_dict['python_exec'] + '"', '"' + python_file_path + '"'] if self._config_dict['solution_path'] is None: self._config_dict['solution_path'] = copy(self._config_dict['tmp_folder_absolute']) command = ' '.join(command_list) command += ' "' + 'prefix' + '=' + 
os.path.join(self._config_dict['tmp_folder_absolute'], self._path_prefix) + '" '#os.path.join(self._config_dict['tmp_folder'], self._path_prefix) command += ' "' + 'ext' + '=' + self._file_extension + '" ' command += ' "' + 'solution' + '=' + os.path.join(self._config_dict['solution_path'], self._path_prefix) + '" ' for key, value in self._config_dict['additional_mpi_args'].items(): command += ' "' + str(key) + '=' + str(value) + '" ' # export results to a log file called amfeti_solver.log if self._config_dict['write_log']: command += '>mpi.log' logger = logging.getLogger(__name__) logger.info('######################################################################') logger.info('###################### SOLVER INFO ###################################') logger.info('MPI exec path = %s' % self._config_dict['mpi_exec']) logger.info('Python exec path = %s' % self._config_dict['python_exec']) return command
37.932624
189
0.585398
cd581c7b810ea9af469cc077af7cd095e82b1115
1,012
py
Python
stubs.min/System/Windows/Forms/__init___parts/ArrangeDirection.py
ricardyn/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
1
2021-02-02T13:39:16.000Z
2021-02-02T13:39:16.000Z
stubs.min/System/Windows/Forms/__init___parts/ArrangeDirection.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
null
null
null
stubs.min/System/Windows/Forms/__init___parts/ArrangeDirection.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
null
null
null
class ArrangeDirection(Enum,IComparable,IFormattable,IConvertible): """ Specifies the direction in which the system arranges minimized windows. enum (flags) ArrangeDirection,values: Down (4),Left (0),Right (0),Up (4) """ def __eq__(self,*args): """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self,*args): """ __format__(formattable: IFormattable,format: str) -> str """ pass def __ge__(self,*args): pass def __gt__(self,*args): pass def __init__(self,*args): """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self,*args): pass def __lt__(self,*args): pass def __ne__(self,*args): pass def __reduce_ex__(self,*args): pass def __str__(self,*args): pass Down=None Left=None Right=None Up=None value__=None
28.111111
215
0.663043
b7e9059dc0e3849a525cf0f88fe9e59541db1fda
962
py
Python
Lib/site-packages/notebook/services/nbconvert/tests/test_nbconvert_api.py
edupyter/EDUPYTER38
396183cea72987506f1ef647c0272a2577c56218
[ "bzip2-1.0.6" ]
null
null
null
Lib/site-packages/notebook/services/nbconvert/tests/test_nbconvert_api.py
edupyter/EDUPYTER38
396183cea72987506f1ef647c0272a2577c56218
[ "bzip2-1.0.6" ]
null
null
null
Lib/site-packages/notebook/services/nbconvert/tests/test_nbconvert_api.py
edupyter/EDUPYTER38
396183cea72987506f1ef647c0272a2577c56218
[ "bzip2-1.0.6" ]
null
null
null
from notebook.utils import url_path_join from notebook.tests.launchnotebook import NotebookTestBase class NbconvertAPI: """Wrapper for nbconvert API calls.""" def __init__(self, request): self.request = request def _req(self, verb, path, body=None, params=None): response = self.request(verb, url_path_join('api/nbconvert', path), data=body, params=params, ) response.raise_for_status() return response def list_formats(self): return self._req('GET', '') class APITest(NotebookTestBase): def setUp(self): self.nbconvert_api = NbconvertAPI(self.request) def test_list_formats(self): formats = self.nbconvert_api.list_formats().json() self.assertIsInstance(formats, dict) self.assertIn('python', formats) self.assertIn('html', formats) self.assertEqual(formats['python']['output_mimetype'], 'text/x-python')
33.172414
79
0.658004
1620db6ddb7f6e79c7e7b799b4be98ebad01e215
1,354
py
Python
util.py
226wyj/You-Draw-I-Guess
ed36d75540b595e2cd7ad2454a10fa94b58acb47
[ "MIT" ]
null
null
null
util.py
226wyj/You-Draw-I-Guess
ed36d75540b595e2cd7ad2454a10fa94b58acb47
[ "MIT" ]
null
null
null
util.py
226wyj/You-Draw-I-Guess
ed36d75540b595e2cd7ad2454a10fa94b58acb47
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -* import sys sys.path.append(".") import os import random import logging import torch as t import numpy as np def init_logger(): """将日志信息输出到控制台 Params: asctime: 打印日志的时间 levelname: 打印日志级别 name: 打印日志名字 message: 打印日志信息 """ logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) def check_path(args): print("Start checking path...") if not os.path.exists(args.data_path): print("Creating data path...") os.makedirs(args.data_path) if not os.path.exists(args.model_path): print("Creating model path...") os.makedirs(args.model_path) print("Check path done.") def set_seed(args): """ 为了得到可重复的实验结果需要对所有随机数生成器设置一个固定的种子 """ random.seed(args.seed) np.random.seed(args.seed) t.manual_seed(args.seed) if not args.no_cuda and t.cuda.is_available(): t.cuda.manual_seed_all(args.seed) def show_model(args): model_path = os.path.join(args.model_path, args.model_name) device = t.device("cuda:0" if t.cuda.is_available() and not args.no_cuda else "cpu") net = t.load(model_path, map_location=t.device(device)) print(type(net)) print(len(net)) for k in net.keys(): print(k)
25.54717
88
0.627031
79aa1fd7427b2d266cb41a74f9dca3ba11baa5e7
69,344
py
Python
runtests.py
larsmans/cython
4b29260d24af8fe7924efd2291a931e8bdf938f1
[ "Apache-2.0" ]
null
null
null
runtests.py
larsmans/cython
4b29260d24af8fe7924efd2291a931e8bdf938f1
[ "Apache-2.0" ]
null
null
null
runtests.py
larsmans/cython
4b29260d24af8fe7924efd2291a931e8bdf938f1
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python import os import sys import re import gc import locale import shutil import time import unittest import doctest import operator import subprocess import tempfile import traceback import warnings try: import platform IS_PYPY = platform.python_implementation() == 'PyPy' IS_CPYTHON = platform.python_implementation() == 'CPython' except (ImportError, AttributeError): IS_CPYTHON = True IS_PYPY = False try: from StringIO import StringIO except ImportError: from io import StringIO try: import cPickle as pickle except ImportError: import pickle try: from io import open as io_open except ImportError: from codecs import open as io_open try: import threading except ImportError: # No threads, no problems threading = None try: from collections import defaultdict except ImportError: class defaultdict(object): def __init__(self, default_factory=lambda : None): self._dict = {} self.default_factory = default_factory def __getitem__(self, key): if key not in self._dict: self._dict[key] = self.default_factory() return self._dict[key] def __setitem__(self, key, value): self._dict[key] = value def __repr__(self): return repr(self._dict) WITH_CYTHON = True CY3_DIR = None from distutils.dist import Distribution from distutils.core import Extension from distutils.command.build_ext import build_ext as _build_ext from distutils import sysconfig distutils_distro = Distribution() if sys.platform == 'win32': # TODO: Figure out why this hackery (see http://thread.gmane.org/gmane.comp.python.cython.devel/8280/). 
config_files = distutils_distro.find_config_files() try: config_files.remove('setup.cfg') except ValueError: pass distutils_distro.parse_config_files(config_files) cfgfiles = distutils_distro.find_config_files() try: cfgfiles.remove('setup.cfg') except ValueError: pass distutils_distro.parse_config_files(cfgfiles) EXT_DEP_MODULES = { 'tag:numpy' : 'numpy', 'tag:pstats': 'pstats', 'tag:posix' : 'posix', 'tag:array' : 'array', } def update_numpy_extension(ext): import numpy ext.include_dirs.append(numpy.get_include()) def update_openmp_extension(ext): ext.openmp = True language = ext.language if language == 'cpp': flags = OPENMP_CPP_COMPILER_FLAGS else: flags = OPENMP_C_COMPILER_FLAGS if flags: compile_flags, link_flags = flags ext.extra_compile_args.extend(compile_flags.split()) ext.extra_link_args.extend(link_flags.split()) return ext elif sys.platform == 'win32': return ext return EXCLUDE_EXT def get_openmp_compiler_flags(language): """ As of gcc 4.2, it supports OpenMP 2.5. Gcc 4.4 implements 3.0. We don't (currently) check for other compilers. returns a two-tuple of (CFLAGS, LDFLAGS) to build the OpenMP extension """ if language == 'cpp': cc = sysconfig.get_config_var('CXX') else: cc = sysconfig.get_config_var('CC') if not cc: if sys.platform == 'win32': return '/openmp', '' return None # For some reason, cc can be e.g. 
'gcc -pthread' cc = cc.split()[0] # Force english output env = os.environ.copy() env['LC_MESSAGES'] = 'C' matcher = re.compile(r"gcc version (\d+\.\d+)").search try: p = subprocess.Popen([cc, "-v"], stderr=subprocess.PIPE, env=env) except EnvironmentError: # Be compatible with Python 3 warnings.warn("Unable to find the %s compiler: %s: %s" % (language, os.strerror(sys.exc_info()[1].errno), cc)) return None _, output = p.communicate() output = output.decode(locale.getpreferredencoding() or 'ASCII', 'replace') gcc_version = matcher(output) if not gcc_version: return None # not gcc - FIXME: do something about other compilers compiler_version = gcc_version.group(1) if compiler_version and compiler_version.split('.') >= ['4', '2']: return '-fopenmp', '-fopenmp' try: locale.setlocale(locale.LC_ALL, '') except locale.Error: pass OPENMP_C_COMPILER_FLAGS = get_openmp_compiler_flags('c') OPENMP_CPP_COMPILER_FLAGS = get_openmp_compiler_flags('cpp') # Return this from the EXT_EXTRAS matcher callback to exclude the extension EXCLUDE_EXT = object() EXT_EXTRAS = { 'tag:numpy' : update_numpy_extension, 'tag:openmp': update_openmp_extension, } # TODO: use tags VER_DEP_MODULES = { # tests are excluded if 'CurrentPythonVersion OP VersionTuple', i.e. # (2,4) : (operator.lt, ...) excludes ... when PyVer < 2.4.x (2,4) : (operator.lt, lambda x: x in ['run.extern_builtins_T258', 'run.builtin_sorted', 'run.reversed_iteration', ]), (2,5) : (operator.lt, lambda x: x in ['run.any', 'run.all', 'run.yield_from_pep380', # GeneratorExit 'run.generator_frame_cycle', # yield in try-finally 'run.relativeimport_T542', 'run.relativeimport_star_T542', 'run.initial_file_path', # relative import ]), (2,6) : (operator.lt, lambda x: x in ['run.print_function', 'run.language_level', # print function 'run.cython3', 'run.property_decorator_T593', # prop.setter etc. 
'run.generators_py', # generators, with statement 'run.pure_py', # decorators, with statement 'run.purecdef', 'run.struct_conversion', # memory views require buffer protocol 'memoryview.cythonarray', 'memoryview.memslice', 'memoryview.numpy_memoryview', 'memoryview.memoryviewattrs', 'memoryview.memoryview', ]), (2,7) : (operator.lt, lambda x: x in ['run.withstat_py', # multi context with statement 'run.yield_inside_lambda', ]), # The next line should start (3,); but this is a dictionary, so # we can only have one (3,) key. Since 2.7 is supposed to be the # last 2.x release, things would have to change drastically for this # to be unsafe... (2,999): (operator.lt, lambda x: x in ['run.special_methods_T561_py3', 'run.test_raisefrom', ]), (3,): (operator.ge, lambda x: x in ['run.non_future_division', 'compile.extsetslice', 'compile.extdelslice', 'run.special_methods_T561_py2']), } # files that should not be converted to Python 3 code with 2to3 KEEP_2X_FILES = [ os.path.join('Cython', 'Debugger', 'Tests', 'test_libcython_in_gdb.py'), os.path.join('Cython', 'Debugger', 'Tests', 'test_libpython_in_gdb.py'), os.path.join('Cython', 'Debugger', 'libcython.py'), os.path.join('Cython', 'Debugger', 'libpython.py'), ] COMPILER = None INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ] CFLAGS = os.getenv('CFLAGS', '').split() CCACHE = os.getenv('CYTHON_RUNTESTS_CCACHE', '').split() TEST_SUPPORT_DIR = 'testsupport' BACKENDS = ['c', 'cpp'] def memoize(f): uncomputed = object() f._cache = {} def func(*args): res = f._cache.get(args, uncomputed) if res is uncomputed: res = f._cache[args] = f(*args) return res return func @memoize def parse_tags(filepath): tags = defaultdict(list) f = io_open(filepath, encoding='ISO-8859-1', errors='replace') try: for line in f: line = line.strip() if not line: continue if line[0] != '#': break ix = line.find(':') if ix != -1: tag = line[1:ix].strip() values = line[ix+1:].split(',') tags[tag].extend([value.strip() for value 
in values]) finally: f.close() return tags list_unchanging_dir = memoize(lambda x: os.listdir(x)) class build_ext(_build_ext): def build_extension(self, ext): try: try: # Py2.7+ & Py3.2+ compiler_obj = self.compiler_obj except AttributeError: compiler_obj = self.compiler if ext.language == 'c++': compiler_obj.compiler_so.remove('-Wstrict-prototypes') if CCACHE: compiler_obj.compiler_so = CCACHE + compiler_obj.compiler_so if getattr(ext, 'openmp', None) and compiler_obj.compiler_type == 'msvc': ext.extra_compile_args.append('/openmp') except Exception: pass _build_ext.build_extension(self, ext) class ErrorWriter(object): match_error = re.compile('(warning:)?(?:.*:)?\s*([-0-9]+)\s*:\s*([-0-9]+)\s*:\s*(.*)').match def __init__(self): self.output = [] self.write = self.output.append def _collect(self, collect_errors, collect_warnings): s = ''.join(self.output) result = [] for line in s.split('\n'): match = self.match_error(line) if match: is_warning, line, column, message = match.groups() if (is_warning and collect_warnings) or \ (not is_warning and collect_errors): result.append( (int(line), int(column), message.strip()) ) result.sort() return [ "%d:%d: %s" % values for values in result ] def geterrors(self): return self._collect(True, False) def getwarnings(self): return self._collect(False, True) def getall(self): return self._collect(True, True) class TestBuilder(object): def __init__(self, rootdir, workdir, selectors, exclude_selectors, annotate, cleanup_workdir, cleanup_sharedlibs, cleanup_failures, with_pyregr, cython_only, languages, test_bugs, fork, language_level): self.rootdir = rootdir self.workdir = workdir self.selectors = selectors self.exclude_selectors = exclude_selectors self.annotate = annotate self.cleanup_workdir = cleanup_workdir self.cleanup_sharedlibs = cleanup_sharedlibs self.cleanup_failures = cleanup_failures self.with_pyregr = with_pyregr self.cython_only = cython_only self.languages = languages self.test_bugs = test_bugs self.fork = fork 
self.language_level = language_level def build_suite(self): suite = unittest.TestSuite() filenames = os.listdir(self.rootdir) filenames.sort() for filename in filenames: path = os.path.join(self.rootdir, filename) if os.path.isdir(path) and filename != TEST_SUPPORT_DIR: if filename == 'pyregr' and not self.with_pyregr: continue if filename == 'broken' and not self.test_bugs: continue suite.addTest( self.handle_directory(path, filename)) if sys.platform not in ['win32']: # Non-Windows makefile. if [1 for selector in self.selectors if selector("embedded")] \ and not [1 for selector in self.exclude_selectors if selector("embedded")]: suite.addTest(unittest.makeSuite(EmbedTest)) return suite def handle_directory(self, path, context): workdir = os.path.join(self.workdir, context) if not os.path.exists(workdir): os.makedirs(workdir) suite = unittest.TestSuite() filenames = list_unchanging_dir(path) filenames.sort() for filename in filenames: filepath = os.path.join(path, filename) module, ext = os.path.splitext(filename) if ext not in ('.py', '.pyx', '.srctree'): continue if filename.startswith('.'): continue # certain emacs backup files tags = parse_tags(filepath) fqmodule = "%s.%s" % (context, module) if not [ 1 for match in self.selectors if match(fqmodule, tags) ]: continue if self.exclude_selectors: if [1 for match in self.exclude_selectors if match(fqmodule, tags)]: continue mode = 'run' # default if tags['mode']: mode = tags['mode'][0] elif context == 'pyregr': mode = 'pyregr' if ext == '.srctree': if 'cpp' not in tags['tag'] or 'cpp' in self.languages: suite.addTest(EndToEndTest(filepath, workdir, self.cleanup_workdir)) continue # Choose the test suite. 
if mode == 'pyregr': if not filename.startswith('test_'): continue test_class = CythonPyregrTestCase elif mode == 'run': if module.startswith("test_"): test_class = CythonUnitTestCase else: test_class = CythonRunTestCase else: test_class = CythonCompileTestCase for test in self.build_tests(test_class, path, workdir, module, mode == 'error', tags): suite.addTest(test) if mode == 'run' and ext == '.py' and not self.cython_only: # additionally test file in real Python suite.addTest(PureDoctestTestCase(module, os.path.join(path, filename))) return suite def build_tests(self, test_class, path, workdir, module, expect_errors, tags): if 'werror' in tags['tag']: warning_errors = True else: warning_errors = False if expect_errors: if 'cpp' in tags['tag'] and 'cpp' in self.languages: languages = ['cpp'] else: languages = self.languages[:1] else: languages = self.languages if 'cpp' in tags['tag'] and 'c' in languages: languages = list(languages) languages.remove('c') tests = [ self.build_test(test_class, path, workdir, module, language, expect_errors, warning_errors) for language in languages ] return tests def build_test(self, test_class, path, workdir, module, language, expect_errors, warning_errors): language_workdir = os.path.join(workdir, language) if not os.path.exists(language_workdir): os.makedirs(language_workdir) workdir = os.path.join(language_workdir, module) return test_class(path, workdir, module, language=language, expect_errors=expect_errors, annotate=self.annotate, cleanup_workdir=self.cleanup_workdir, cleanup_sharedlibs=self.cleanup_sharedlibs, cleanup_failures=self.cleanup_failures, cython_only=self.cython_only, fork=self.fork, language_level=self.language_level, warning_errors=warning_errors) class CythonCompileTestCase(unittest.TestCase): def __init__(self, test_directory, workdir, module, language='c', expect_errors=False, annotate=False, cleanup_workdir=True, cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False, fork=True, language_level=2, 
warning_errors=False): self.test_directory = test_directory self.workdir = workdir self.module = module self.language = language self.expect_errors = expect_errors self.annotate = annotate self.cleanup_workdir = cleanup_workdir self.cleanup_sharedlibs = cleanup_sharedlibs self.cleanup_failures = cleanup_failures self.cython_only = cython_only self.fork = fork self.language_level = language_level self.warning_errors = warning_errors unittest.TestCase.__init__(self) def shortDescription(self): return "compiling (%s) %s" % (self.language, self.module) def setUp(self): from Cython.Compiler import Options self._saved_options = [ (name, getattr(Options, name)) for name in ('warning_errors', 'error_on_unknown_names') ] self._saved_default_directives = Options.directive_defaults.items() Options.warning_errors = self.warning_errors if not os.path.exists(self.workdir): os.makedirs(self.workdir) if self.workdir not in sys.path: sys.path.insert(0, self.workdir) def tearDown(self): from Cython.Compiler import Options for name, value in self._saved_options: setattr(Options, name, value) Options.directive_defaults = dict(self._saved_default_directives) try: sys.path.remove(self.workdir) except ValueError: pass try: del sys.modules[self.module] except KeyError: pass cleanup = self.cleanup_failures or self.success cleanup_c_files = WITH_CYTHON and self.cleanup_workdir and cleanup cleanup_lib_files = self.cleanup_sharedlibs and cleanup if os.path.exists(self.workdir): if cleanup_c_files and cleanup_lib_files: shutil.rmtree(self.workdir, ignore_errors=True) else: for rmfile in os.listdir(self.workdir): if not cleanup_c_files: if (rmfile[-2:] in (".c", ".h") or rmfile[-4:] == ".cpp" or rmfile.endswith(".html")): continue if not cleanup_lib_files and (rmfile.endswith(".so") or rmfile.endswith(".dll")): continue try: rmfile = os.path.join(self.workdir, rmfile) if os.path.isdir(rmfile): shutil.rmtree(rmfile, ignore_errors=True) else: os.remove(rmfile) except IOError: pass def 
runTest(self):
        self.success = False
        self.runCompileTest()
        self.success = True

    def runCompileTest(self):
        self.compile(self.test_directory, self.module, self.workdir,
                     self.test_directory, self.expect_errors, self.annotate)

    def find_module_source_file(self, source_file):
        # Fall back from '.pyx' to '.py' when no .pyx file exists.
        if not os.path.exists(source_file):
            source_file = source_file[:-1]
        return source_file

    def build_target_filename(self, module_name):
        # e.g. 'mymodule.c' or 'mymodule.cpp'
        target = '%s.%s' % (module_name, self.language)
        return target

    def related_files(self, test_directory, module_name):
        # Support files named '<module>_*.<ext>' belong to this test.
        is_related = re.compile('%s_.*[.].*' % module_name).match
        return [filename for filename in list_unchanging_dir(test_directory)
                if is_related(filename)]

    def copy_files(self, test_directory, target_directory, file_list):
        for filename in file_list:
            shutil.copy(os.path.join(test_directory, filename),
                        target_directory)

    def source_files(self, workdir, module_name, file_list):
        # The generated C file plus any related sources not already present.
        return ([self.build_target_filename(module_name)] +
            [filename for filename in file_list
             if not os.path.isfile(os.path.join(workdir, filename))])

    def split_source_and_output(self, test_directory, module, workdir):
        # Copy the test source into the workdir, splitting off the expected
        # compiler errors listed after a '_ERRORS' marker line.  Returns the
        # list of expected error strings (empty when there is no marker).
        source_file = self.find_module_source_file(os.path.join(test_directory, module) + '.pyx')
        source_and_output = io_open(source_file, 'rU', encoding='ISO-8859-1')
        try:
            out = io_open(os.path.join(workdir, module + os.path.splitext(source_file)[1]),
                          'w', encoding='ISO-8859-1')
            for line in source_and_output:
                if line.startswith("_ERRORS"):
                    out.close()
                    # everything after the marker is collected, not copied
                    out = ErrorWriter()
                else:
                    out.write(line)
        finally:
            source_and_output.close()
        try:
            geterrors = out.geterrors
        except AttributeError:
            # no '_ERRORS' marker seen => 'out' is still the plain file
            out.close()
            return []
        else:
            return geterrors()

    def run_cython(self, test_directory, module, targetdir, incdir, annotate,
                   extra_compile_options=None):
        # Translate the test module to C/C++ with the Cython compiler.
        include_dirs = INCLUDE_DIRS + [os.path.join(test_directory, '..', TEST_SUPPORT_DIR)]
        if incdir:
            include_dirs.append(incdir)
        source = self.find_module_source_file(
            os.path.join(test_directory, module + '.pyx'))
        target = os.path.join(targetdir,
                              self.build_target_filename(module))
        if extra_compile_options is None:
            extra_compile_options = {}
        try:
            CompilationOptions
        except NameError:
            # Import lazily so that --no-cython works without loading Cython.
            from Cython.Compiler.Main import CompilationOptions
            from Cython.Compiler.Main import compile as cython_compile
            from Cython.Compiler.Main import default_options
        options = CompilationOptions(
            default_options,
            include_path = include_dirs,
            output_file = target,
            annotate = annotate,
            use_listing_file = False,
            cplus = self.language == 'cpp',
            language_level = self.language_level,
            generate_pxi = False,
            evaluate_tree_assertions = True,
            **extra_compile_options
            )
        cython_compile(source, options=options,
                       full_module_name=module)

    def run_distutils(self, test_directory, module, workdir, incdir,
                      extra_extension_args=None):
        # Build the generated C/C++ source into an extension module via
        # distutils, applying any per-test fixups registered in EXT_EXTRAS.
        original_source = self.find_module_source_file(
            os.path.join(test_directory, module + '.pyx'))
        try:
            tags = parse_tags(original_source)
        except IOError:
            tags = {}
        cwd = os.getcwd()
        os.chdir(workdir)
        try:
            build_extension = build_ext(distutils_distro)
            build_extension.include_dirs = INCLUDE_DIRS[:]
            if incdir:
                build_extension.include_dirs.append(incdir)
            build_extension.finalize_options()
            if COMPILER:
                build_extension.compiler = COMPILER
            ext_compile_flags = CFLAGS[:]
            compiler = COMPILER or sysconfig.get_config_var('CC')
            if self.language == 'c' and compiler == 'gcc':
                # be strict about the C dialect in plain C mode
                ext_compile_flags.extend(['-std=c89', '-pedantic'])
            if build_extension.compiler == 'mingw32':
                ext_compile_flags.append('-Wno-format')
            if extra_extension_args is None:
                extra_extension_args = {}

            related_files = self.related_files(test_directory, module)
            self.copy_files(test_directory, workdir, related_files)
            extension = Extension(
                module,
                sources = self.source_files(workdir, module, related_files),
                extra_compile_args = ext_compile_flags,
                **extra_extension_args
                )
            if self.language == 'cpp':
                # Set the language now as the fixer might need it
                extension.language = 'c++'
            # Compile string matchers in EXT_EXTRAS lazily, replacing the
            # string key with the compiled selector in place.
            for matcher, fixer in EXT_EXTRAS.items():
                if isinstance(matcher, str):
                    del EXT_EXTRAS[matcher]
                    matcher = 
                              string_selector(matcher)
                    EXT_EXTRAS[matcher] = fixer
                if matcher(module, tags):
                    # The fixer may adapt the extension or exclude the test.
                    newext = fixer(extension)
                    if newext is EXCLUDE_EXT:
                        return
                    extension = newext or extension
            if self.language == 'cpp':
                extension.language = 'c++'
            build_extension.extensions = [extension]
            build_extension.build_temp = workdir
            build_extension.build_lib  = workdir
            build_extension.run()
        finally:
            os.chdir(cwd)

    def compile(self, test_directory, module, workdir, incdir,
                expect_errors, annotate):
        # Run the Cython compiler and compare its error output against the
        # expected errors (if any); otherwise continue with the C build.
        expected_errors = errors = ()
        if expect_errors:
            expected_errors = self.split_source_and_output(
                test_directory, module, workdir)
            test_directory = workdir

        if WITH_CYTHON:
            old_stderr = sys.stderr
            try:
                sys.stderr = ErrorWriter()
                self.run_cython(test_directory, module, workdir, incdir, annotate)
                errors = sys.stderr.geterrors()
            finally:
                sys.stderr = old_stderr

        if errors or expected_errors:
            try:
                # NOTE(review): assertEquals is a deprecated alias of
                # assertEqual; kept for old-Python compatibility here.
                for expected, error in zip(expected_errors, errors):
                    self.assertEquals(expected, error)
                if len(errors) < len(expected_errors):
                    expected_error = expected_errors[len(errors)]
                    self.assertEquals(expected_error, None)
                elif len(errors) > len(expected_errors):
                    unexpected_error = errors[len(expected_errors)]
                    self.assertEquals(None, unexpected_error)
            except AssertionError:
                print("\n=== Expected errors: ===")
                print('\n'.join(expected_errors))
                print("\n\n=== Got errors: ===")
                print('\n'.join(errors))
                print('\n')
                raise
        else:
            if not self.cython_only:
                self.run_distutils(test_directory, module, workdir, incdir)


class CythonRunTestCase(CythonCompileTestCase):
    # Compiles the test module and then runs its doctests.
    def shortDescription(self):
        if self.cython_only:
            return CythonCompileTestCase.shortDescription(self)
        else:
            return "compiling (%s) and running %s" % (self.language, self.module)

    def run(self, result=None):
        if result is None:
            result = self.defaultTestResult()
        result.startTest(self)
        try:
            self.setUp()
            try:
                self.success = False
                self.runCompileTest()
                # Compare failure/error counts before and after running to
                # decide whether this particular test succeeded.
                failures, errors = len(result.failures), len(result.errors)
                self.run_tests(result)
                if failures == len(result.failures) and errors == 
                        len(result.errors):
                    # No new errors...
                    self.success = True
            finally:
                check_thread_termination()
        except Exception:
            result.addError(self, sys.exc_info())
            result.stopTest(self)
        try:
            self.tearDown()
        except Exception:
            pass

    def run_tests(self, result):
        if not self.cython_only:
            self.run_doctests(self.module, result)

    def run_doctests(self, module_name, result):
        def run_test(result):
            tests = doctest.DocTestSuite(module_name)
            tests.run(result)
        run_forked_test(result, run_test, self.shortDescription(), self.fork)


def run_forked_test(result, run_func, test_name, fork=True):
    # Run 'run_func' in a forked child process (where supported) so the
    # imported test module cannot leak state into the parent test runner.
    # Results are pickled through a temp file and merged back in the parent.
    if not fork or sys.version_info[0] >= 3 or not hasattr(os, 'fork'):
        run_func(result)
        sys.stdout.flush()
        sys.stderr.flush()
        gc.collect()
        return

    # fork to make sure we do not keep the tested module loaded
    result_handle, result_file = tempfile.mkstemp()
    os.close(result_handle)
    child_id = os.fork()
    if not child_id:
        # -- child process: run the test and pickle the result to the file --
        result_code = 0
        try:
            try:
                tests = None
                try:
                    partial_result = PartialTestResult(result)
                    run_func(partial_result)
                    sys.stdout.flush()
                    sys.stderr.flush()
                    gc.collect()
                except Exception:
                    if tests is None:
                        # importing failed, try to fake a test class
                        tests = _FakeClass(
                            failureException=sys.exc_info()[1],
                            _shortDescription=test_name,
                            module_name=None)
                    partial_result.addError(tests, sys.exc_info())
                    result_code = 1
                output = open(result_file, 'wb')
                pickle.dump(partial_result.data(), output)
            except:
                traceback.print_exc()
        finally:
            try: output.close()
            except: pass
            os._exit(result_code)

    # -- parent process: wait for the child and merge its results --
    try:
        cid, result_code = os.waitpid(child_id, 0)
        module_name = test_name.split()[-1]
        # os.waitpid returns the child's result code in the
        # upper byte of result_code, and the signal it was
        # killed by in the lower byte
        if result_code & 255:
            raise Exception("Tests in module '%s' were unexpectedly killed by signal %d"%
                            (module_name, result_code & 255))
        result_code >>= 8
        if result_code in (0,1):
            input = open(result_file, 'rb')
            try:
                PartialTestResult.join_results(result, pickle.load(input))
            finally:
                input.close()
        if result_code:
            raise 
        Exception("Tests in module '%s' exited with status %d" %
                        (module_name, result_code))
    finally:
        try: os.unlink(result_file)
        except: pass


class PureDoctestTestCase(unittest.TestCase):
    # Runs the doctests of a plain (uncompiled) Python module, as a sanity
    # check that a '.py' test also passes in the interpreter itself.
    def __init__(self, module_name, module_path):
        self.module_name = module_name
        self.module_path = module_path
        unittest.TestCase.__init__(self, 'run')

    def shortDescription(self):
        return "running pure doctests in %s" % self.module_name

    def run(self, result=None):
        if result is None:
            result = self.defaultTestResult()
        loaded_module_name = 'pure_doctest__' + self.module_name
        result.startTest(self)
        try:
            self.setUp()

            import imp
            m = imp.load_source(loaded_module_name, self.module_path)
            try:
                doctest.DocTestSuite(m).run(result)
            finally:
                del m
                if loaded_module_name in sys.modules:
                    del sys.modules[loaded_module_name]
                check_thread_termination()
        except Exception:
            result.addError(self, sys.exc_info())
            result.stopTest(self)
        try:
            self.tearDown()
        except Exception:
            pass

# matches non-dunder private attributes, e.g. '_foo' but not '__foo__'
is_private_field = re.compile('^_[^_]').match

class _FakeClass(object):
    # Minimal picklable stand-in for a test case; used when shipping results
    # from the forked child and when a test module fails to import.
    def __init__(self, **kwargs):
        self._shortDescription = kwargs.get('module_name')
        self.__dict__.update(kwargs)
    def shortDescription(self):
        return self._shortDescription

try: # Py2.7+ and Py3.2+
    from unittest.runner import _TextTestResult
except ImportError:
    from unittest import _TextTestResult

class PartialTestResult(_TextTestResult):
    # A test result that buffers its output so it can be pickled in the
    # forked child process and merged back in the parent (see data()).
    def __init__(self, base_result):
        _TextTestResult.__init__(
            self, self._StringIO(), True,
            base_result.dots + base_result.showAll*2)

    def strip_error_results(self, results):
        # Drop unpicklable private attributes from the recorded test cases.
        for test_case, error in results:
            for attr_name in filter(is_private_field, dir(test_case)):
                if attr_name == '_dt_test':
                    test_case._dt_test = _FakeClass(
                        name=test_case._dt_test.name)
                elif attr_name != '_shortDescription':
                    setattr(test_case, attr_name, None)

    def data(self):
        # Picklable summary of this result.
        self.strip_error_results(self.failures)
        self.strip_error_results(self.errors)
        return (self.failures, self.errors, self.testsRun,
                self.stream.getvalue())

    def join_results(result, data):
        """Static method for 
        merging the result back into the main result object.
        """
        failures, errors, tests_run, output = data
        if output:
            result.stream.write(output)
        result.errors.extend(errors)
        result.failures.extend(failures)
        result.testsRun += tests_run

    join_results = staticmethod(join_results)

    class _StringIO(StringIO):
        # minimal writeln() support, as used by _TextTestResult
        def writeln(self, line):
            self.write("%s\n" % line)


class CythonUnitTestCase(CythonRunTestCase):
    # Compiles the test module and runs it through the unittest loader.
    def shortDescription(self):
        return "compiling (%s) tests in %s" % (self.language, self.module)

    def run_tests(self, result):
        unittest.defaultTestLoader.loadTestsFromName(self.module).run(result)


class CythonPyregrTestCase(CythonRunTestCase):
    # Runs a CPython regression test file after compiling it with Cython.
    def setUp(self):
        CythonRunTestCase.setUp(self)
        from Cython.Compiler import Options
        # pyregr tests rely on full CPython semantics
        Options.error_on_unknown_names = False
        Options.directive_defaults.update(dict(
            binding=True, always_allow_keywords=True,
            set_initial_path="SOURCEFILE"))

    def _run_unittest(self, result, *classes):
        """Run tests from unittest.TestCase-derived classes."""
        valid_types = (unittest.TestSuite, unittest.TestCase)
        suite = unittest.TestSuite()
        for cls in classes:
            if isinstance(cls, str):
                if cls in sys.modules:
                    suite.addTest(unittest.findTestCases(sys.modules[cls]))
                else:
                    raise ValueError("str arguments must be keys in sys.modules")
            elif isinstance(cls, valid_types):
                suite.addTest(cls)
            else:
                suite.addTest(unittest.makeSuite(cls))
        suite.run(result)

    def _run_doctest(self, result, module):
        self.run_doctests(module, result)

    def run_tests(self, result):
        try:
            from test import support
        except ImportError: # Python2.x
            from test import test_support as support

        def run_test(result):
            # Redirect the regression-test helper functions to feed our own
            # result object instead of CPython's default runner.
            def run_unittest(*classes):
                return self._run_unittest(result, *classes)
            def run_doctest(module, verbosity=None):
                return self._run_doctest(result, module)

            backup = (support.run_unittest, support.run_doctest)
            support.run_unittest = run_unittest
            support.run_doctest = run_doctest

            try:
                try:
                    sys.stdout.flush() # helps in case of crashes
                    module = __import__(self.module)
                    sys.stdout.flush() # helps in case of crashes
                    # (comment continued: crashes)
                    if hasattr(module, 'test_main'):
                        module.test_main()
                        sys.stdout.flush() # helps in case of crashes
                except (unittest.SkipTest, support.ResourceDenied):
                    result.addSkip(self, 'ok')
            finally:
                support.run_unittest, support.run_doctest = backup

        run_forked_test(result, run_test, self.shortDescription(), self.fork)

include_debugger = IS_CPYTHON and sys.version_info[:2] > (2, 5)

def collect_unittests(path, module_prefix, suite, selectors, exclude_selectors):
    # Add the unit tests found under 'path' (packages named 'Tests', files
    # named 'Test*.py') to 'suite', filtered by the given selectors.
    def file_matches(filename):
        return filename.startswith("Test") and filename.endswith(".py")

    def package_matches(dirname):
        return dirname == "Tests"

    loader = unittest.TestLoader()

    if include_debugger:
        skipped_dirs = []
    else:
        skipped_dirs = ['Cython' + os.path.sep + 'Debugger' + os.path.sep]

    for dirpath, dirnames, filenames in os.walk(path):
        if dirpath != path and "__init__.py" not in filenames:
            # not a package => skip it and everything below it
            skipped_dirs.append(dirpath + os.path.sep)
            continue
        skip = False
        for dir in skipped_dirs:
            if dirpath.startswith(dir):
                skip = True
        if skip:
            continue
        parentname = os.path.split(dirpath)[-1]
        if package_matches(parentname):
            for f in filenames:
                if file_matches(f):
                    filepath = os.path.join(dirpath, f)[:-len(".py")]
                    modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
                    if not [ 1 for match in selectors if match(modulename) ]:
                        continue
                    if [ 1 for match in exclude_selectors if match(modulename) ]:
                        continue
                    module = __import__(modulename)
                    for x in modulename.split('.')[1:]:
                        module = getattr(module, x)
                    suite.addTests([loader.loadTestsFromModule(module)])

def collect_doctests(path, module_prefix, suite, selectors, exclude_selectors):
    # Add the doctests of the Cython implementation modules themselves.
    def package_matches(dirname):
        if dirname == 'Debugger' and not include_debugger:
            return False
        return dirname not in ("Mac", "Distutils", "Plex")

    def file_matches(filename):
        filename, ext = os.path.splitext(filename)
        blacklist = ['libcython', 'libpython', 'test_libcython_in_gdb',
                     'TestLibCython']
        return (ext == '.py' and not
                '~' in filename and not
                '#' in filename and not
                filename.startswith('.') and not
                filename in blacklist)

    import doctest
    for dirpath, dirnames, filenames in os.walk(path):
        for dir in list(dirnames):
            # prune excluded packages in place so os.walk skips them
            if not package_matches(dir):
                dirnames.remove(dir)
        for f in filenames:
            if file_matches(f):
                if not f.endswith('.py'): continue
                filepath = os.path.join(dirpath, f)
                if os.path.getsize(filepath) == 0: continue
                filepath = filepath[:-len(".py")]
                modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
                if not [ 1 for match in selectors if match(modulename) ]:
                    continue
                if [ 1 for match in exclude_selectors if match(modulename) ]:
                    continue
                if 'in_gdb' in modulename:
                    # These should only be imported from gdb.
                    continue
                module = __import__(modulename)
                for x in modulename.split('.')[1:]:
                    module = getattr(module, x)
                if hasattr(module, "__doc__") or hasattr(module, "__test__"):
                    try:
                        suite.addTest(doctest.DocTestSuite(module))
                    except ValueError: # no tests
                        pass


class EndToEndTest(unittest.TestCase):
    """
    This is a test of build/*.srctree files, where srctree defines a full
    directory structure and its header gives a list of commands to run.
""" cython_root = os.path.dirname(os.path.abspath(__file__)) def __init__(self, treefile, workdir, cleanup_workdir=True): self.name = os.path.splitext(os.path.basename(treefile))[0] self.treefile = treefile self.workdir = os.path.join(workdir, self.name) self.cleanup_workdir = cleanup_workdir cython_syspath = [self.cython_root] for path in sys.path: if path.startswith(self.cython_root) and path not in cython_syspath: # Py3 installation and refnanny build prepend their # fixed paths to sys.path => prefer that over the # generic one (cython_root itself goes last) cython_syspath.append(path) self.cython_syspath = os.pathsep.join(cython_syspath[::-1]) unittest.TestCase.__init__(self) def shortDescription(self): return "End-to-end %s" % self.name def setUp(self): from Cython.TestUtils import unpack_source_tree _, self.commands = unpack_source_tree(self.treefile, self.workdir) self.old_dir = os.getcwd() os.chdir(self.workdir) if self.workdir not in sys.path: sys.path.insert(0, self.workdir) def tearDown(self): if self.cleanup_workdir: for trial in range(5): try: shutil.rmtree(self.workdir) except OSError: time.sleep(0.1) else: break os.chdir(self.old_dir) def _try_decode(self, content): try: return content.decode() except UnicodeDecodeError: return content.decode('iso-8859-1') def runTest(self): self.success = False commands = (self.commands .replace("CYTHON", "PYTHON %s" % os.path.join(self.cython_root, 'cython.py')) .replace("PYTHON", sys.executable)) old_path = os.environ.get('PYTHONPATH') os.environ['PYTHONPATH'] = self.cython_syspath + os.pathsep + (old_path or '') try: for command in commands.split('\n'): p = subprocess.Popen(commands, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True) out, err = p.communicate() res = p.returncode if res != 0: print(command) print(self._try_decode(out)) print(self._try_decode(err)) self.assertEqual(0, res, "non-zero exit status") finally: if old_path: os.environ['PYTHONPATH'] = old_path else: del os.environ['PYTHONPATH'] 
self.success = True # TODO: Support cython_freeze needed here as well. # TODO: Windows support. class EmbedTest(unittest.TestCase): working_dir = "Demos/embed" def setUp(self): self.old_dir = os.getcwd() os.chdir(self.working_dir) os.system( "make PYTHON='%s' clean > /dev/null" % sys.executable) def tearDown(self): try: os.system( "make PYTHON='%s' clean > /dev/null" % sys.executable) except: pass os.chdir(self.old_dir) def test_embed(self): from distutils import sysconfig libname = sysconfig.get_config_var('LIBRARY') libdir = sysconfig.get_config_var('LIBDIR') if not os.path.isdir(libdir) or libname not in os.listdir(libdir): libdir = os.path.join(os.path.dirname(sys.executable), '..', 'lib') if not os.path.isdir(libdir) or libname not in os.listdir(libdir): libdir = os.path.join(libdir, 'python%d.%d' % sys.version_info[:2], 'config') if not os.path.isdir(libdir) or libname not in os.listdir(libdir): # report the error for the original directory libdir = sysconfig.get_config_var('LIBDIR') cython = 'cython.py' if sys.version_info[0] >=3 and CY3_DIR: cython = os.path.join(CY3_DIR, cython) cython = os.path.abspath(os.path.join('..', '..', cython)) self.assert_(os.system( "make PYTHON='%s' CYTHON='%s' LIBDIR1='%s' test > make.output" % (sys.executable, cython, libdir)) == 0) try: os.remove('make.output') except OSError: pass class MissingDependencyExcluder: def __init__(self, deps): # deps: { matcher func : module name } self.exclude_matchers = [] for matcher, mod in deps.items(): try: __import__(mod) except ImportError: self.exclude_matchers.append(string_selector(matcher)) self.tests_missing_deps = [] def __call__(self, testname, tags=None): for matcher in self.exclude_matchers: if matcher(testname, tags): self.tests_missing_deps.append(testname) return True return False class VersionDependencyExcluder: def __init__(self, deps): # deps: { version : matcher func } from sys import version_info self.exclude_matchers = [] for ver, (compare, matcher) in deps.items(): if 
compare(version_info, ver): self.exclude_matchers.append(matcher) self.tests_missing_deps = [] def __call__(self, testname, tags=None): for matcher in self.exclude_matchers: if matcher(testname): self.tests_missing_deps.append(testname) return True return False class FileListExcluder: def __init__(self, list_file): self.excludes = {} f = open(list_file) try: for line in f.readlines(): line = line.strip() if line and line[0] != '#': self.excludes[line.split()[0]] = True finally: f.close() def __call__(self, testname, tags=None): return testname in self.excludes or testname.split('.')[-1] in self.excludes class TagsSelector: def __init__(self, tag, value): self.tag = tag self.value = value def __call__(self, testname, tags=None): if tags is None: return False else: return self.value in tags[self.tag] class RegExSelector: def __init__(self, pattern_string): try: self.pattern = re.compile(pattern_string, re.I|re.U) except re.error: print('Invalid pattern: %r' % pattern_string) raise def __call__(self, testname, tags=None): return self.pattern.search(testname) def string_selector(s): ix = s.find(':') if ix == -1: return RegExSelector(s) else: return TagsSelector(s[:ix], s[ix+1:]) class ShardExcludeSelector: # This is an exclude selector so it can override the (include) selectors. # It may not provide uniform distribution (in time or count), but is a # determanistic partition of the tests which is important. 
def __init__(self, shard_num, shard_count): self.shard_num = shard_num self.shard_count = shard_count def __call__(self, testname, tags=None): return abs(hash(testname)) % self.shard_count != self.shard_num def refactor_for_py3(distdir, cy3_dir): # need to convert Cython sources first import lib2to3.refactor from distutils.util import copydir_run_2to3 fixers = [ fix for fix in lib2to3.refactor.get_fixers_from_package("lib2to3.fixes") if fix.split('fix_')[-1] not in ('next',) ] if not os.path.exists(cy3_dir): os.makedirs(cy3_dir) import distutils.log as dlog dlog.set_threshold(dlog.INFO) copydir_run_2to3(distdir, cy3_dir, fixer_names=fixers, template = ''' global-exclude * graft Cython recursive-exclude Cython * recursive-include Cython *.py *.pyx *.pxd recursive-include Cython/Debugger/Tests * recursive-include Cython/Utility * recursive-exclude pyximport test include pyximport/*.py include runtests.py include cython.py ''') sys.path.insert(0, cy3_dir) for keep_2x_file in KEEP_2X_FILES: destfile = os.path.join(cy3_dir, keep_2x_file) shutil.copy(keep_2x_file, destfile) class PendingThreadsError(RuntimeError): pass threads_seen = [] def check_thread_termination(ignore_seen=True): if threading is None: # no threading enabled in CPython return current = threading.currentThread() blocking_threads = [] for t in threading.enumerate(): if not t.isAlive() or t == current: continue t.join(timeout=2) if t.isAlive(): if not ignore_seen: blocking_threads.append(t) continue for seen in threads_seen: if t is seen: break else: threads_seen.append(t) blocking_threads.append(t) if not blocking_threads: return sys.stderr.write("warning: left-over threads found after running test:\n") for t in blocking_threads: sys.stderr.write('...%s\n' % repr(t)) raise PendingThreadsError("left-over threads found after running test") def subprocess_output(cmd): try: p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) return p.communicate()[0].decode('UTF-8') except OSError: 
return '' def get_version(): from Cython.Compiler.Version import version as cython_version full_version = cython_version top = os.path.dirname(os.path.abspath(__file__)) if os.path.exists(os.path.join(top, '.git')): old_dir = os.getcwd() try: os.chdir(top) head_commit = subprocess_output(['git', 'rev-parse', 'HEAD']).strip() version_commit = subprocess_output(['git', 'rev-parse', cython_version]).strip() diff = subprocess_output(['git', 'diff', '--stat']).strip() if head_commit != version_commit: full_version += " " + head_commit if diff: full_version += ' + uncommitted changes' finally: os.chdir(old_dir) return full_version _orig_stdout, _orig_stderr = sys.stdout, sys.stderr def flush_and_terminate(status): try: _orig_stdout.flush() _orig_stderr.flush() finally: os._exit(status) def main(): global DISTDIR, WITH_CYTHON DISTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0])) from optparse import OptionParser parser = OptionParser() parser.add_option("--no-cleanup", dest="cleanup_workdir", action="store_false", default=True, help="do not delete the generated C files (allows passing --no-cython on next run)") parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs", action="store_false", default=True, help="do not delete the generated shared libary files (allows manual module experimentation)") parser.add_option("--no-cleanup-failures", dest="cleanup_failures", action="store_false", default=True, help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only") parser.add_option("--no-cython", dest="with_cython", action="store_false", default=True, help="do not run the Cython compiler, only the C compiler") parser.add_option("--compiler", dest="compiler", default=None, help="C compiler type") backend_list = ','.join(BACKENDS) parser.add_option("--backends", dest="backends", default=backend_list, help="select backends to test (default: %s)" % backend_list) parser.add_option("--no-c", dest="use_c", action="store_false", default=True, 
help="do not test C compilation backend") parser.add_option("--no-cpp", dest="use_cpp", action="store_false", default=True, help="do not test C++ compilation backend") parser.add_option("--no-unit", dest="unittests", action="store_false", default=True, help="do not run the unit tests") parser.add_option("--no-doctest", dest="doctests", action="store_false", default=True, help="do not run the doctests") parser.add_option("--no-file", dest="filetests", action="store_false", default=True, help="do not run the file based tests") parser.add_option("--no-pyregr", dest="pyregr", action="store_false", default=True, help="do not run the regression tests of CPython in tests/pyregr/") parser.add_option("--cython-only", dest="cython_only", action="store_true", default=False, help="only compile pyx to c, do not run C compiler or run the tests") parser.add_option("--no-refnanny", dest="with_refnanny", action="store_false", default=True, help="do not regression test reference counting") parser.add_option("--no-fork", dest="fork", action="store_false", default=True, help="do not fork to run tests") parser.add_option("--sys-pyregr", dest="system_pyregr", action="store_true", default=False, help="run the regression tests of the CPython installation") parser.add_option("-x", "--exclude", dest="exclude", action="append", metavar="PATTERN", help="exclude tests matching the PATTERN") parser.add_option("--shard_count", dest="shard_count", metavar="N", type=int, default=1, help="shard this run into several parallel runs") parser.add_option("--shard_num", dest="shard_num", metavar="K", type=int, default=-1, help="test only this single shard") parser.add_option("-C", "--coverage", dest="coverage", action="store_true", default=False, help="collect source coverage data for the Compiler") parser.add_option("--coverage-xml", dest="coverage_xml", action="store_true", default=False, help="collect source coverage data for the Compiler in XML format") parser.add_option("--coverage-html", 
dest="coverage_html", action="store_true", default=False, help="collect source coverage data for the Compiler in HTML format") parser.add_option("-A", "--annotate", dest="annotate_source", action="store_true", default=True, help="generate annotated HTML versions of the test source files") parser.add_option("--no-annotate", dest="annotate_source", action="store_false", help="do not generate annotated HTML versions of the test source files") parser.add_option("-v", "--verbose", dest="verbosity", action="count", default=0, help="display test progress, pass twice to print test names") parser.add_option("-T", "--ticket", dest="tickets", action="append", help="a bug ticket number to run the respective test in 'tests/*'") parser.add_option("-3", dest="language_level", action="store_const", const=3, default=2, help="set language level to Python 3 (useful for running the CPython regression tests)'") parser.add_option("--xml-output", dest="xml_output_dir", metavar="DIR", help="write test results in XML to directory DIR") parser.add_option("--exit-ok", dest="exit_ok", default=False, action="store_true", help="exit without error code even on test failures") parser.add_option("--root-dir", dest="root_dir", default=os.path.join(DISTDIR, 'tests'), help="working directory") parser.add_option("--work-dir", dest="work_dir", default=os.path.join(os.getcwd(), 'BUILD'), help="working directory") parser.add_option("--cython-dir", dest="cython_dir", default=os.getcwd(), help="Cython installation directory (default: use local source version)") parser.add_option("--debug", dest="for_debugging", default=False, action="store_true", help="configure for easier use with a debugger (e.g. 
gdb)") parser.add_option("--pyximport-py", dest="pyximport_py", default=False, action="store_true", help="use pyximport to automatically compile imported .pyx and .py files") parser.add_option("--watermark", dest="watermark", default=None, help="deterministic generated by string") options, cmd_args = parser.parse_args() WORKDIR = os.path.abspath(options.work_dir) if sys.version_info[0] >= 3: options.doctests = False if options.with_cython: sys.path.insert(0, options.cython_dir) try: # try if Cython is installed in a Py3 version import Cython.Compiler.Main except Exception: # back out anything the import process loaded, then # 2to3 the Cython sources to make them re-importable cy_modules = [ name for name in sys.modules if name == 'Cython' or name.startswith('Cython.') ] for name in cy_modules: del sys.modules[name] # hasn't been refactored yet - do it now global CY3_DIR CY3_DIR = cy3_dir = os.path.join(WORKDIR, 'Cy3') if sys.version_info >= (3,1): refactor_for_py3(DISTDIR, cy3_dir) elif os.path.isdir(cy3_dir): sys.path.insert(0, cy3_dir) else: options.with_cython = False if options.watermark: import Cython.Compiler.Version Cython.Compiler.Version.watermark = options.watermark WITH_CYTHON = options.with_cython coverage = None if options.coverage or options.coverage_xml or options.coverage_html: if options.shard_count <= 1 and options.shard_num < 0: if not WITH_CYTHON: options.coverage = options.coverage_xml = options.coverage_html = False else: print("Enabling coverage analysis") from coverage import coverage as _coverage coverage = _coverage(branch=True, omit=['Test*']) coverage.erase() coverage.start() if WITH_CYTHON: global CompilationOptions, pyrex_default_options, cython_compile from Cython.Compiler.Main import \ CompilationOptions, \ default_options as pyrex_default_options, \ compile as cython_compile from Cython.Compiler import Errors Errors.LEVEL = 0 # show all warnings from Cython.Compiler import Options Options.generate_cleanup_code = 3 # complete cleanup 
code from Cython.Compiler import DebugFlags DebugFlags.debug_temp_code_comments = 1 if options.shard_count > 1 and options.shard_num == -1: import multiprocessing pool = multiprocessing.Pool(options.shard_count) tasks = [(options, cmd_args, shard_num) for shard_num in range(options.shard_count)] errors = [] for shard_num, return_code in pool.imap_unordered(runtests_callback, tasks): if return_code != 0: errors.append(shard_num) print("FAILED (%s/%s)" % (shard_num, options.shard_count)) print("ALL DONE (%s/%s)" % (shard_num, options.shard_count)) pool.close() pool.join() if errors: print("Errors for shards %s" % ", ".join([str(e) for e in errors])) return_code = 1 else: return_code = 0 else: _, return_code = runtests(options, cmd_args, coverage) print("ALL DONE") try: check_thread_termination(ignore_seen=False) sys.exit(return_code) except PendingThreadsError: # normal program exit won't kill the threads, do it the hard way here flush_and_terminate(return_code) def runtests_callback(args): options, cmd_args, shard_num = args options.shard_num = shard_num return runtests(options, cmd_args) def runtests(options, cmd_args, coverage=None): WITH_CYTHON = options.with_cython ROOTDIR = os.path.abspath(options.root_dir) WORKDIR = os.path.abspath(options.work_dir) if options.shard_num > -1: WORKDIR = os.path.join(WORKDIR, str(options.shard_num)) # RUN ALL TESTS! 
UNITTEST_MODULE = "Cython" UNITTEST_ROOT = os.path.join(os.path.dirname(__file__), UNITTEST_MODULE) if WITH_CYTHON: if os.path.exists(WORKDIR): for path in os.listdir(WORKDIR): if path in ("support", "Cy3"): continue shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True) if not os.path.exists(WORKDIR): os.makedirs(WORKDIR) if options.shard_num <= 0: sys.stderr.write("Python %s\n" % sys.version) sys.stderr.write("\n") if WITH_CYTHON: sys.stderr.write("Running tests against Cython %s\n" % get_version()) else: sys.stderr.write("Running tests without Cython.\n") if options.for_debugging: options.cleanup_workdir = False options.cleanup_sharedlibs = False options.fork = False if WITH_CYTHON and include_debugger: from Cython.Compiler.Main import default_options as compiler_default_options compiler_default_options['gdb_debug'] = True compiler_default_options['output_dir'] = os.getcwd() if options.with_refnanny: from pyximport.pyxbuild import pyx_to_dll libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"), build_in_temp=True, pyxbuild_dir=os.path.join(WORKDIR, "support")) sys.path.insert(0, os.path.split(libpath)[0]) CFLAGS.append("-DCYTHON_REFNANNY=1") if options.xml_output_dir and options.fork: # doesn't currently work together sys.stderr.write("Disabling forked testing to support XML test output\n") options.fork = False if WITH_CYTHON and options.language_level == 3: sys.stderr.write("Using Cython language level 3.\n") test_bugs = False if options.tickets: for ticket_number in options.tickets: test_bugs = True cmd_args.append('ticket:%s' % ticket_number) if not test_bugs: for selector in cmd_args: if selector.startswith('bugs'): test_bugs = True selectors = [ string_selector(r) for r in cmd_args ] if not selectors: selectors = [ lambda x, tags=None: True ] # Chech which external modules are not present and exclude tests # which depends on them (by prefix) missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES) version_dep_excluder = 
VersionDependencyExcluder(VER_DEP_MODULES) exclude_selectors = [missing_dep_excluder, version_dep_excluder] # want to print msg at exit if options.exclude: exclude_selectors += [ string_selector(r) for r in options.exclude ] if options.shard_num > -1: exclude_selectors.append(ShardExcludeSelector(options.shard_num, options.shard_count)) if not test_bugs: exclude_selectors += [ FileListExcluder(os.path.join(ROOTDIR, "bugs.txt")) ] if sys.platform in ['win32', 'cygwin'] and sys.version_info < (2,6): exclude_selectors += [ lambda x: x == "run.specialfloat" ] global COMPILER if options.compiler: COMPILER = options.compiler selected_backends = [ name.strip() for name in options.backends.split(',') if name.strip() ] backends = [] for backend in selected_backends: if backend == 'c' and not options.use_c: continue elif backend == 'cpp' and not options.use_cpp: continue elif backend not in BACKENDS: sys.stderr.write("Unknown backend requested: '%s' not one of [%s]\n" % ( backend, ','.join(BACKENDS))) sys.exit(1) backends.append(backend) if options.shard_num <= 0: sys.stderr.write("Backends: %s\n" % ','.join(backends)) languages = backends sys.stderr.write("\n") test_suite = unittest.TestSuite() if options.unittests: collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors) if options.doctests: collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors) if options.filetests and languages: filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors, options.annotate_source, options.cleanup_workdir, options.cleanup_sharedlibs, options.cleanup_failures, options.pyregr, options.cython_only, languages, test_bugs, options.fork, options.language_level) test_suite.addTest(filetests.build_suite()) if options.system_pyregr and languages: sys_pyregr_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test') if os.path.isdir(sys_pyregr_dir): filetests = TestBuilder(ROOTDIR, WORKDIR, 
selectors, exclude_selectors, options.annotate_source, options.cleanup_workdir, options.cleanup_sharedlibs, options.cleanup_failures, True, options.cython_only, languages, test_bugs, options.fork, sys.version_info[0]) sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir) test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr')) if options.xml_output_dir: from Cython.Tests.xmlrunner import XMLTestRunner test_runner = XMLTestRunner(output=options.xml_output_dir, verbose=options.verbosity > 0) else: test_runner = unittest.TextTestRunner(verbosity=options.verbosity) if options.pyximport_py: from pyximport import pyximport pyximport.install(pyimport=True, build_dir=os.path.join(WORKDIR, '_pyximport'), load_py_module_on_import_failure=True, inplace=True) result = test_runner.run(test_suite) if coverage is not None: coverage.stop() ignored_modules = ('Options', 'Version', 'DebugFlags', 'CmdLine') modules = [ module for name, module in sys.modules.items() if module is not None and name.startswith('Cython.Compiler.') and name[len('Cython.Compiler.'):] not in ignored_modules ] if options.coverage: coverage.report(modules, show_missing=0) if options.coverage_xml: coverage.xml_report(modules, outfile="coverage-report.xml") if options.coverage_html: coverage.html_report(modules, directory="coverage-report-html") if missing_dep_excluder.tests_missing_deps: sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n") for test in missing_dep_excluder.tests_missing_deps: sys.stderr.write(" %s\n" % test) if options.with_refnanny: import refnanny sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog])) if options.exit_ok: return options.shard_num, 0 else: return options.shard_num, not result.wasSuccessful() if __name__ == '__main__': try: main() except SystemExit: # <= Py2.4 ... 
raise except Exception: traceback.print_exc() try: check_thread_termination(ignore_seen=False) except PendingThreadsError: # normal program exit won't kill the threads, do it the hard way here flush_and_terminate(1)
39.489749
116
0.57907
beb8b9d7ed87096fcc11c55a1895f8aade8469b2
11,311
py
Python
laske_export/document/invoice_sales_order_adapter.py
suutari-ai/mvj
c39dbc692afcb3b26366783414c2d5a88a57b25a
[ "MIT" ]
1
2021-01-12T08:14:10.000Z
2021-01-12T08:14:10.000Z
laske_export/document/invoice_sales_order_adapter.py
suutari-ai/mvj
c39dbc692afcb3b26366783414c2d5a88a57b25a
[ "MIT" ]
249
2017-04-18T14:00:13.000Z
2022-03-30T12:18:03.000Z
laske_export/document/invoice_sales_order_adapter.py
suutari-ai/mvj
c39dbc692afcb3b26366783414c2d5a88a57b25a
[ "MIT" ]
7
2017-04-18T08:43:54.000Z
2021-07-28T07:29:30.000Z
from decimal import ROUND_HALF_UP, Decimal from dateutil.relativedelta import relativedelta from django.conf import settings from leasing.enums import InvoiceType, RentCycle from leasing.models.utils import get_next_business_day, is_business_day from .sales_order import BillingParty1, LineItem, OrderParty class InvoiceSalesOrderAdapter: def __init__( self, invoice=None, sales_order=None, receivable_type_rent=None, receivable_type_collateral=None, ): self.invoice = invoice self.sales_order = sales_order self.receivable_type_rent = receivable_type_rent self.receivable_type_collateral = receivable_type_collateral def get_bill_text(self): if ( self.invoice.billing_period_start_date and self.invoice.billing_period_end_date ): invoice_year = self.invoice.billing_period_start_date.year # TODO: Which rent rent = self.invoice.lease.get_active_rents_on_period( self.invoice.billing_period_start_date, self.invoice.billing_period_end_date, ).first() else: invoice_year = self.invoice.invoicing_date.year rent = self.invoice.lease.get_active_rents_on_period( self.invoice.invoicing_date, self.invoice.invoicing_date ).first() rent_calculation = self.invoice.lease.calculate_rent_amount_for_year( invoice_year ) year_rent = rent_calculation.get_total_amount() real_property_identifier = "" address = "" first_lease_area = self.invoice.lease.lease_areas.first() if first_lease_area: real_property_identifier = first_lease_area.identifier lease_area_address = first_lease_area.addresses.order_by( "-is_primary" ).first() if lease_area_address: address = lease_area_address.address bill_texts = [] row1 = "Vuokraustunnus: {lease_identifier} ".format( lease_identifier=self.invoice.lease.get_identifier_string() ) if ( self.invoice.billing_period_start_date and self.invoice.billing_period_end_date ): row1 += "Ajalta: {billing_period_start_date}-{billing_period_end_date} ".format( billing_period_start_date=self.invoice.billing_period_start_date.strftime( "%d.%m.%Y" ), 
billing_period_end_date=self.invoice.billing_period_end_date.strftime( "%d.%m.%Y" ), ) bill_texts.append(row1) row2 = "Päättymispvm: {lease_end_date} ".format( lease_end_date=self.invoice.lease.end_date.strftime("%d.%m.%Y") if self.invoice.lease.end_date else "-" ) if self.invoice.lease.intended_use: row2 += "Käyttötarkoitus: {lease_intended_use} ".format( lease_intended_use=self.invoice.lease.intended_use.name[:25] ) bill_texts.append(row2) # It's possible that the rent starts after the invoicing date, so there is no active rent. # Rather than trying to guess which rent to use to calculate the yearly cost and index check date, # ...just skip writing this one description row on the invoice. if rent: index_date = "1.1." if rent.cycle == RentCycle.APRIL_TO_MARCH: index_date = "1.4." bill_texts.append( "Indeksin tark.pvm: {index_date} Vuosivuokra: {year_rent} ".format( index_date=index_date, year_rent="{:.2f}".format( year_rent.quantize(Decimal(".01"), rounding=ROUND_HALF_UP) ).replace(".", ","), ) ) # noqa: E501 bill_texts.append( "Vuokrakohde: {real_property_identifier}, {address} ".format( real_property_identifier=real_property_identifier, address=address ) ) if self.invoice.notes: bill_texts.append(self.invoice.notes) return "\n".join(bill_texts) def get_first_tenant(self): for invoice_row in self.invoice.rows.all(): if not invoice_row.tenant: continue return invoice_row.tenant def get_contact_to_bill(self): tenant = self.get_first_tenant() # We need a tenant and time period to find the BILLING contact if not tenant or not self.invoice.billing_period_start_date: return self.invoice.recipient # This method returns the TENANT contact if there's no BILLING contact tenant_billingcontact = tenant.get_billing_tenantcontacts( self.invoice.billing_period_start_date, self.invoice.billing_period_end_date ).first() if not tenant_billingcontact: return self.invoice.recipient return tenant_billingcontact.contact def get_po_number(self): # Simply return the first reference 
("viite") we come across for invoice_row in self.invoice.rows.filter(tenant__isnull=False): if invoice_row.tenant.reference: return invoice_row.tenant.reference[:35] def set_dates(self): billing_date = self.invoice.due_date.replace(day=1) self.sales_order.billing_date = billing_date.strftime("%Y%m%d") due_date = self.invoice.due_date if not is_business_day(due_date): due_date = get_next_business_day(due_date) self.invoice.adjusted_due_date = due_date self.invoice.save() value_date = due_date - relativedelta(days=settings.LASKE_DUE_DATE_OFFSET_DAYS) self.sales_order.value_date = value_date.strftime("%Y%m%d") def set_references(self): self.sales_order.reference = str(self.invoice.generate_number()) self.sales_order.reference_text = self.invoice.lease.get_identifier_string() def get_line_items(self): line_items = [] invoice_rows = self.invoice.rows.all() for i, invoice_row in enumerate(invoice_rows): line_item = LineItem() receivable_type = invoice_row.receivable_type # When dealing with rent invoice rows, we look up the SAP codes from the LeaseType object... if receivable_type == self.receivable_type_rent: line_item.material = self.invoice.lease.type.sap_material_code line_item.order_item_number = ( self.invoice.lease.type.sap_order_item_number ) # ...but in other cases the SAP codes are found in the ReceivableType object of the invoice row. 
elif receivable_type == self.receivable_type_collateral: # In case of a collateral ("Rahavakuus") row, need to populate ProfitCenter element instead line_item.profit_center = receivable_type.sap_order_item_number line_item.material = receivable_type.sap_material_code else: line_item.material = receivable_type.sap_material_code line_item.order_item_number = receivable_type.sap_order_item_number line_item.quantity = "1,00" line_item.net_price = "{:.2f}".format(invoice_row.amount).replace(".", ",") line1_strings = ["{}".format(invoice_row.receivable_type.name)] if ( invoice_row.billing_period_start_date and invoice_row.billing_period_end_date ): line1_strings.append( "{} - {}".format( invoice_row.billing_period_start_date.strftime("%d.%m.%Y"), invoice_row.billing_period_end_date.strftime("%d.%m.%Y"), ) ) line1_strings.append(" ") line_item.line_text_l1 = " ".join(line1_strings)[:70] if invoice_row.tenant: # NB! As can be seen below, here the billing_period_start_date was used twice originally. # I believe it's a mistake, but I'm leaving it here as a reminder in case some weird bugs pop up. # tenant_contact = invoice_row.tenant.get_tenant_tenantcontacts( # invoice_row.billing_period_start_date, # invoice_row.billing_period_start_date).first() start_date = self.invoice.billing_period_start_date end_date = self.invoice.billing_period_end_date # There might be invoices that have no billing_period_start and end_date at all! # If this is the case, use the invoicing date to find the proper contacts if not start_date and not end_date: start_date = end_date = self.invoice.invoicing_date tenant_contact = invoice_row.tenant.get_tenant_tenantcontacts( start_date, end_date ).first() if tenant_contact and tenant_contact.contact: line_item.line_text_l2 = "{} ".format( tenant_contact.contact.get_name()[:68] ) if i == len(invoice_rows) - 1: line_item.line_text_l4 = " Maksun suorittaminen: Maksu on suoritettava viimeistään eräpäivänä." 
line_item.line_text_l5 = ( " Eräpäivän jälkeen peritään korkolain mukainen viivästyskorko ja" ) line_item.line_text_l6 = ( " mahdollisista perimistoimenpiteistä perimispalkkio." ) line_items.append(line_item) return line_items def get_order_type(self): if self.invoice.type == InvoiceType.CHARGE: return "ZTY1" elif self.invoice.type == InvoiceType.CREDIT_NOTE: return "ZHY1" def get_original_order(self): if self.invoice.type == InvoiceType.CREDIT_NOTE: return str(self.invoice.credited_invoice.number) def get_sales_office(self): if self.invoice.lease.lessor and self.invoice.lease.lessor.sap_sales_office: return self.invoice.lease.lessor.sap_sales_office # TODO: Remove return "2826" def set_values(self): self.sales_order.set_bill_texts_from_string(self.get_bill_text()) contact_to_be_billed = self.get_contact_to_bill() order_party = OrderParty() order_party.from_contact(contact_to_be_billed) self.sales_order.order_party = order_party billing_party1 = BillingParty1() billing_party1.from_contact(contact_to_be_billed) self.sales_order.billing_party1 = billing_party1 self.sales_order.sales_office = self.get_sales_office() self.sales_order.po_number = self.get_po_number() self.sales_order.order_type = self.get_order_type() self.sales_order.original_order = self.get_original_order() self.set_dates() self.set_references() line_items = self.get_line_items() self.sales_order.line_items = line_items
39.41115
113
0.626116
38d9522252f22b68a0281224ce44c802e540c71a
26,957
py
Python
fastai/learner.py
maxpark/fastai
7dd70d4fd9ff1091557e09c9f00c8894b818544e
[ "Apache-2.0" ]
1
2022-03-13T00:09:58.000Z
2022-03-13T00:09:58.000Z
fastai/learner.py
maxpark/fastai
7dd70d4fd9ff1091557e09c9f00c8894b818544e
[ "Apache-2.0" ]
null
null
null
fastai/learner.py
maxpark/fastai
7dd70d4fd9ff1091557e09c9f00c8894b818544e
[ "Apache-2.0" ]
null
null
null
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/13a_learner.ipynb (unless otherwise specified). from __future__ import annotations __all__ = ['replacing_yield', 'mk_metric', 'save_model', 'load_model', 'SkipToEpoch', 'Learner', 'before_batch_cb', 'load_learner', 'Metric', 'AvgMetric', 'AvgLoss', 'AvgSmoothLoss', 'ValueMetric', 'Recorder', 'CastToTensor'] # Cell #nbdev_comment from __future__ import annotations from .data.all import * from .optimizer import * from .callback.core import * import pickle,threading # Cell _all_ = ['CancelBackwardException', 'CancelStepException','CancelFitException','CancelEpochException', 'CancelTrainException','CancelValidException','CancelBatchException'] # Cell defaults.lr = 1e-3 # Cell def replacing_yield(o, attr, val): "Context manager to temporarily replace an attribute" old = getattr(o,attr) try: yield setattr(o,attr,val) finally: setattr(o,attr,old) # Cell def mk_metric(m): "Convert `m` to an `AvgMetric`, unless it's already a `Metric`" if isinstance(m,type): m = m() return m if isinstance(m, Metric) else AvgMetric(m) # Cell def save_model(file, model, opt, with_opt=True, pickle_protocol=2): "Save `model` to `file` along with `opt` (if available, and if `with_opt`)" if rank_distrib(): return # don't save if child proc if opt is None: with_opt=False state = get_model(model).state_dict() if with_opt: state = {'model': state, 'opt':opt.state_dict()} torch.save(state, file, pickle_protocol=pickle_protocol) # Cell def load_model(file, model, opt, with_opt=True, device=None, strict=True): "Load `model` from `file` along with `opt` (if available, and if `with_opt`)" distrib_barrier() if isinstance(device, int): device = torch.device('cuda', device) elif device is None: device = 'cpu' state = torch.load(file, map_location=device) hasopt = set(state)=={'model', 'opt'} model_state = state['model'] if hasopt else state get_model(model).load_state_dict(model_state, strict=strict) if hasopt and with_opt: try: 
opt.load_state_dict(state['opt']) except: if with_opt: warn("Could not load the optimizer state.") elif with_opt: warn("Saved filed doesn't contain an optimizer state.") # Cell def _try_concat(o): try: return torch.cat(o) except: return sum([L(o_[i,:] for i in range_of(o_)) for o_ in o], L()) # Cell _before_epoch = [event.before_fit, event.before_epoch] _after_epoch = [event.after_epoch, event.after_fit] # Cell class _ConstantFunc(): "Returns a function that returns `o`" def __init__(self, o): self.o = o def __call__(self, *args, **kwargs): return self.o # Cell class SkipToEpoch(Callback): "Skip training up to `epoch`" order = 70 def __init__(self, epoch:int): self._skip_to = epoch def before_epoch(self): if self.epoch < self._skip_to: raise CancelEpochException # Cell _loop = ['Start Fit', 'before_fit', 'Start Epoch Loop', 'before_epoch', 'Start Train', 'before_train', 'Start Batch Loop', 'before_batch', 'after_pred', 'after_loss', 'before_backward', 'before_step', 'after_step', 'after_cancel_batch', 'after_batch','End Batch Loop','End Train', 'after_cancel_train', 'after_train', 'Start Valid', 'before_validate','Start Batch Loop', '**CBs same as train batch**', 'End Batch Loop', 'End Valid', 'after_cancel_validate', 'after_validate', 'End Epoch Loop', 'after_cancel_epoch', 'after_epoch', 'End Fit', 'after_cancel_fit', 'after_fit'] # Cell class Learner(GetAttr): _default='model' def __init__(self, dls, model, loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=trainable_params, cbs=None, metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95)): path = Path(path) if path is not None else getattr(dls, 'path', Path('.')) if loss_func is None: loss_func = getattr(dls.train_ds, 'loss_func', None) assert loss_func is not None, "Could not infer loss function from the data, please pass a loss function." 
self.dls,self.model = dls,model store_attr(but='dls,model,cbs') self.training,self.create_mbar,self.logger,self.opt,self.cbs = False,True,print,None,L() self.add_cbs(L(defaults.callbacks)+L(cbs)) self.lock = threading.Lock() self("after_create") @property def metrics(self): return self._metrics @metrics.setter def metrics(self,v): self._metrics = L(v).map(mk_metric) def _grab_cbs(self, cb_cls): return L(cb for cb in self.cbs if isinstance(cb, cb_cls)) def add_cbs(self, cbs): L(cbs).map(self.add_cb) return self def remove_cbs(self, cbs): L(cbs).map(self.remove_cb) return self def add_cb(self, cb): if isinstance(cb, type): cb = cb() cb.learn = self setattr(self, cb.name, cb) self.cbs.append(cb) return self def remove_cb(self, cb): if isinstance(cb, type): self.remove_cbs(self._grab_cbs(cb)) else: cb.learn = None if hasattr(self, cb.name): delattr(self, cb.name) if cb in self.cbs: self.cbs.remove(cb) return self @contextmanager def added_cbs(self, cbs): self.add_cbs(cbs) try: yield finally: self.remove_cbs(cbs) @contextmanager def removed_cbs(self, cbs): self.remove_cbs(cbs) try: yield self finally: self.add_cbs(cbs) def ordered_cbs(self, event): return [cb for cb in self.cbs.sorted('order') if hasattr(cb, event)] def __call__(self, event_name): L(event_name).map(self._call_one) def _call_one(self, event_name): if not hasattr(event, event_name): raise Exception(f'missing {event_name}') for cb in self.cbs.sorted('order'): cb(event_name) def _bn_bias_state(self, with_bias): return norm_bias_params(self.model, with_bias).map(self.opt.state) def create_opt(self): if isinstance(self.opt_func, partial): if 'lr' in self.opt_func.keywords: self.lr = self.opt_func.keywords['lr'] self.opt = self.opt_func(self.splitter(self.model), lr=self.lr) if not self.wd_bn_bias: for p in self._bn_bias_state(True ): p['do_wd'] = False if self.train_bn: for p in self._bn_bias_state(False): p['force_train'] = True def _split(self, b): i = getattr(self.dls, 'n_inp', 1 if len(b)==1 else 
len(b)-1) self.xb,self.yb = b[:i],b[i:] def _with_events(self, f, event_type, ex, final=noop): try: self(f'before_{event_type}'); f() except ex: self(f'after_cancel_{event_type}') self(f'after_{event_type}'); final() def all_batches(self): self.n_iter = len(self.dl) for o in enumerate(self.dl): self.one_batch(*o) def _backward(self): self.loss_grad.backward() def _step(self): self.opt.step() def _do_one_batch(self): self.pred = self.model(*self.xb) self('after_pred') if len(self.yb): self.loss_grad = self.loss_func(self.pred, *self.yb) self.loss = self.loss_grad.clone() self('after_loss') if not self.training or not len(self.yb): return self._with_events(self._backward, 'backward', CancelBackwardException) self._with_events(self._step, 'step', CancelStepException) self.opt.zero_grad() def _set_device(self, b): model_device = torch.device(torch.cuda.current_device()) if next(self.model.parameters()).is_cuda else torch.device('cpu') dls_device = getattr(self.dls, 'device', default_device()) if model_device == dls_device: return to_device(b, dls_device) else: return to_device(b, model_device) def one_batch(self, i, b): self.iter = i b = self._set_device(b) self._split(b) self._with_events(self._do_one_batch, 'batch', CancelBatchException) def _do_epoch_train(self): self.dl = self.dls.train self._with_events(self.all_batches, 'train', CancelTrainException) def _do_epoch_validate(self, ds_idx=1, dl=None): if dl is None: dl = self.dls[ds_idx] self.dl = dl with torch.no_grad(): self._with_events(self.all_batches, 'validate', CancelValidException) def _do_epoch(self): self._do_epoch_train() self._do_epoch_validate() def _do_fit(self): for epoch in range(self.n_epoch): self.epoch=epoch self._with_events(self._do_epoch, 'epoch', CancelEpochException) def fit(self, n_epoch, lr=None, wd=None, cbs=None, reset_opt=False, start_epoch=0): if start_epoch != 0: cbs = L(cbs) + SkipToEpoch(start_epoch) with self.added_cbs(cbs): if reset_opt or not self.opt: self.create_opt() if wd is 
None: wd = self.wd if wd is not None: self.opt.set_hypers(wd=wd) self.opt.set_hypers(lr=self.lr if lr is None else lr) self.n_epoch = n_epoch self._with_events(self._do_fit, 'fit', CancelFitException, self._end_cleanup) def _end_cleanup(self): self.dl,self.xb,self.yb,self.pred,self.loss = None,(None,),(None,),None,None def __enter__(self): self(_before_epoch); return self def __exit__(self, exc_type, exc_value, tb): self(_after_epoch) def validation_context(self, cbs=None, inner=False): cms = [self.no_logging(),self.no_mbar(), self.lock] if cbs: cms.append(self.added_cbs(cbs)) if not inner: cms.append(self) return ContextManagers(cms) def validate(self, ds_idx=1, dl=None, cbs=None): if dl is None: dl = self.dls[ds_idx] with self.validation_context(cbs=cbs): self._do_epoch_validate(ds_idx, dl) return getattr(self, 'final_record', None) @delegates(GatherPredsCallback.__init__) def get_preds(self, ds_idx=1, dl=None, with_input=False, with_decoded=False, with_loss=False, act=None, inner=False, reorder=True, cbs=None, **kwargs): if dl is None: dl = self.dls[ds_idx].new(shuffle=False, drop_last=False) else: try: len(dl) except TypeError as e: raise TypeError(f"`dl` is {type(dl)} and doesn't have len(dl)") if isinstance(dl, DataLoader): if dl.drop_last: dl = dl.new(shuffle=False, drop_last=False) if reorder and hasattr(dl, 'get_idxs'): idxs = dl.get_idxs() dl = dl.new(get_idxs = _ConstantFunc(idxs)) cb = GatherPredsCallback(with_input=with_input, with_loss=with_loss, **kwargs) ctx_mgrs = self.validation_context(cbs=L(cbs)+[cb], inner=inner) if with_loss: ctx_mgrs.append(self.loss_not_reduced()) with ContextManagers(ctx_mgrs): self._do_epoch_validate(dl=dl) if act is None: act = getattr(self.loss_func, 'activation', noop) res = cb.all_tensors() pred_i = 1 if with_input else 0 if res[pred_i] is not None: res[pred_i] = act(res[pred_i]) if with_decoded: res.insert(pred_i+2, getattr(self.loss_func, 'decodes', noop)(res[pred_i])) if reorder and hasattr(dl, 'get_idxs'): res = 
nested_reorder(res, tensor(idxs).argsort()) return tuple(res) self._end_cleanup() def predict(self, item, rm_type_tfms=None, with_input=False): dl = self.dls.test_dl([item], rm_type_tfms=rm_type_tfms, num_workers=0) inp,preds,_,dec_preds = self.get_preds(dl=dl, with_input=True, with_decoded=True) i = getattr(self.dls, 'n_inp', -1) inp = (inp,) if i==1 else tuplify(inp) dec = self.dls.decode_batch(inp + tuplify(dec_preds))[0] dec_inp,dec_targ = map(detuplify, [dec[:i],dec[i:]]) res = dec_targ,dec_preds[0],preds[0] if with_input: res = (dec_inp,) + res return res def show_results(self, ds_idx=1, dl=None, max_n=9, shuffle=True, **kwargs): if dl is None: dl = self.dls[ds_idx].new(shuffle=shuffle) b = dl.one_batch() _,_,preds = self.get_preds(dl=[b], with_decoded=True) dl.show_results(b, preds, max_n=max_n, **kwargs) def show_training_loop(self): indent = 0 for s in _loop: if s.startswith('Start'): print(f'{" "*indent}{s}'); indent += 2 elif s.startswith('End'): indent -= 2; print(f'{" "*indent}{s}') else: print(f'{" "*indent} - {s:15}:', self.ordered_cbs(s)) @contextmanager def no_logging(self): return replacing_yield(self, 'logger', noop) @contextmanager def no_mbar(self): return replacing_yield(self, 'create_mbar', False) @contextmanager def loss_not_reduced(self): if hasattr(self.loss_func, 'reduction'): return replacing_yield(self.loss_func, 'reduction', 'none') else: return replacing_yield(self, 'loss_func', partial(self.loss_func, reduction='none')) def to_detach(self,b,cpu=True,gather=True): return self.dl.to_detach(b,cpu,gather) if hasattr(getattr(self,'dl',None),'to_detach') else to_detach(b,cpu,gather) def __getstate__(self): return {k:v for k,v in self.__dict__.items() if k!='lock'} def __setstate__(self, state): self.__dict__.update(state) self.lock = threading.Lock() Learner.x,Learner.y = add_props(lambda i,x: detuplify((x.xb,x.yb)[i])) # Cell add_docs(Learner, "Group together a `model`, some `dls` and a `loss_func` to handle training", add_cbs="Add `cbs` 
to the list of `Callback` and register `self` as their learner", add_cb="Add `cb` to the list of `Callback` and register `self` as their learner", remove_cbs="Remove `cbs` from the list of `Callback` and deregister `self` as their learner", remove_cb="Add `cb` from the list of `Callback` and deregister `self` as their learner", added_cbs="Context manage that temporarily adds `cbs`", removed_cbs="Context manage that temporarily removes `cbs`", ordered_cbs="List of `Callback`s, in order, for an `event` in the training loop", create_opt="Create an optimizer with default hyper-parameters", one_batch="Train or evaluate `self.model` on batch `(xb,yb)`", all_batches="Train or evaluate `self.model` on all the batches of `self.dl`", fit="Fit `self.model` for `n_epoch` using `cbs`. Optionally `reset_opt`.", validate="Validate on `dl` with potential new `cbs`.", get_preds="Get the predictions and targets on the `ds_idx`-th dbunchset or `dl`, optionally `with_input` and `with_loss`", predict="Prediction on `item`, fully decoded, loss function decoded and probabilities", validation_context="A `ContextManagers` suitable for validation, with optional `cbs`", show_results="Show some predictions on `ds_idx`-th dataset or `dl`", show_training_loop="Show each step in the training loop", no_logging="Context manager to temporarily remove `logger`", no_mbar="Context manager to temporarily prevent the master progress bar from being created", loss_not_reduced="A context manager to evaluate `loss_func` with reduction set to none.", to_detach="Calls `to_detach` if `self.dl` provides a `.to_detach` function otherwise calls global `to_detach`", __call__="Call `event_name` for all `Callback`s in `self.cbs`" ) # Cell if not hasattr(defaults, 'callbacks'): defaults.callbacks = [TrainEvalCallback] # Cell def _before_batch_cb(f, self): xb,yb = f(self, self.xb, self.yb) self.learn.xb,self.learn.yb = xb,yb # Cell def before_batch_cb(f): "Shortcut for creating a Callback on the `before_batch` event, 
which takes and returns `xb,yb`" return Callback(before_batch=partial(_before_batch_cb, f)) # Cell @patch @delegates(save_model) def save(self:Learner, file, **kwargs): "Save model and optimizer state (if `with_opt`) to `self.path/self.model_dir/file`" file = join_path_file(file, self.path/self.model_dir, ext='.pth') save_model(file, self.model, getattr(self,'opt',None), **kwargs) return file # Cell @patch @delegates(load_model) def load(self:Learner, file, device=None, **kwargs): "Load model and optimizer state (if `with_opt`) from `self.path/self.model_dir/file` using `device`" if device is None and hasattr(self.dls, 'device'): device = self.dls.device if self.opt is None: self.create_opt() file = join_path_file(file, self.path/self.model_dir, ext='.pth') load_model(file, self.model, self.opt, device=device, **kwargs) return self # Cell @patch def export(self:Learner, fname='export.pkl', pickle_module=pickle, pickle_protocol=2): "Export the content of `self` without the items and the optimizer state for inference" if rank_distrib(): return # don't export if child proc self._end_cleanup() old_dbunch = self.dls self.dls = self.dls.new_empty() state = self.opt.state_dict() if self.opt is not None else None self.opt = None with warnings.catch_warnings(): #To avoid the warning that come from PyTorch about model not being checked warnings.simplefilter("ignore") torch.save(self, self.path/fname, pickle_module=pickle_module, pickle_protocol=pickle_protocol) self.create_opt() if state is not None: self.opt.load_state_dict(state) self.dls = old_dbunch # Cell def load_learner(fname, cpu=True, pickle_module=pickle): "Load a `Learner` object in `fname`, optionally putting it on the `cpu`" distrib_barrier() map_loc = 'cpu' if cpu else default_device() try: res = torch.load(fname, map_location=map_loc, pickle_module=pickle_module) except AttributeError as e: e.args = [f"Custom classes or functions exported with your `Learner` are not available in the namespace 
currently.\nPlease re-declare or import them before calling `load_learner`:\n\t{e.args[0]}"] raise if cpu: res.dls.cpu() if hasattr(res, 'mixed_precision'): res = res.to_fp32() elif hasattr(res, 'non_native_mixed_precision'): res = res.to_non_native_fp32() return res # Cell @docs class Metric(): "Blueprint for defining a metric" def reset(self): pass def accumulate(self, learn): pass @property def value(self): raise NotImplementedError @property def name(self): return class2attr(self, 'Metric') _docs = dict( reset="Reset inner state to prepare for new computation", name="Name of the `Metric`, camel-cased and with Metric removed", accumulate="Use `learn` to update the state with new results", value="The value of the metric") # Cell class AvgMetric(Metric): "Average the values of `func` taking into account potential different batch sizes" def __init__(self, func): self.func = func def reset(self): self.total,self.count = 0.,0 def accumulate(self, learn): bs = find_bs(learn.yb) self.total += learn.to_detach(self.func(learn.pred, *learn.yb))*bs self.count += bs @property def value(self): return self.total/self.count if self.count != 0 else None @property def name(self): return self.func.func.__name__ if hasattr(self.func, 'func') else self.func.__name__ # Cell class AvgLoss(Metric): "Average the losses taking into account potential different batch sizes" def reset(self): self.total,self.count = 0.,0 def accumulate(self, learn): bs = find_bs(learn.yb) self.total += learn.to_detach(learn.loss.mean())*bs self.count += bs @property def value(self): return self.total/self.count if self.count != 0 else None @property def name(self): return "loss" # Cell class AvgSmoothLoss(Metric): "Smooth average of the losses (exponentially weighted with `beta`)" def __init__(self, beta=0.98): self.beta = beta def reset(self): self.count,self.val = 0,tensor(0.) 
def accumulate(self, learn): self.count += 1 self.val = torch.lerp(to_detach(learn.loss.mean()), self.val, self.beta) @property def value(self): return self.val/(1-self.beta**self.count) # Cell class ValueMetric(Metric): "Use to include a pre-calculated metric value (for instance calculated in a `Callback`) and returned by `func`" def __init__(self, func, metric_name=None): store_attr('func, metric_name') @property def value(self): return self.func() @property def name(self): return self.metric_name if self.metric_name else self.func.__name__ # Cell from fastprogress.fastprogress import format_time # Cell def _maybe_item(t): t = t.value try: return t.item() except: return t # Cell class Recorder(Callback): "Callback that registers statistics (lr, loss and metrics) during training" _stateattrs=('lrs','iters','losses','values') remove_on_fetch,order = True,50 def __init__(self, add_time=True, train_metrics=False, valid_metrics=True, beta=0.98): store_attr('add_time,train_metrics,valid_metrics') self.loss,self.smooth_loss = AvgLoss(),AvgSmoothLoss(beta=beta) def before_fit(self): "Prepare state for training" self.lrs,self.iters,self.losses,self.values = [],[],[],[] names = self.metrics.attrgot('name') if self.train_metrics and self.valid_metrics: names = L('loss') + names names = names.map('train_{}') + names.map('valid_{}') elif self.valid_metrics: names = L('train_loss', 'valid_loss') + names else: names = L('train_loss') + names if self.add_time: names.append('time') self.metric_names = 'epoch'+names self.smooth_loss.reset() def after_batch(self): "Update all metrics and records lr and smooth loss in training" if len(self.yb) == 0: return mets = self._train_mets if self.training else self._valid_mets for met in mets: met.accumulate(self.learn) if not self.training: return self.lrs.append(self.opt.hypers[-1]['lr']) self.losses.append(self.smooth_loss.value) self.learn.smooth_loss = self.smooth_loss.value def before_epoch(self): "Set timer if `self.add_time=True`" 
self.cancel_train,self.cancel_valid = False,False if self.add_time: self.start_epoch = time.time() self.log = L(getattr(self, 'epoch', 0)) def before_train (self): self._train_mets[1:].map(Self.reset()) def before_validate(self): self._valid_mets.map(Self.reset()) def after_train (self): self.log += self._train_mets.map(_maybe_item) def after_validate(self): self.log += self._valid_mets.map(_maybe_item) def after_cancel_train(self): self.cancel_train = True def after_cancel_validate(self): self.cancel_valid = True def after_epoch(self): "Store and log the loss/metric values" self.learn.final_record = self.log[1:].copy() self.values.append(self.learn.final_record) if self.add_time: self.log.append(format_time(time.time() - self.start_epoch)) self.logger(self.log) self.iters.append(self.smooth_loss.count) @property def _train_mets(self): if getattr(self, 'cancel_train', False): return L() return L(self.smooth_loss) + (self.metrics if self.train_metrics else L()) @property def _valid_mets(self): if getattr(self, 'cancel_valid', False): return L() return (L(self.loss) + self.metrics if self.valid_metrics else L()) def plot_loss(self, skip_start=5, with_valid=True): plt.plot(list(range(skip_start, len(self.losses))), self.losses[skip_start:], label='train') if with_valid: idx = (np.array(self.iters)<skip_start).sum() valid_col = self.metric_names.index('valid_loss') - 1 plt.plot(self.iters[idx:], L(self.values[idx:]).itemgot(valid_col), label='valid') plt.legend() # Cell add_docs(Recorder, before_train = "Reset loss and metrics state", after_train = "Log loss and metric values on the training set (if `self.training_metrics=True`)", before_validate = "Reset loss and metrics state", after_validate = "Log loss and metric values on the validation set", after_cancel_train = "Ignore training metrics for this epoch", after_cancel_validate = "Ignore validation metrics for this epoch", plot_loss = "Plot the losses from `skip_start` and onward") if Recorder not in 
defaults.callbacks: defaults.callbacks.append(Recorder) # Internal Cell def _cast_tensor(x): if isinstance(x, tuple): return tuple(_cast_tensor(x_) for x_ in x) else: return cast(x, Tensor) if isinstance(x,torch.Tensor) else x # Cell class CastToTensor(Callback): "Cast Subclassed Tensors to `Tensor`" order=9 # Right before MixedPrecision def before_batch(self): self.learn.xb,self.learn.yb = _cast_tensor(self.learn.xb),_cast_tensor(self.learn.yb) # Cell if CastToTensor not in defaults.callbacks: defaults.callbacks.append(CastToTensor) # Cell @patch def freeze_to(self:Learner, n): if self.opt is None: self.create_opt() self.opt.freeze_to(n) self.opt.clear_state() @patch def freeze(self:Learner): self.freeze_to(-1) @patch def unfreeze(self:Learner): self.freeze_to(0) add_docs(Learner, freeze_to="Freeze parameter groups up to `n`", freeze="Freeze up to last parameter group", unfreeze="Unfreeze the entire model") # Cell @patch def tta(self:Learner, ds_idx=1, dl=None, n=4, item_tfms=None, batch_tfms=None, beta=0.25, use_max=False): "Return predictions on the `ds_idx` dataset or `dl` using Test Time Augmentation" if dl is None: dl = self.dls[ds_idx].new(shuffled=False, drop_last=False) if item_tfms is not None or batch_tfms is not None: dl = dl.new(after_item=item_tfms, after_batch=batch_tfms) try: self(_before_epoch) with dl.dataset.set_split_idx(0), self.no_mbar(): if hasattr(self,'progress'): self.progress.mbar = master_bar(list(range(n))) aug_preds = [] for i in self.progress.mbar if hasattr(self,'progress') else range(n): self.epoch = i #To keep track of progress on mbar since the progress callback will use self.epoch aug_preds.append(self.get_preds(dl=dl, inner=True)[0][None]) aug_preds = torch.cat(aug_preds) aug_preds = aug_preds.max(0)[0] if use_max else aug_preds.mean(0) self.epoch = n with dl.dataset.set_split_idx(1): preds,targs = self.get_preds(dl=dl, inner=True) finally: self(event.after_fit) if use_max: return torch.stack([preds, aug_preds], 
0).max(0)[0],targs preds = (aug_preds,preds) if beta is None else torch.lerp(aug_preds, preds, beta) return preds,targs
42.186228
204
0.666061
0491c8fb1ade03ff79099d9b70c2a8b65ee15c7a
3,259
py
Python
online_store/store/migrations/0001_initial.py
PhiVaLo/final_project2
3d61404f2fe4b9e4503087e523ea16d6c411f57f
[ "Apache-2.0" ]
null
null
null
online_store/store/migrations/0001_initial.py
PhiVaLo/final_project2
3d61404f2fe4b9e4503087e523ea16d6c411f57f
[ "Apache-2.0" ]
14
2020-12-06T13:31:34.000Z
2020-12-12T12:55:06.000Z
online_store/store/migrations/0001_initial.py
PhiVaLo/final_project2
3d61404f2fe4b9e4503087e523ea16d6c411f57f
[ "Apache-2.0" ]
1
2020-12-17T14:33:19.000Z
2020-12-17T14:33:19.000Z
# Generated by Django 3.1.3 on 2020-11-25 09:59 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Customer', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=200, null=True)), ('email', models.CharField(max_length=200, null=True)), ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Order', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date_order', models.DateTimeField(auto_now_add=True)), ('complete', models.BooleanField(default=False, null=True)), ('transaction_id', models.CharField(max_length=200, null=True)), ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.customer')), ], ), migrations.CreateModel( name='Product', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=200, null=True)), ('price', models.DecimalField(decimal_places=2, max_digits=10)), ], ), migrations.CreateModel( name='ShippingAddress', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('address', models.CharField(max_length=200, null=True)), ('city', models.CharField(max_length=200, null=True)), ('zipcode', models.CharField(max_length=200, null=True)), ('date_added', models.DateTimeField(auto_now_add=True)), ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.customer')), ('order', models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.SET_NULL, to='store.order')), ], ), migrations.CreateModel( name='OrderItem', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('quantity', models.IntegerField(blank=True, default=0, null=True)), ('date_added', models.DateTimeField(auto_now_add=True)), ('order', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.order')), ('product', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.product')), ], ), ]
48.641791
144
0.602946
8775a1857c7a9d1bf283838c09d3673e36d7a2ab
1,572
py
Python
Websocket/server_client_dualway2/client.py
likethe265/PythonCommonExample
788e74058dc8fb10cd9cce4e64d8dd721ee66c14
[ "Unlicense" ]
null
null
null
Websocket/server_client_dualway2/client.py
likethe265/PythonCommonExample
788e74058dc8fb10cd9cce4e64d8dd721ee66c14
[ "Unlicense" ]
null
null
null
Websocket/server_client_dualway2/client.py
likethe265/PythonCommonExample
788e74058dc8fb10cd9cce4e64d8dd721ee66c14
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python import asyncio import random import websockets class WebsocketClient: def __init__(self): self.clientID = random.randint(0, 100) self.event = asyncio.Event() async def eventGenerator(self): while True: await asyncio.sleep(1.5) self.event.set() async def consumer(self, message): print(f'server rec:{message}') async def receive(self, websocket): while True: msg = await websocket.recv() await self.consumer(msg) async def producer(self, websocket): while True: await self.event.wait() self.event.clear() msg = f'msg from client:{self.clientID}!' await websocket.send(msg) async def connectToServer(self): while True: try: async with websockets.connect('ws://localhost:8989') as websocket: tasks = [self.producer(websocket), self.receive(websocket)] await asyncio.gather(*tasks) except Exception as e: if type(e) == ConnectionRefusedError or type(e) == websockets.exceptions.ConnectionClosedError: print('Client is waiting for server launching...') await asyncio.sleep(5) else: raise e wsc = WebsocketClient() tasks = [wsc.connectToServer(), wsc.eventGenerator()] try: asyncio.get_event_loop().run_until_complete(asyncio.wait(tasks)) except KeyboardInterrupt: print("Client crash...")
29.660377
111
0.591603
523d9e1b021cdcbfc921e79c29e3b7254ef8e208
628
py
Python
tests/settings.py
remarkablerocket/parkrundata
c717b59771629d6308ec093e29fd373981726fde
[ "BSD-3-Clause" ]
null
null
null
tests/settings.py
remarkablerocket/parkrundata
c717b59771629d6308ec093e29fd373981726fde
[ "BSD-3-Clause" ]
null
null
null
tests/settings.py
remarkablerocket/parkrundata
c717b59771629d6308ec093e29fd373981726fde
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 from __future__ import unicode_literals, absolute_import import django DEBUG = True USE_TZ = True # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" DATABASES = { "default": { "ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:", } } ROOT_URLCONF = "tests.urls" INSTALLED_APPS = [ "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sites", "parkrundata", ] SITE_ID = 1 if django.VERSION >= (1, 10): MIDDLEWARE = () else: MIDDLEWARE_CLASSES = ()
18.470588
66
0.595541
4a64068d2a55b6574f1cc9634a6c22ccd1ff7959
1,807
py
Python
viper/modules/triage.py
Mario-Kart-Felix/mal-scrap
bc396a15ea5b144eb1c0f05759d1f9419d6671df
[ "BSD-3-Clause" ]
2
2015-12-17T20:25:09.000Z
2017-10-08T19:14:57.000Z
viper/modules/triage.py
Mario-Kart-Felix/mal-scrap
bc396a15ea5b144eb1c0f05759d1f9419d6671df
[ "BSD-3-Clause" ]
1
2015-01-05T18:07:13.000Z
2015-01-07T21:43:57.000Z
viper/modules/triage.py
Mario-Kart-Felix/mal-scrap
bc396a15ea5b144eb1c0f05759d1f9419d6671df
[ "BSD-3-Clause" ]
3
2017-10-18T00:56:53.000Z
2020-05-24T09:38:54.000Z
# -*- coding: utf-8 -*- # This file is part of Viper - https://github.com/viper-framework/viper # See the file 'LICENSE' for copying permission. from viper.common.abstracts import Module from viper.core.database import Database from viper.core.session import __sessions__ class Triage(Module): cmd = 'triage' description = "Perform some initial triaging and tagging of the file" authors = ['nex'] def __init__(self): super(Triage, self).__init__() self.parser.add_argument('-a', '--all', action='store_true', help="Triage all files") def _triage_file_type(self, obj): tags = [] # TODO: extend this triaging with as many relevant tags as possible. # For example, avoid "exe" or other too common or obvious attributes. if 'PE32' in obj.type: if 'DLL' in obj.type: self.log('info', "{} is a DLL".format(obj.name)) tags.append('dll') elif 'native' in obj.type: self.log('info', "{} is a Windows driver".format(obj.name)) tags.append('driver') return tags def run(self): super(Triage, self).run() db = Database() if self.args and self.args.all: samples = db.find(key='all') for sample in samples: tags = [] tags.extend(self._triage_file_type(sample)) db.add_tags(sample.sha256, tags) # We're running against the already opened file. else: if not __sessions__.is_set(): self.log('error', "No open session") return tags = [] tags.extend(self._triage_file_type(__sessions__.current.file)) db.add_tags(__sessions__.current.file.sha256, tags)
31.701754
93
0.585501
983b896074c3807ee215c5b7f28c01dc4b27940e
233
py
Python
01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Ariston/exc0003.py
moacirsouza/nadas
ad98d73b4281d1581fd2b2a9d29001acb426ee56
[ "MIT" ]
1
2020-07-03T13:54:18.000Z
2020-07-03T13:54:18.000Z
01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Ariston/exc0003.py
moacirsouza/nadas
ad98d73b4281d1581fd2b2a9d29001acb426ee56
[ "MIT" ]
null
null
null
01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Ariston/exc0003.py
moacirsouza/nadas
ad98d73b4281d1581fd2b2a9d29001acb426ee56
[ "MIT" ]
null
null
null
# Programa que ler 2 numeros e mostra a soma deles numero01=input('digite o primeiro número: ') numero02=input('digite o segundo número: ') print('\n A soma de {} com {} é {}'.format(numero01,numero02,(int(numero01)+int(numero02))))
46.6
92
0.716738
8852a38751796f4474e8f5616d1c38dadeb2167e
5,852
py
Python
src/globus_sdk/exc.py
mh-globus/globus-sdk-python
c740ebd85640d5c5fe92fd22e99ec05b1a280f6d
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
src/globus_sdk/exc.py
mh-globus/globus-sdk-python
c740ebd85640d5c5fe92fd22e99ec05b1a280f6d
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
src/globus_sdk/exc.py
mh-globus/globus-sdk-python
c740ebd85640d5c5fe92fd22e99ec05b1a280f6d
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
import logging import requests log = logging.getLogger(__name__) class GlobusError(Exception): """ Root of the Globus Exception hierarchy. Stub class. """ class GlobusSDKUsageError(GlobusError, ValueError): """ A ``GlobusSDKUsageError`` may be thrown in cases in which the SDK detects that it is being used improperly. These errors typically indicate that some contract regarding SDK usage (e.g. required order of operations) has been violated. """ class GlobusAPIError(GlobusError): """ Wraps errors returned by a REST API. :ivar http_status: HTTP status code (int) :ivar code: Error code from the API (str), or "Error" for unclassified errors :ivar message: Error message from the API. In general, this will be more useful to developers, but there may be cases where it's suitable for display to end users. """ def __init__(self, r, *args, **kw): self._underlying_response = r self.http_status = r.status_code if "Content-Type" in r.headers and ( "application/json" in r.headers["Content-Type"] ): log.debug( "Content-Type on error is application/json. " "Doing error load from JSON" ) try: self._load_from_json(r.json()) except (KeyError, ValueError): log.error( "Error body could not be JSON decoded! " "This means the Content-Type is wrong, or the " "body is malformed!" ) self._load_from_text(r.text) else: log.debug( "Content-Type on error is unknown. " "Failing over to error load as text (default)" ) # fallback to using the entire body as the message for all # other types self._load_from_text(r.text) args = self._get_args() GlobusError.__init__(self, *args) @property def raw_json(self): """ Get the verbatim error message received from a Globus API, interpreted as a JSON string and evaluated as a *dict* If the body cannot be loaded as JSON, this is None """ r = self._underlying_response if "Content-Type" in r.headers and ( "application/json" in r.headers["Content-Type"] ): try: return r.json() except ValueError: log.error( "Error body could not be JSON decoded! 
" "This means the Content-Type is wrong, or the " "body is malformed!" ) return None else: return None @property def raw_text(self): """ Get the verbatim error message receved from a Globus API as a *string* """ return self._underlying_response.text def _get_args(self): """ Get arguments to pass to the Exception base class. These args are displayed in stack traces. """ return (self.http_status, self.code, self.message) def _load_from_json(self, data): """ Load error data from a JSON document. Must set at least code and message instance variables. """ if "errors" in data: if len(data["errors"]) != 1: log.warning( ( "Doing JSON load of error response with multiple " "errors. Exception data will only include the " "first error, but there are really {} errors" ).format(len(data["errors"])) ) # TODO: handle responses with more than one error data = data["errors"][0] self.code = data["code"] if "message" in data: log.debug( "Doing JSON load of error response with 'message' " "field. There may also be a useful 'detail' field " "to inspect" ) self.message = data["message"] else: self.message = data["detail"] def _load_from_text(self, text): """ Load error data from a raw text body that is not JSON. Must set at least code and message instance variables. """ self.code = "Error" self.message = text # Wrappers around requests exceptions, so the SDK is API independent. class NetworkError(GlobusError): """ Error communicating with the REST API server. Holds onto original exception data, but also takes a message to explain potentially confusing or inconsistent exceptions passed to us """ def __init__(self, msg, exc, *args, **kw): super().__init__(msg) self.underlying_exception = exc class GlobusTimeoutError(NetworkError): """The REST request timed out.""" class GlobusConnectionTimeoutError(GlobusTimeoutError): """The request timed out during connection establishment. 
These errors are safe to retry.""" class GlobusConnectionError(NetworkError): """A connection error occured while making a REST request.""" def convert_request_exception(exc): """Converts incoming requests.Exception to a Globus NetworkError""" if isinstance(exc, requests.ConnectTimeout): return GlobusConnectionTimeoutError("ConnectTimeoutError on request", exc) if isinstance(exc, requests.Timeout): return GlobusTimeoutError("TimeoutError on request", exc) elif isinstance(exc, requests.ConnectionError): return GlobusConnectionError("ConnectionError on request", exc) else: return NetworkError("NetworkError on request", exc)
32.692737
82
0.585954
031887e25bb5c8d7beb9ce3d258b4ce5768d9392
5,418
py
Python
azure_functions_devops_build/repository/repository_manager.py
coolgeeck/delwar1
5d3b2b5dc4933974ff26b0f0a869061129259046
[ "MIT" ]
16
2019-02-17T22:01:32.000Z
2022-03-31T22:59:46.000Z
azure_functions_devops_build/repository/repository_manager.py
coolgeeck/delwar1
5d3b2b5dc4933974ff26b0f0a869061129259046
[ "MIT" ]
19
2019-02-11T23:31:51.000Z
2021-06-01T23:20:35.000Z
azure_functions_devops_build/repository/repository_manager.py
coolgeeck/delwar1
5d3b2b5dc4933974ff26b0f0a869061129259046
[ "MIT" ]
21
2019-01-28T21:01:12.000Z
2022-03-07T16:18:29.000Z
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from msrest.service_client import ServiceClient from msrest import Configuration, Deserializer import vsts.git.v4_1.models.git_repository_create_options as git_repository_create_options from vsts.exceptions import VstsServiceError from ..base.base_manager import BaseManager from . import models from .local_git_utils import ( git_init, git_add_remote, git_remove_remote, git_stage_all, git_commit, git_push, does_git_exist, does_local_git_repository_exist, does_git_has_credential_manager, does_git_remote_exist, construct_git_remote_name, construct_git_remote_url ) class RepositoryManager(BaseManager): """ Manage DevOps repositories Attributes: See BaseManager """ def __init__(self, organization_name="", project_name="", creds=None): base_url = 'https://dev.azure.com' self._config = Configuration(base_url=base_url) self._client = ServiceClient(creds, self._config) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._deserialize = Deserializer(client_models) super(RepositoryManager, self).__init__(creds, organization_name=organization_name, project_name=project_name) @staticmethod def check_git(): return does_git_exist() @staticmethod def check_git_local_repository(): return does_local_git_repository_exist() @staticmethod def check_git_credential_manager(): return does_git_has_credential_manager() # Check if the git repository exists first. If it does, check if the git remote exists. 
def check_git_remote(self, repository_name, remote_prefix): if not does_local_git_repository_exist(): return False remote_name = construct_git_remote_name( self._organization_name, self._project_name, repository_name, remote_prefix ) return does_git_remote_exist(remote_name) def remove_git_remote(self, repository_name, remote_prefix): remote_name = construct_git_remote_name( self._organization_name, self._project_name, repository_name, remote_prefix ) git_remove_remote(remote_name) def get_azure_devops_repository_branches(self, repository_name): try: result = self._git_client.get_branches(repository_name, self._project_name) except VstsServiceError: return [] return result def get_azure_devops_repository(self, repository_name): try: result = self._git_client.get_repository(repository_name, self._project_name) except VstsServiceError: return None return result def create_repository(self, repository_name): project = self._get_project_by_name(self._project_name) git_repo_options = git_repository_create_options.GitRepositoryCreateOptions( name=repository_name, project=project ) return self._git_client.create_repository(git_repo_options) def list_repositories(self): return self._git_client.get_repositories(self._project_name) def list_commits(self, repository_name): project = self._get_project_by_name(self._project_name) repository = self._get_repository_by_name(project, repository_name) return self._git_client.get_commits(repository.id, None, project=project.id) def get_local_git_remote_name(self, repository_name, remote_prefix): return construct_git_remote_name(self._organization_name, self._project_name, repository_name, remote_prefix) # Since the portal url and remote url are same. 
We only need one function to handle portal access and git push def get_azure_devops_repo_url(self, repository_name): return construct_git_remote_url(self._organization_name, self._project_name, repository_name) # The function will initialize a git repo, create git remote, stage all changes and commit the code # Exceptions: GitOperationException def setup_local_git_repository(self, repository_name, remote_prefix): remote_name = construct_git_remote_name( self._organization_name, self._project_name, repository_name, remote_prefix ) remote_url = construct_git_remote_url(self._organization_name, self._project_name, repository_name) if not does_local_git_repository_exist(): git_init() git_add_remote(remote_name, remote_url) git_stage_all() git_commit("Create function app with azure devops build. Remote repository url: {url}".format(url=remote_url)) # The function will push the current context in local git repository to Azure Devops # Exceptions: GitOperationException def push_local_to_azure_devops_repository(self, repository_name, remote_prefix, force): remote_name = construct_git_remote_name( self._organization_name, self._project_name, repository_name, remote_prefix ) git_push(remote_name, force)
42
118
0.706534
1df32817a54a7e37568bf44cf7f49207e1b74eaf
1,638
py
Python
sandbox/seg_inference.py
njanirudh/coding_task_make-a-model
12fa64ff6d7846c74624cc9856921f2aaba5c275
[ "MIT" ]
null
null
null
sandbox/seg_inference.py
njanirudh/coding_task_make-a-model
12fa64ff6d7846c74624cc9856921f2aaba5c275
[ "MIT" ]
null
null
null
sandbox/seg_inference.py
njanirudh/coding_task_make-a-model
12fa64ff6d7846c74624cc9856921f2aaba5c275
[ "MIT" ]
null
null
null
import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import torch from src.utils.custom_config import custom_parser_config from seg_trainer import SegmentationModule matplotlib.use('TKAgg', warn=False, force=True) if __name__ == "__main__": input_img = cv2.imread( "/home/anirudh/NJ/Interview/Pheno-Inspect/git_proj/coding_task_make-a-model/dataset/sugarbeet_weed_dataset/items/" "68653b6d-f406-442d-833e-31ffb43cf578/map/tileLayers/rgb/tiles/0-0-1.png") input_img = input_img[np.newaxis, ...] input_img = np.swapaxes(input_img, 1, 3) input_img = torch.from_numpy(input_img).float() MODEL_CHKP_PATH = "/home/anirudh/NJ/Interview/Pheno-Inspect/git_proj/coding_task_make-a-model/src/" \ "lightning_logs/version_198/checkpoints/epoch=91-step=183.ckpt" seg_inference = SegmentationModule(config_data=custom_parser_config, batch_size=5, epochs=150, gpu=1, train_mode=False) seg_inference.model.eval() seg_inference.load_state_dict(torch.load(MODEL_CHKP_PATH), strict=False) with torch.no_grad(): print(input_img.shape) output_seg = seg_inference(input_img) output_seg = torch.argmax(output_seg, 1) print(output_seg.shape) print(np.unique(output_seg)) output_seg = np.swapaxes(output_seg, 0, 2) print(np.unique(output_seg)) result = np.squeeze(output_seg) plt.imshow(result, cmap='Blues') plt.show()
38.093023
122
0.651404
698d5e312fb963f43e5d79f1a83246464d43bb70
832
py
Python
micone/conversion/network_converter.py
dileep-kishore/MiCoNE
10536113c5b28df06e522ba9967f7dc01baebae5
[ "MIT" ]
1
2019-07-23T13:54:17.000Z
2019-07-23T13:54:17.000Z
micone/conversion/network_converter.py
dileep-kishore/MiCoNE
10536113c5b28df06e522ba9967f7dc01baebae5
[ "MIT" ]
1
2020-06-01T15:45:12.000Z
2020-06-01T15:45:12.000Z
micone/conversion/network_converter.py
dileep-kishore/MiCoNE
10536113c5b28df06e522ba9967f7dc01baebae5
[ "MIT" ]
1
2019-08-03T21:19:35.000Z
2019-08-03T21:19:35.000Z
""" Module containing methods that convert networks into various formats """ import pathlib import pandas as pd from ..main import Network def json_to_elist(in_file: pathlib.Path, out_file: pathlib.Path) -> None: """ Convert Network file from json to elist format Note that only the edge attributes can be converted Parameters ---------- in_file : pathlib.Path The path to the json formatted network file out_file : pathlib.Path The path to the elist formatted network file """ network = Network.load_json(in_file) df = pd.DataFrame.from_dict(network.links) cols = list(df.columns) cols.remove("source") cols.remove("target") df = df[["source", "target"] + cols] df.to_csv(out_file, index=False) CONVERTERS = {("json", "elist"): json_to_elist}
24.470588
73
0.673077
db0ee653ba7b99bf137c744cb303c3c6f4d9112f
15,864
py
Python
tests/test_training.py
ireneb612/Voice-Cloning-App
41be23e29eae666e59a750270b43eda31506e1d7
[ "BSD-3-Clause" ]
556
2021-03-10T19:09:47.000Z
2022-03-30T13:45:13.000Z
tests/test_training.py
ireneb612/Voice-Cloning-App
41be23e29eae666e59a750270b43eda31506e1d7
[ "BSD-3-Clause" ]
100
2021-03-14T12:35:46.000Z
2022-03-26T07:57:42.000Z
tests/test_training.py
ireneb612/Voice-Cloning-App
41be23e29eae666e59a750270b43eda31506e1d7
[ "BSD-3-Clause" ]
84
2021-03-16T21:44:06.000Z
2022-03-30T21:58:28.000Z
import os import random from string import ascii_lowercase from unittest import mock import torch import shutil import logging from dataset import CHARACTER_ENCODING from training.clean_text import clean_text from training.checkpoint import ( load_checkpoint, save_checkpoint, checkpoint_cleanup, warm_start_model, transfer_symbols_embedding, ) from training.voice_dataset import VoiceDataset from training.tacotron2_model import Tacotron2 from training import DEFAULT_ALPHABET from training.train import train, MINIMUM_MEMORY_GB, WEIGHT_DECAY from training.validate import validate from training.utils import ( load_metadata, load_symbols, get_gpu_memory, get_available_memory, get_learning_rate, get_batch_size, check_early_stopping, LEARNING_RATE_PER_64, BATCH_SIZE_PER_GB, BASE_SYMBOLS, train_test_split, validate_dataset, ) from training.hifigan.train import train as train_hifigan from training.hifigan.utils import get_checkpoint_options, save_checkpoints from training.hifigan.utils import checkpoint_cleanup as hifigan_checkpoint_cleanup # Training class MockedTacotron2: _state_dict = {"param": None} def cuda(): return MockedTacotron2() def parameters(self): return {} def train(self): pass def eval(self): pass def zero_grad(self): pass def state_dict(self): return self._state_dict class MockedTacotron2Loss: class Loss: def item(): return 0.5 def __init__(self, *args): pass def __call__(self, *args): return self.Loss def item(self): return 0.5 def backward(self): pass class MockedOptimizer: param_groups = [{"lr": 0.1}] _state_dict = {"lr": 0.1} def __init__(self, parameters, lr, weight_decay): pass def step(): pass def state_dict(): return MockedOptimizer._state_dict @mock.patch("torch.cuda.is_available", return_value=True) @mock.patch("training.train.get_available_memory", return_value=MINIMUM_MEMORY_GB) @mock.patch("training.train.Tacotron2", return_value=MockedTacotron2) @mock.patch("training.train.Tacotron2Loss", return_value=MockedTacotron2Loss) 
@mock.patch("torch.optim.Adam", return_value=MockedOptimizer) @mock.patch("training.train.VoiceDataset", return_value=None) @mock.patch("training.train.DataLoader", return_value=[(None, None), (None, None)]) @mock.patch("training.train.process_batch", return_value=((None, None), (None,))) @mock.patch("torch.nn.utils.clip_grad_norm_") @mock.patch("training.train.validate", return_value=(0.5, 0.5)) @mock.patch("training.train.calc_avgmax_attention", return_value=0.5) def test_train( validate, clip_grad_norm_, process_batch, DataLoader, VoiceDataset, Adam, Tacotron2Loss, Tacotron2, get_available_memory, is_available, calc_avgmax_attention, ): metadata_path = os.path.join("test_samples", "dataset", "metadata.csv") dataset_directory = os.path.join("test_samples", "dataset", "wavs") output_directory = "checkpoint" train_size = 0.67 train( metadata_path, dataset_directory, output_directory, epochs=1, batch_size=1, early_stopping=False, multi_gpu=False, train_size=train_size, ) # Check checkpoint checkpoint_path = os.path.join(output_directory, "checkpoint_2") assert os.path.isfile(checkpoint_path) shutil.rmtree(output_directory) # Validate @mock.patch("training.validate.process_batch", return_value=((None,), (None,))) @mock.patch("training.validate.calc_avgmax_attention", return_value=0.5) def test_validate(process_batch, calc_avgmax_attention): loss, avgmax_attention = validate(MockedTacotron2(), [(None, None), (None, None)], MockedTacotron2Loss(), 0) assert loss == 0.5 assert avgmax_attention == 0.5 # Clean text def test_clean_text(): text = clean_text("1st $500 Mr. 10.5 2,000 30 a\tb ~") assert text == "first five hundred dollars mister ten point five two thousand thirty a b " def test_clean_text_with_custom_symbols(): text = clean_text("¿cómo estás?~\n", ["c", "ó", "m", "o", "e", "s", "t", "á", "s", "¿", "?", " "]) assert text == "¿cómo estás?" 
# Dataset @mock.patch("training.voice_dataset.clean_text", side_effect=lambda text, symbols: text) def test_voice_dataset(clean_text): random.seed(1234) metadata_path = os.path.join("test_samples", "dataset", "metadata.csv") audio_directory = os.path.join("test_samples", "dataset", "wavs") with open(metadata_path, encoding=CHARACTER_ENCODING) as f: filepaths_and_text = [line.strip().split("|") for line in f] dataset = VoiceDataset(filepaths_and_text, audio_directory, DEFAULT_ALPHABET) assert len(dataset) == 3 text, mel = dataset[0] assert len(text) == len("that five shots may have been") assert mel.shape[0] == 80 # Hifigan training class MockedHifigan: class Sample: def squeeze(self, index): return None def detach(self): return None def to(self, device): return self def parameters(self): return {} def train(self): pass def eval(self): pass def state_dict(self): return {} def __call__(self, x): return self.Sample() class MockedDiscriminator: def to(self, device): return self def parameters(self): return {} def state_dict(self): return {} def train(self): pass def __call__(self, *args): return [None, None, None, None] class MockedAdamW: def zero_grad(self): pass def step(self): pass def state_dict(self): return {} class MockedExponentialLR: def step(self): pass class MockedData: def to(self, device, non_blocking=False): return None def unsqueeze(self, index): return None class MockedL1Loss: def item(self): return 0 def __mul__(self, x): return 0 class MockedHifiganLoss: def __add__(self, other): return self def backward(self): return 0 def __format__(self, format_spec): return "" @mock.patch("torch.cuda.is_available", return_value=True) @mock.patch("torch.device", return_value="cpu") @mock.patch("training.hifigan.train.get_gpu_memory", return_value=0) @mock.patch("training.hifigan.train.Generator", return_value=MockedHifigan()) @mock.patch("training.hifigan.train.MultiPeriodDiscriminator", return_value=MockedDiscriminator()) 
@mock.patch("training.hifigan.train.MultiScaleDiscriminator", return_value=MockedDiscriminator()) @mock.patch("torch.optim.AdamW", return_value=MockedAdamW()) @mock.patch("torch.optim.lr_scheduler.ExponentialLR", return_value=MockedExponentialLR()) @mock.patch("training.hifigan.train.MelDataset", return_value=None) @mock.patch( "training.hifigan.train.DataLoader", return_value=[(MockedData(), MockedData(), MockedData(), MockedData())] ) @mock.patch("training.hifigan.train.mel_spectrogram", return_value=None) @mock.patch("training.hifigan.train.discriminator_loss", return_value=(MockedHifiganLoss(), 0, 0)) @mock.patch("torch.nn.functional.l1_loss", return_value=MockedL1Loss()) @mock.patch("training.hifigan.train.feature_loss", return_value=0) @mock.patch("training.hifigan.train.generator_loss", return_value=(MockedHifiganLoss(), 0)) def test_hifigan_train(*args): dataset_directory = os.path.join("test_samples", "dataset", "wavs") output_directory = "hifigan_checkpoints" train_hifigan(dataset_directory, output_directory, epochs=1, batch_size=1, iters_per_checkpoint=1, train_size=0.67) assert set(os.listdir(output_directory)) == {"do_2", "g_2"} shutil.rmtree(output_directory) # Hifigan utils @mock.patch("os.listdir", return_value=["do_1", "g_1", "do_3", "g_3", "do_2", "g_2"]) def test_get_checkpoint_options(listdir): assert get_checkpoint_options("") == [3, 2, 1] @mock.patch("torch.save") @mock.patch("training.hifigan.utils.checkpoint_cleanup") def test_save_checkpoints(checkpoint_cleanup, save): class DataObject: def __init__(self, value): self.value = value def state_dict(self): return self.value output_directory = "out" generator = DataObject({"generator": None}) mpd = DataObject({"mpd": None}) msd = DataObject({"msd": None}) optim_g = DataObject({"optim_g": None}) optim_d = DataObject({"optim_d": None}) iterations = 1 epochs = 1 generator_payload = {"generator": generator.state_dict()} discriminator_payload = { "mpd": mpd.state_dict(), "msd": msd.state_dict(), 
"optim_g": optim_g.state_dict(), "optim_d": optim_d.state_dict(), "steps": iterations, "epoch": epochs, } save_checkpoints(generator, mpd, msd, optim_g, optim_d, iterations, epochs, output_directory, 10, 100, logging) assert save.call_count == 2 assert list(save.call_args_list[0][0]) == [generator_payload, os.path.join(output_directory, "g_1")] assert list(save.call_args_list[1][0]) == [discriminator_payload, os.path.join(output_directory, "do_1")] @mock.patch("os.remove") def test_hifigan_checkpoint_cleanup_should_remove(remove): output_directory = "hifigan_checkpoints" hifigan_checkpoint_cleanup(output_directory, 20, 10, 100) assert remove.call_args_list[0][0][0] == os.path.join(output_directory, "g_10") assert remove.call_args_list[1][0][0] == os.path.join(output_directory, "do_10") @mock.patch("os.remove") def test_hifigan_checkpoint_cleanup_should_not_remove(remove): hifigan_checkpoint_cleanup("", 110, 10, 100) assert not remove.called # Checkpoints def test_load_and_save_checkpoint(): model_path = os.path.join("test_samples", "model.pt") model = Tacotron2() lr = 0.1 symbols = list("ABC") optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=WEIGHT_DECAY) model, optimizer, iteration, epoch = load_checkpoint(model_path, model, optimizer, [None] * 10000) assert iteration == 510000 assert epoch == iteration // 10000 checkpoint_folder = "test-checkpoints" os.makedirs(checkpoint_folder) save_checkpoint(model, optimizer, lr, iteration, symbols, epoch, checkpoint_folder, 1000, 1000) assert "checkpoint_510000" in os.listdir(checkpoint_folder) shutil.rmtree(checkpoint_folder) class MockedEmbeddingLayer: weight = torch.zeros(3) def test_transfer_symbols_embedding(): original_embedding_weight = torch.Tensor([0.1, 0.2, 0.3]) embedding_layer = MockedEmbeddingLayer() original_symbols = ["a", "c", "e"] new_symbols = ["a", "b", "é"] transfer_symbols_embedding(original_embedding_weight, embedding_layer, new_symbols, original_symbols) # Should match existing 
value in original_embedding_weight assert embedding_layer.weight[0] == 0.1 # Should not match existing value in original_embedding_weight assert embedding_layer.weight[1] not in original_embedding_weight # Should map e -> é assert embedding_layer.weight[2] == 0.3 @mock.patch("os.remove") def test_checkpoint_cleanup_should_remove(remove): # Old checkpoint (checkpoint_1000) should be removed checkpoint_cleanup("checkpoints", 2000, 1000, 10000) remove.assert_called_with(os.path.join("checkpoints", "checkpoint_1000")) @mock.patch("os.remove") def test_checkpoint_cleanup_should_not_remove(remove): # Backup checkpoint (checkpoint_20000) should not be removed checkpoint_cleanup("checkpoints", 21000, 1000, 10000) assert not remove.called def test_warm_start_model(): model_path = os.path.join("test_samples", "model.pt") model = Tacotron2() ignore_layers = ["embedding.weight"] symbols = list("ABC") model = warm_start_model(model_path, model, symbols, ignore_layers=ignore_layers) model_dict = model.state_dict() checkpoint_dict = torch.load(model_path, map_location="cpu")["state_dict"] for k in checkpoint_dict.keys(): if k not in ignore_layers: assert torch.equal(model_dict[k], checkpoint_dict[k]) # Metadata def test_load_metadata(): metadata_path = os.path.join("test_samples", "dataset", "metadata.csv") data = { "0_2730.wav": "the examination and testimony of the experts", "2820_5100.wav": "enabled the commission to conclude", "5130_7560.wav": "that five shots may have been", } filepaths_and_text = load_metadata(metadata_path) assert len(filepaths_and_text) == 3 for name, text in filepaths_and_text: assert data[name] == text def test_train_test_split(): filepaths_and_text = [ ("0_2730.wav", "the examination and testimony of the experts"), ("2820_5100.wav", "enabled the commission to conclude"), ("5130_7560.wav", "that five shots may have been"), ] train_files, test_files = train_test_split(filepaths_and_text, 0.67) assert train_files == filepaths_and_text[:2] assert 
test_files == filepaths_and_text[2:] # Validate dataset @mock.patch("os.listdir", return_value=["1.wav", "3.wav"]) def test_validate_dataset_missing_files(listdir): filepaths_and_text = [("1.wav", "abc"), ("2.wav", "abc"), ("3.wav", "abc")] symbols = ["a", "b", "c"] exception = "" try: validate_dataset(filepaths_and_text, "", symbols) except AssertionError as e: exception = str(e) assert exception == "Missing files: 2.wav" @mock.patch("os.listdir", return_value=["1.wav", "2.wav"]) def test_validate_dataset_invalid_characters(listdir): filepaths_and_text = [ ("1.wav", "abc"), ("2.wav", "def"), ] symbols = ["a", "b", "c"] exception = "" try: validate_dataset(filepaths_and_text, "", symbols) except AssertionError as e: exception = str(e) failed_characters = exception.split(":")[1] for character in ["d", "e", "f"]: assert character in failed_characters # Memory class FakeDeviceProperties: total_memory = 8 * 1024 * 1024 * 1024 @mock.patch("torch.cuda.get_device_properties", return_value=FakeDeviceProperties) @mock.patch("torch.cuda.memory_allocated", return_value=1 * 1024 * 1024 * 1024) def test_get_available_memory(memory_allocated, get_device_properties, device_count): # 8GB Device memory - 1GB Usage assert get_gpu_memory(0) == 7 @mock.patch("torch.cuda.device_count", return_value=2) @mock.patch("torch.cuda.get_device_properties", return_value=FakeDeviceProperties) @mock.patch("torch.cuda.memory_allocated", return_value=1 * 1024 * 1024 * 1024) def test_get_available_memory(memory_allocated, get_device_properties, device_count): # 16GB Device memory - 2GB Usage assert get_available_memory() == 14 # Symbols def test_load_symbols(): alphabet_path = os.path.join("test_samples", "english.txt") symbols = set(load_symbols(alphabet_path)) assert set(ascii_lowercase).issubset(symbols) assert set(BASE_SYMBOLS).issubset(symbols) # Early stopping def test_early_stopping(): # Too few values assert check_early_stopping([10, 10, 10, 10]) is False # Loss still improving assert 
check_early_stopping([1.1, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]) is False # Loss not improving assert check_early_stopping([0.5, 0.4999, 0.5, 0.4999, 0.5, 0.4998, 0.4999, 0.4996, 0.4997, 0.5]) is True # Parameters def test_get_learning_rate(): batch_size = 40 lr = get_learning_rate(batch_size) assert lr == (batch_size / 64) ** 0.5 * LEARNING_RATE_PER_64 def test_get_batch_size(): memory = 8 batch_size = get_batch_size(memory) assert batch_size == int(memory * BATCH_SIZE_PER_GB)
29.932075
119
0.697869
eb9474aac8a1303c4b3554881c55efd8ffd8a460
24,953
py
Python
CTFd/models/__init__.py
amanbansal2709/ctfd
941335a5e205ca818ce1758076858b628e4fa05b
[ "Apache-2.0" ]
null
null
null
CTFd/models/__init__.py
amanbansal2709/ctfd
941335a5e205ca818ce1758076858b628e4fa05b
[ "Apache-2.0" ]
null
null
null
CTFd/models/__init__.py
amanbansal2709/ctfd
941335a5e205ca818ce1758076858b628e4fa05b
[ "Apache-2.0" ]
1
2021-12-23T14:11:15.000Z
2021-12-23T14:11:15.000Z
from flask_sqlalchemy import SQLAlchemy from flask_marshmallow import Marshmallow from sqlalchemy.sql.expression import union_all from sqlalchemy.orm import validates, column_property from sqlalchemy.ext.hybrid import hybrid_property from CTFd.utils.crypto import hash_password from CTFd.cache import cache import datetime import six db = SQLAlchemy() ma = Marshmallow() def get_class_by_tablename(tablename): """Return class reference mapped to table. https://stackoverflow.com/a/23754464 :param tablename: String with name of table. :return: Class reference or None. """ for c in db.Model._decl_class_registry.values(): if hasattr(c, '__tablename__') and c.__tablename__ == tablename: return c return None class Notifications(db.Model): __tablename__ = 'notifications' id = db.Column(db.Integer, primary_key=True) title = db.Column(db.Text) content = db.Column(db.Text) date = db.Column(db.DateTime, default=datetime.datetime.utcnow) user_id = db.Column(db.Integer, db.ForeignKey('users.id')) team_id = db.Column(db.Integer, db.ForeignKey('teams.id')) user = db.relationship('Users', foreign_keys="Notifications.user_id", lazy='select') team = db.relationship('Teams', foreign_keys="Notifications.team_id", lazy='select') def __init__(self, *args, **kwargs): super(Notifications, self).__init__(**kwargs) class Pages(db.Model): __tablename__ = 'pages' id = db.Column(db.Integer, primary_key=True) title = db.Column(db.String(80)) route = db.Column(db.String(128), unique=True) content = db.Column(db.Text) draft = db.Column(db.Boolean) hidden = db.Column(db.Boolean) auth_required = db.Column(db.Boolean) # TODO: Use hidden attribute files = db.relationship("PageFiles", backref="page") def __init__(self, *args, **kwargs): super(Pages, self).__init__(**kwargs) def __repr__(self): return "<Pages {0}>".format(self.route) class Challenges(db.Model): __tablename__ = 'challenges' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(80)) description = db.Column(db.Text) 
max_attempts = db.Column(db.Integer, default=0) value = db.Column(db.Integer) category = db.Column(db.String(80)) type = db.Column(db.String(80)) state = db.Column(db.String(80), nullable=False, default='visible') requirements = db.Column(db.JSON) files = db.relationship("ChallengeFiles", backref="challenge") tags = db.relationship("Tags", backref="challenge") hints = db.relationship("Hints", backref="challenge") __mapper_args__ = { 'polymorphic_identity': 'standard', 'polymorphic_on': type } def __init__(self, *args, **kwargs): super(Challenges, self).__init__(**kwargs) def __repr__(self): return '<Challenge %r>' % self.name class Hints(db.Model): __tablename__ = 'hints' id = db.Column(db.Integer, primary_key=True) type = db.Column(db.String(80), default='standard') challenge_id = db.Column(db.Integer, db.ForeignKey('challenges.id')) content = db.Column(db.Text) cost = db.Column(db.Integer, default=0) requirements = db.Column(db.JSON) __mapper_args__ = { 'polymorphic_identity': 'standard', 'polymorphic_on': type } @property def name(self): return "Hint {id}".format(id=self.id) @property def category(self): return self.__tablename__ @property def description(self): return "Hint for {name}".format(name=self.challenge.name) def __init__(self, *args, **kwargs): super(Hints, self).__init__(**kwargs) def __repr__(self): return '<Hint %r>' % self.content class Awards(db.Model): __tablename__ = 'awards' id = db.Column(db.Integer, primary_key=True) user_id = db.Column(db.Integer, db.ForeignKey('users.id')) team_id = db.Column(db.Integer, db.ForeignKey('teams.id')) type = db.Column(db.String(80), default='standard') name = db.Column(db.String(80)) description = db.Column(db.Text) date = db.Column(db.DateTime, default=datetime.datetime.utcnow) value = db.Column(db.Integer) category = db.Column(db.String(80)) icon = db.Column(db.Text) requirements = db.Column(db.JSON) user = db.relationship('Users', foreign_keys="Awards.user_id", lazy='select') team = db.relationship('Teams', 
foreign_keys="Awards.team_id", lazy='select') __mapper_args__ = { 'polymorphic_identity': 'standard', 'polymorphic_on': type } @hybrid_property def account_id(self): user_mode = get_config('user_mode') if user_mode == 'teams': return self.team_id elif user_mode == 'users': return self.user_id def __init__(self, *args, **kwargs): super(Awards, self).__init__(**kwargs) def __repr__(self): return '<Award %r>' % self.name class Tags(db.Model): __tablename__ = 'tags' id = db.Column(db.Integer, primary_key=True) challenge_id = db.Column(db.Integer, db.ForeignKey('challenges.id')) value = db.Column(db.String(80)) def __init__(self, *args, **kwargs): super(Tags, self).__init__(**kwargs) class Files(db.Model): __tablename__ = 'files' id = db.Column(db.Integer, primary_key=True) type = db.Column(db.String(80), default='standard') location = db.Column(db.Text) __mapper_args__ = { 'polymorphic_identity': 'standard', 'polymorphic_on': type } def __init__(self, *args, **kwargs): super(Files, self).__init__(**kwargs) def __repr__(self): return "<File type={type} location={location}>".format(type=self.type, location=self.location) class ChallengeFiles(Files): __mapper_args__ = { 'polymorphic_identity': 'challenge' } challenge_id = db.Column(db.Integer, db.ForeignKey('challenges.id')) def __init__(self, *args, **kwargs): super(ChallengeFiles, self).__init__(**kwargs) class PageFiles(Files): __mapper_args__ = { 'polymorphic_identity': 'page' } page_id = db.Column(db.Integer, db.ForeignKey('pages.id')) def __init__(self, *args, **kwargs): super(PageFiles, self).__init__(**kwargs) class Flags(db.Model): __tablename__ = 'flags' id = db.Column(db.Integer, primary_key=True) challenge_id = db.Column(db.Integer, db.ForeignKey('challenges.id')) type = db.Column(db.String(80)) content = db.Column(db.Text) data = db.Column(db.Text) challenge = db.relationship('Challenges', foreign_keys="Flags.challenge_id", lazy='select') __mapper_args__ = { 'polymorphic_on': type } def __init__(self, *args, 
**kwargs): super(Flags, self).__init__(**kwargs) def __repr__(self): return "<Flag {0} for challenge {1}>".format(self.content, self.challenge_id) class Users(db.Model): __tablename__ = 'users' __table_args__ = ( db.UniqueConstraint('id', 'oauth_id'), {} ) # Core attributes id = db.Column(db.Integer, primary_key=True) oauth_id = db.Column(db.Integer, unique=True) # User names are not constrained to be unique to allow for official/unofficial teams. name = db.Column(db.String(128)) password = db.Column(db.String(128)) email = db.Column(db.String(128), unique=True) type = db.Column(db.String(80)) secret = db.Column(db.String(128)) # Supplementary attributes website = db.Column(db.String(128)) affiliation = db.Column(db.String(128)) country = db.Column(db.String(32)) bracket = db.Column(db.String(32)) hidden = db.Column(db.Boolean, default=False) banned = db.Column(db.Boolean, default=False) verified = db.Column(db.Boolean, default=False) # Relationship for Teams team_id = db.Column(db.Integer, db.ForeignKey('teams.id')) created = db.Column(db.DateTime, default=datetime.datetime.utcnow) __mapper_args__ = { 'polymorphic_identity': 'user', 'polymorphic_on': type } def __init__(self, **kwargs): super(Users, self).__init__(**kwargs) @validates('password') def validate_password(self, key, plaintext): return hash_password(str(plaintext)) @hybrid_property def account_id(self): user_mode = get_config('user_mode') if user_mode == 'teams': return self.team_id elif user_mode == 'users': return self.id @property def solves(self): return self.get_solves(admin=False) @property def fails(self): return self.get_fails(admin=False) @property def awards(self): return self.get_awards(admin=False) @property def score(self): return self.get_score(admin=False) @property def place(self): return self.get_place(admin=False) def get_solves(self, admin=False): solves = Solves.query.filter_by(user_id=self.id) freeze = get_config('freeze') if freeze and admin is False: dt = 
datetime.datetime.utcfromtimestamp(freeze) solves = solves.filter(Solves.date < dt) return solves.all() def get_fails(self, admin=False): fails = Fails.query.filter_by(user_id=self.id) freeze = get_config('freeze') if freeze and admin is False: dt = datetime.datetime.utcfromtimestamp(freeze) fails = fails.filter(Solves.date < dt) return fails.all() def get_awards(self, admin=False): awards = Awards.query.filter_by(user_id=self.id) freeze = get_config('freeze') if freeze and admin is False: dt = datetime.datetime.utcfromtimestamp(freeze) awards = awards.filter(Solves.date < dt) return awards.all() def get_score(self, admin=False): score = db.func.sum(Challenges.value).label('score') user = db.session.query( Solves.user_id, score ) \ .join(Users, Solves.user_id == Users.id) \ .join(Challenges, Solves.challenge_id == Challenges.id) \ .filter(Users.id == self.id) award_score = db.func.sum(Awards.value).label('award_score') award = db.session.query(award_score).filter_by(user_id=self.id) if not admin: freeze = Configs.query.filter_by(key='freeze').first() if freeze and freeze.value: freeze = int(freeze.value) freeze = datetime.datetime.utcfromtimestamp(freeze) user = user.filter(Solves.date < freeze) award = award.filter(Awards.date < freeze) user = user.group_by(Solves.user_id).first() award = award.first() if user and award: return int(user.score or 0) + int(award.award_score or 0) elif user: return int(user.score or 0) elif award: return int(award.award_score or 0) else: return 0 def get_place(self, admin=False, numeric=False): """ This method is generally a clone of CTFd.scoreboard.get_standings. The point being that models.py must be self-reliant and have little to no imports within the CTFd application as importing from the application itself will result in a circular import. 
""" scores = db.session.query( Solves.user_id.label('user_id'), db.func.sum(Challenges.value).label('score'), db.func.max(Solves.id).label('id'), db.func.max(Solves.date).label('date') ).join(Challenges).filter(Challenges.value != 0).group_by(Solves.user_id) awards = db.session.query( Awards.user_id.label('user_id'), db.func.sum(Awards.value).label('score'), db.func.max(Awards.id).label('id'), db.func.max(Awards.date).label('date') ).filter(Awards.value != 0).group_by(Awards.user_id) if not admin: freeze = Configs.query.filter_by(key='freeze').first() if freeze and freeze.value: freeze = int(freeze.value) freeze = datetime.datetime.utcfromtimestamp(freeze) scores = scores.filter(Solves.date < freeze) awards = awards.filter(Awards.date < freeze) results = union_all(scores, awards).alias('results') sumscores = db.session.query( results.columns.user_id, db.func.sum(results.columns.score).label('score'), db.func.max(results.columns.id).label('id'), db.func.max(results.columns.date).label('date') ).group_by(results.columns.user_id).subquery() if admin: standings_query = db.session.query( Users.id.label('user_id'), ) \ .join(sumscores, Users.id == sumscores.columns.user_id) \ .order_by(sumscores.columns.score.desc(), sumscores.columns.id) else: standings_query = db.session.query( Users.id.label('user_id'), ) \ .join(sumscores, Users.id == sumscores.columns.user_id) \ .filter(Users.banned == False, Users.hidden == False) \ .order_by(sumscores.columns.score.desc(), sumscores.columns.id) standings = standings_query.all() # http://codegolf.stackexchange.com/a/4712 try: i = standings.index((self.id,)) + 1 if numeric: return i else: k = i % 10 return "%d%s" % (i, "tsnrhtdd"[(i / 10 % 10 != 1) * (k < 4) * k::4]) except ValueError: return 0 class Admins(Users): __tablename__ = 'admins' __mapper_args__ = { 'polymorphic_identity': 'admin' } class Teams(db.Model): __tablename__ = 'teams' __table_args__ = ( db.UniqueConstraint('id', 'oauth_id'), {} ) # Core attributes id = 
db.Column(db.Integer, primary_key=True) oauth_id = db.Column(db.Integer, unique=True) # Team names are not constrained to be unique to allow for official/unofficial teams. name = db.Column(db.String(128)) email = db.Column(db.String(128), unique=True) password = db.Column(db.String(128)) secret = db.Column(db.String(128)) members = db.relationship("Users", backref="team", foreign_keys='Users.team_id') # Supplementary attributes website = db.Column(db.String(128)) affiliation = db.Column(db.String(128)) country = db.Column(db.String(32)) bracket = db.Column(db.String(32)) hidden = db.Column(db.Boolean, default=False) banned = db.Column(db.Boolean, default=False) # Relationship for Users captain_id = db.Column(db.Integer, db.ForeignKey('users.id')) captain = db.relationship("Users", foreign_keys=[captain_id]) created = db.Column(db.DateTime, default=datetime.datetime.utcnow) def __init__(self, **kwargs): super(Teams, self).__init__(**kwargs) @validates('password') def validate_password(self, key, plaintext): return hash_password(str(plaintext)) @property def solves(self): return self.get_solves(admin=False) @property def fails(self): return self.get_fails(admin=False) @property def awards(self): return self.get_awards(admin=False) @property def score(self): return self.get_score(admin=False) @property def place(self): return self.get_place(admin=False) def get_solves(self, admin=False): member_ids = [member.id for member in self.members] solves = Solves.query.filter( Solves.user_id.in_(member_ids) ).order_by( Solves.date.asc() ) freeze = get_config('freeze') if freeze and admin is False: dt = datetime.datetime.utcfromtimestamp(freeze) solves = solves.filter(Solves.date < dt) return solves.all() def get_fails(self, admin=False): member_ids = [member.id for member in self.members] fails = Fails.query.filter( Fails.user_id.in_(member_ids) ).order_by( Fails.date.asc() ) freeze = get_config('freeze') if freeze and admin is False: dt = 
datetime.datetime.utcfromtimestamp(freeze) fails = fails.filter(Solves.date < dt) return fails.all() def get_awards(self, admin=False): member_ids = [member.id for member in self.members] awards = Awards.query.filter( Awards.user_id.in_(member_ids) ).order_by( Awards.date.asc() ) freeze = get_config('freeze') if freeze and admin is False: dt = datetime.datetime.utcfromtimestamp(freeze) awards = awards.filter(Solves.date < dt) return awards.all() def get_score(self, admin=False): score = 0 for member in self.members: score += member.get_score(admin=admin) return score def get_place(self, admin=False): """ This method is generally a clone of CTFd.scoreboard.get_standings. The point being that models.py must be self-reliant and have little to no imports within the CTFd application as importing from the application itself will result in a circular import. """ scores = db.session.query( Solves.team_id.label('team_id'), db.func.sum(Challenges.value).label('score'), db.func.max(Solves.id).label('id'), db.func.max(Solves.date).label('date') ).join(Challenges).filter(Challenges.value != 0).group_by(Solves.team_id) awards = db.session.query( Awards.team_id.label('team_id'), db.func.sum(Awards.value).label('score'), db.func.max(Awards.id).label('id'), db.func.max(Awards.date).label('date') ).filter(Awards.value != 0).group_by(Awards.team_id) if not admin: freeze = Configs.query.filter_by(key='freeze').first() if freeze and freeze.value: freeze = int(freeze.value) freeze = datetime.datetime.utcfromtimestamp(freeze) scores = scores.filter(Solves.date < freeze) awards = awards.filter(Awards.date < freeze) results = union_all(scores, awards).alias('results') sumscores = db.session.query( results.columns.team_id, db.func.sum(results.columns.score).label('score'), db.func.max(results.columns.id).label('id'), db.func.max(results.columns.date).label('date') ).group_by(results.columns.team_id).subquery() if admin: standings_query = db.session.query( Teams.id.label('team_id'), ) \ 
.join(sumscores, Teams.id == sumscores.columns.team_id) \ .order_by(sumscores.columns.score.desc(), sumscores.columns.id) else: standings_query = db.session.query( Teams.id.label('team_id'), ) \ .join(sumscores, Teams.id == sumscores.columns.team_id) \ .filter(Teams.banned == False) \ .order_by(sumscores.columns.score.desc(), sumscores.columns.id) standings = standings_query.all() # http://codegolf.stackexchange.com/a/4712 try: i = standings.index((self.id,)) + 1 k = i % 10 return "%d%s" % (i, "tsnrhtdd"[(i / 10 % 10 != 1) * (k < 4) * k::4]) except ValueError: return 0 class Submissions(db.Model): __tablename__ = 'submissions' id = db.Column(db.Integer, primary_key=True) challenge_id = db.Column(db.Integer, db.ForeignKey('challenges.id', ondelete='CASCADE')) user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='CASCADE')) team_id = db.Column(db.Integer, db.ForeignKey('teams.id', ondelete='CASCADE')) ip = db.Column(db.String(46)) provided = db.Column(db.Text) type = db.Column(db.String(32)) date = db.Column(db.DateTime, default=datetime.datetime.utcnow) # Relationships user = db.relationship('Users', foreign_keys="Submissions.user_id", lazy='select') team = db.relationship('Teams', foreign_keys="Submissions.team_id", lazy='select') challenge = db.relationship('Challenges', foreign_keys="Submissions.challenge_id", lazy='select') __mapper_args__ = { 'polymorphic_on': type, } @hybrid_property def account_id(self): user_mode = get_config('user_mode') if user_mode == 'teams': return self.team_id elif user_mode == 'users': return self.user_id @hybrid_property def account(self): user_mode = get_config('user_mode') if user_mode == 'teams': return self.team elif user_mode == 'users': return self.user @staticmethod def get_child(type): child_classes = { x.polymorphic_identity: x.class_ for x in Submissions.__mapper__.self_and_descendants } return child_classes[type] def __repr__(self): return '<Submission {}, {}, {}, {}>'.format(self.team_id, self.challenge_id, 
self.ip, self.provided) class Solves(Submissions): __tablename__ = 'solves' __table_args__ = ( db.UniqueConstraint('challenge_id', 'user_id'), db.UniqueConstraint('challenge_id', 'team_id'), {} ) id = db.Column(None, db.ForeignKey('submissions.id', ondelete='CASCADE'), primary_key=True) challenge_id = column_property(db.Column(db.Integer, db.ForeignKey('challenges.id', ondelete='CASCADE')), Submissions.challenge_id) user_id = column_property(db.Column(db.Integer, db.ForeignKey('users.id', ondelete='CASCADE')), Submissions.user_id) team_id = column_property(db.Column(db.Integer, db.ForeignKey('teams.id', ondelete='CASCADE')), Submissions.team_id) user = db.relationship('Users', foreign_keys="Solves.user_id", lazy='select') team = db.relationship('Teams', foreign_keys="Solves.team_id", lazy='select') challenge = db.relationship('Challenges', foreign_keys="Solves.challenge_id", lazy='select') __mapper_args__ = { 'polymorphic_identity': 'correct' } class Fails(Submissions): __mapper_args__ = { 'polymorphic_identity': 'incorrect' } class Unlocks(db.Model): __tablename__ = 'unlocks' id = db.Column(db.Integer, primary_key=True) user_id = db.Column(db.Integer, db.ForeignKey('users.id')) team_id = db.Column(db.Integer, db.ForeignKey('teams.id')) target = db.Column(db.Integer) date = db.Column(db.DateTime, default=datetime.datetime.utcnow) type = db.Column(db.String(32)) __mapper_args__ = { 'polymorphic_on': type, } @hybrid_property def account_id(self): user_mode = get_config('user_mode') if user_mode == 'teams': return self.team_id elif user_mode == 'users': return self.user_id def __repr__(self): return '<Unlock %r>' % self.id class HintUnlocks(Unlocks): __mapper_args__ = { 'polymorphic_identity': 'hints' } class Tracking(db.Model): __tablename__ = 'tracking' id = db.Column(db.Integer, primary_key=True) type = db.Column(db.String(32)) ip = db.Column(db.String(46)) user_id = db.Column(db.Integer, db.ForeignKey('users.id')) date = db.Column(db.DateTime, 
default=datetime.datetime.utcnow) user = db.relationship('Users', foreign_keys="Tracking.user_id", lazy='select') __mapper_args__ = { 'polymorphic_on': type, } def __init__(self, *args, **kwargs): super(Tracking, self).__init__(**kwargs) def __repr__(self): return '<Tracking %r>' % self.ip class Configs(db.Model): __tablename__ = 'config' id = db.Column(db.Integer, primary_key=True) key = db.Column(db.Text) value = db.Column(db.Text) def __init__(self, *args, **kwargs): super(Configs, self).__init__(**kwargs) @cache.memoize() def get_config(key): """ This should be a direct clone of its implementation in utils. It is used to avoid a circular import. """ config = Configs.query.filter_by(key=key).first() if config and config.value: value = config.value if value and value.isdigit(): return int(value) elif value and isinstance(value, six.string_types): if value.lower() == 'true': return True elif value.lower() == 'false': return False else: return value
32.78975
120
0.62257
5dbec600880facd14277c574d044846e9e379924
32,082
py
Python
eqsig/functions.py
eng-tools/eqsig
c6b5eccea3d37a2debe084ad663d58af84306c86
[ "MIT" ]
15
2018-10-08T19:18:06.000Z
2022-02-05T16:03:31.000Z
eqsig/functions.py
eng-tools/eqsig
c6b5eccea3d37a2debe084ad663d58af84306c86
[ "MIT" ]
2
2019-11-06T05:07:45.000Z
2021-04-19T09:59:25.000Z
eqsig/functions.py
eng-tools/eqsig
c6b5eccea3d37a2debe084ad663d58af84306c86
[ "MIT" ]
8
2018-10-08T19:18:09.000Z
2022-02-03T12:08:33.000Z
import numpy as np import eqsig from eqsig import exceptions def time_series_from_motion(motion, dt): npts = len(motion) return np.linspace(0, dt * (npts + 1), npts) def determine_indices_of_peaks_for_cleaned(values): """DEPRECATED: Use determine_indices_of_peaks_for_cleaned_array()""" return determine_indices_of_peaks_for_cleaned_array(values) def determine_indices_of_peaks_for_cleaned_array(values): """ Determines the position of values that form a local peak in a signal. Warning: data must be cleaned so that adjacent points have the same value Parameters ---------- values: array_like Array of values that peaks will be found in Returns ------- peak_indices: array_like of int Array of indices of peaks """ diff = np.ediff1d(values, to_begin=0) # if negative then direction has switched # direction_switch = np.insert(direction_switch, 0, 0) peak_indices = np.where(diff[1:] * diff[:-1] < 0)[0] peak_indices = np.insert(peak_indices, 0, 0) # Include first and last value peak_indices = np.insert(peak_indices, len(peak_indices), len(values) - 1) return peak_indices def _determine_peak_only_series_4_cleaned_data(values): """ Determines the Note: array must not contain adjacent repeated values :param values: :return: """ peak_indices = determine_indices_of_peaks_for_cleaned_array(values) peak_values = np.take(values, peak_indices) signs = np.where(np.mod(np.arange(len(peak_values)), 2), -1, 1) delta_peaks = np.where(-signs * peak_values < 0, -np.abs(peak_values), np.abs(peak_values)) delta_peaks_series = np.zeros_like(values) np.put(delta_peaks_series, peak_indices, delta_peaks) return delta_peaks_series def determine_peak_only_delta_series_4_cleaned_data(values): """ Determines the Note: array must not contain adjacent repeated values :param values: :return: """ peak_indices = determine_indices_of_peaks_for_cleaned_array(values) peak_values = np.take(values, peak_indices) delta_peaks = np.diff(peak_values) delta_peaks = np.insert(delta_peaks, 0, 0) delta_peaks_series = 
np.zeros_like(values) assert len(delta_peaks) == len(peak_indices) np.put(delta_peaks_series, peak_indices, delta_peaks) return delta_peaks_series def clean_out_non_changing(values): """ Takes an array removes all values that are the same as the previous value. :param values: array of floats :return: cleaned array, indices of clean values in original array """ # diff_values = np.diff(values) # diff_values = np.insert(diff_values, 0, values[0]) diff_values = np.ediff1d(values, to_begin=values[0]) non_zero_indices = np.where(diff_values != 0)[0] non_zero_indices = np.insert(non_zero_indices, 0, 0) cleaned_values = np.take(values, non_zero_indices) return cleaned_values, non_zero_indices def get_peak_array_indices(values, ptype='all'): """ Find the indices for all of the local maxima and minima Parameters ---------- :param values: array_like, array of values :return: Examples -------- >>> values = np.array([0, 2, 1, 2, -1, 1, 1, 0.3, -1, 0.2, 1, 0.2]) np.array([0, 2, 1, 2, -1, 1, 1, 0.3, -1, 0.2, 1, 0.2]) >>> get_peak_array_indices(values) np.array([0, 1, 2, 3, 4, 5, 8, 10, 11]) """ # enforce array type values = np.array(values, dtype=float) # remove all non-changing values cleaned_values, non_zero_indices = clean_out_non_changing(values) # cleaned_values *= np.sign(cleaned_values[1]) # ensure first value is increasing peak_cleaned_indices = determine_indices_of_peaks_for_cleaned_array(cleaned_values) peak_full_indices = np.take(non_zero_indices, peak_cleaned_indices) if ptype == 'min': if values[1] - values[0] <= 0: return peak_full_indices[1::2] else: return peak_full_indices[::2] elif ptype == 'max': if values[1] - values[0] > 0: return peak_full_indices[1::2] else: return peak_full_indices[::2] return peak_full_indices def get_n_cyc_array(values, opt='all', start='origin'): """ Given an array, create an array of the same length that numbers the peaks Parameters ---------- values opt Returns ------- """ if opt == 'all': indys = get_peak_array_indices(values) elif 
opt == 'switched': indys = get_switched_peak_array_indices(values) else: raise ValueError('opt must be either "all" or "switched"') # each indy corresponds to half a cycle if start == 'origin': svalue = -0.25 elif start == 'peak': svalue = 0.0 else: raise ValueError('start must be either "origin" or "peak"') if indys[0] != 0: indys = np.insert(indys, 0, 0) n_cycs = 0.5 * np.arange(len(indys)) n_cycs[1:] += svalue return np.interp(np.arange(len(values)), indys, n_cycs) def get_peak_indices(asig): return get_peak_array_indices(asig.values) def get_zero_crossings_array_indices(values, keep_adj_zeros=False): """ Find the indices for values that are equal to zero or just passed through zero Parameters ---------- values: array_like array of values keep_adj_zeros: bool, if false then if adjacent zeros are found, only first is included :return: Examples -------- >>> values = np.array([0, 2, 1, 2, -1, 1, 0, 0, 1, 0.3, 0, -1, 0.2, 1, 0.2]) np.array([0, 2, 1, 2, -1, 1, 0, 0, 1, 0.3, 0, -1, 0.2, 1, 0.2]) >>> get_zero_crossings_array_indices(values, keep_adj_zeros=False) np.array([0, 4, 5, 6, 10, 12]) """ # enforce array type values = np.array(values, dtype=float) # get all zero values zero_indices = np.where(values == 0)[0] if not keep_adj_zeros and len(zero_indices) > 1: diff_is = np.ediff1d(zero_indices, to_begin=10) no_adj_is = np.where(diff_is > 1)[0] zero_indices = np.take(zero_indices, no_adj_is) # if negative then sign has switched sign_switch = values[1:] * values[:-1] sign_switch = np.insert(sign_switch, 0, values[0]) through_zero_indices = np.where(sign_switch < 0)[0] all_zc_indices = np.concatenate((zero_indices, through_zero_indices)) all_zc_indices.sort() if all_zc_indices[0] != 0: all_zc_indices = np.insert(all_zc_indices, 0, 0) # slow return all_zc_indices def get_zero_crossings_indices(asig): return get_zero_crossings_array_indices(asig.values) def get_major_change_indices(y, rtol=1.0e-8, atol=1.0e-5, already_diff=False, dx=1): """Get indices where a 
significant change in slope occurs""" if already_diff: dydx = y else: dydx = np.diff(y, prepend=y[0]) / dx inds = [0] z_cur = 0 npts = len(dydx) i = 1 while z_cur + i < npts - 1: end_z = z_cur + i av_dydx = np.mean(dydx[z_cur: end_z]) if not np.isclose(av_dydx, dydx[end_z + 1], rtol=rtol, atol=atol): inds.append(end_z) z_cur = end_z + 1 i = 0 i += 1 inds.append(npts - 1) return inds def determine_peaks_only_delta_series(values): """ Creates an array with the changes between peak values and zeros for non-peak values. Parameters ---------- :param values: array_like, array of values :return: Examples -------- >>> values = np.array([0, 2, 1, 2, 0, 1, 0, -1, 0, 1, 0]) np.array([0, 2, 1, 2, 0.3, 1, 0.3, -1, 0.4, 1, 0]) >>> determine_peaks_only_delta_series(values) array([0, 2, -1, 1, 0, -1, 0, -2, 0, 2, 0]) """ # enforce array type values = np.array(values) # rebase to zero as first value values -= values[0] # remove all non-changing values cleaned_values, non_zero_indices = clean_out_non_changing(values) cleaned_values *= np.sign(cleaned_values[1]) # ensure first value is increasing # compute delta peaks for cleaned data cleaned_delta_peak_series = determine_peak_only_delta_series_4_cleaned_data(cleaned_values) # re-index data to uncleaned array delta_peaks_series = np.zeros_like(values) np.put(delta_peaks_series, non_zero_indices, cleaned_delta_peak_series) return delta_peaks_series def determine_pseudo_cyclic_peak_only_series(values): """ Creates an array with only peak values assuming an alternative sign and zeros for non-peak values. 
Parameters ---------- :param values: array_like, array of values :return: Examples -------- >>> values = np.array([0, 2, 1, 2, 0, 1, 0, -1, 0, 1, 0]) np.array([0, 2, 1, 2, 0.3, 1, 0.3, -1, 0.4, 1, 0]) >>> determine_pseudo_cyclic_peak_only_series(values) array([0, 2, -1, 2, 0, 1, 0, 1, 0, 1, 0]) """ # enforce array type values = np.array(values) # rebase to zero as first value values -= values[0] # remove all non-changing values cleaned_values, non_zero_indices = clean_out_non_changing(values) cleaned_values *= np.sign(cleaned_values[1]) # ensure first value is increasing # compute delta peaks for cleaned data cleaned_delta_peak_series = _determine_peak_only_series_4_cleaned_data(cleaned_values) # re-index data to uncleaned array delta_peaks_series = np.zeros_like(values) np.put(delta_peaks_series, non_zero_indices, cleaned_delta_peak_series) return delta_peaks_series def fas2signal(fas, dt, stype="signal"): """ Convert a fourier spectrum to time series signal Parameters ---------- fas: array_like of img floats Positive part only dt: float time step of time series stype: str If 'signal' then return Signal, else return AccSignal """ from eqsig.single import Signal, AccSignal n = 2 * len(fas) a = np.zeros(2 * len(fas), dtype=complex) a[1:n // 2] = fas[1:] a[n // 2 + 1:] = np.flip(np.conj(fas[1:]), axis=0) a /= dt s = np.fft.ifft(a) npts = int(2 ** (np.log(n) / np.log(2))) s = s[:npts] if stype == 'signal': return Signal(s, dt) else: return AccSignal(s, dt) def fas2values(fas, dt): """ Convert a fourier spectrum to time series signal Parameters ---------- fas: array_like of img floats Positive part only dt: float time step of time series stype: str If 'signal' then return Signal, else return AccSignal """ n = 2 * len(fas) a = np.zeros(2 * len(fas), dtype=complex) a[1:n // 2] = fas[1:] a[n // 2 + 1:] = np.flip(np.conj(fas[1:]), axis=0) a /= dt s = np.fft.ifft(a) npts = int(2 ** (np.log(n) / np.log(2))) s = s[:npts] return s def generate_fa_spectrum(sig, n_pad=True): """ 
Produces the Fourier amplitude spectrum Parameters ---------- sig: eqsig.Signal Returns ------- fa_spectrum: complex array_like Complex values of the spectrum fa_frequencies: array_like Frequencies of the spectrum """ npts = sig.npts if n_pad: n_factor = 2 ** int(np.ceil(np.log2(npts))) fa = np.fft.fft(sig.values, n=n_factor) points = int(n_factor / 2) assert len(fa) == n_factor else: fa = np.fft.fft(sig.values) points = int(sig.npts / 2) fa_spectrum = fa[range(points)] * sig.dt fa_frequencies = np.arange(points) / (2 * points * sig.dt) return fa_spectrum, fa_frequencies def calc_fa_spectrum(sig, n=None, p2_plus=None): """ Produces the Fourier amplitude spectrum Parameters ---------- sig: eqsig.Signal Returns ------- fa_spectrum: complex array_like Complex values of the spectrum fa_frequencies: array_like Frequencies of the spectrum """ npts = sig.npts if p2_plus is not None or n is not None: if n is not None: n_vals = n else: n_vals = 2 ** int(np.ceil(np.log2(npts)) + p2_plus) fa = np.fft.fft(sig.values, n=n_vals) points = int(n_vals / 2) assert len(fa) == n_vals else: fa = np.fft.fft(sig.values) points = int(sig.npts / 2) fa_spectrum = fa[range(points)] * sig.dt fa_frequencies = np.arange(points) / (2 * points * sig.dt) return fa_spectrum, fa_frequencies def interp_array_to_approx_dt(values, dt, target_dt=0.01, even=True): """ Interpolate an array of values to a new time step Similar to ``interp_to_approx_dt`` Only a target time step is provided and the algorithm determines what time step is best to minimise loss of data from aliasing Parameters ---------- values: array_like values of time series dt: float Time step target_dt: float Target time step even: bool If true then forces the number of time steps to be an even number Returns ------- new_values: array_like Interpolated value of time series new_dt: float New time step of interpolate time series """ factor = dt / target_dt if factor == 1: pass elif factor > 1: factor = int(np.ceil(factor)) else: factor = 1 / 
np.floor(1 / factor) t_int = np.arange(len(values)) new_npts = factor * len(values) if even: new_npts = 2 * int(new_npts / 2) t_db = np.arange(new_npts) / factor acc_interp = np.interp(t_db, t_int, values) return acc_interp, dt / factor def interp_to_approx_dt(asig, target_dt=0.01, even=True): """ Interpolate a signal to a new time step Only a target time step is provided and the algorithm determines what time step is best to minimise loss of data from aliasing Parameters ---------- asig: eqsig.AccSignal Acceleration time series object target_dt: float Target time step even: bool If true then forces the number of time steps to be an even number Returns ------- new_asig: eqsig.AccSignal Acceleration time series object of interpolated time series """ acc_interp, dt_interp = interp_array_to_approx_dt(asig.values, asig.dt, target_dt=target_dt, even=even) return eqsig.AccSignal(acc_interp, dt_interp) def resample_to_approx_dt(asig, target_dt=0.01, even=True): """ Resample a signal assuming periodic to a new time step Only a target time step is provided and the algorithm determines what time step is best to minimise loss of data from aliasing Parameters ---------- asig: eqsig.AccSignal Acceleration time series object target_dt: float Target time step even: bool If true then forces the number of time steps to be an even number Returns ------- """ from scipy.signal import resample factor = asig.dt / target_dt if factor == 1: pass elif factor > 1: factor = int(np.ceil(factor)) else: factor = 1 / np.floor(1 / factor) new_npts = factor * asig.npts if even: new_npts = 2 * int(new_npts / 2) acc_interp = resample(asig.values, new_npts) return eqsig.AccSignal(acc_interp, asig.dt / factor) def get_switched_peak_indices(asig): """ Find the indices for largest peak between each zero crossing Parameters ---------- asig: eqsig.AccSignal Returns ------- array_like """ values = asig if hasattr(asig, "values"): values = asig.values return get_switched_peak_array_indices(values) def 
get_switched_peak_array_indices(values): """ Find the indices for largest peak between each zero crossing Parameters ---------- values: array_like Returns ------- array_like """ peak_indices = get_peak_array_indices(values) peak_values = np.take(values, peak_indices) last = peak_values[0] new_peak_indices = [] peak_values_set = [0] peak_indices_set = [0] for i in range(1, len(peak_values)): if peak_values[i] * last <= 0: # only add index if sign changes (negative number) i_max_set = np.argmax(np.abs(peak_values_set)) new_peak_indices.append(peak_indices_set[i_max_set]) last = peak_values[i] peak_values_set = [] # reset set peak_indices_set = [] peak_values_set.append(peak_values[i]) peak_indices_set.append(i) if len(peak_values_set): # add last i_max_set = np.argmax(np.abs(peak_values_set)) new_peak_indices.append(peak_indices_set[i_max_set]) peak_values_set.append(peak_values[i]) peak_indices_set.append(i) switched_peak_indices = np.take(peak_indices, new_peak_indices) return switched_peak_indices def get_sig_array_indexes_range(fas1_smooth, ratio=15): max_fas1 = max(fas1_smooth) lim_fas = max_fas1 / ratio indys = np.where(fas1_smooth > lim_fas)[0] return indys[0], indys[-1] # min_freq_i = 10000 # max_freq_i = 10000 # for i in range(len(fas1_smooth)): # if fas1_smooth[i] > lim_fas: # min_freq_i = i # break # for i in range(len(fas1_smooth)): # if fas1_smooth[-1 - i] > lim_fas: # max_freq_i = len(fas1_smooth) - i # break # return min_freq_i, max_freq_i def get_sig_freq_range(asig, ratio=15): indices = get_sig_array_indexes_range(asig.smooth_fa_spectrum, ratio=ratio) return np.take(asig.smooth_fa_frequencies, indices) def calc_fourier_moment(asig, n): """ Original source unknown. 
See :cite:`Rathje:2008va` Parameters ---------- asig n Returns ------- """ return 2 * np.trapz((2 * np.pi * asig.fa_frequencies) ** n * asig.fa_spectrum ** 2, x=asig.fa_frequencies) def get_bandwidth_boore_2003(asig): m0 = calc_fourier_moment(asig, 0) m2 = calc_fourier_moment(asig, 2) m4 = calc_fourier_moment(asig, 4) return np.sqrt(m2 ** 2 / (m0 * m4)) def put_array_in_2d_array(values, shifts, clip='none'): """ Creates a 2D array where values appear on each line, shifted by a set of indices Parameters ---------- values: array_like (1d) Values to be shifted shifts: array_like (int) Indices to shift values clip: str or none if 'end' then returned 2D array trims values that overlap end of input values array Returns ------- array_like (2D) """ npts = len(values) # assert shifts is integer array end_extras = np.max([np.max(shifts), 0]) start_extras = - np.min([np.min(shifts), 0]) out = np.zeros((len(shifts), npts + start_extras + end_extras)) for i, j in enumerate(shifts): out[i, start_extras + j:start_extras + npts + j] = values if clip in ['end', 'both'] and end_extras > 0: out = out[:, :-end_extras] if clip in ['start', 'both']: return out[:, start_extras:] else: return out def join_sig_w_time_shift(sig, time_shifts, jtype='add'): """ Zero pads values of a signal by an array of time shifts and joins it with the original Parameters ---------- sig: eqsig.Signal signal to be shifted time_shifts: array_like Time shifts to be performed jtype: str (default='add') if = 'add' then shifted and original signals are added, if ='sub' then subtracted Returns ------- shifted_values: array_like [2D shape(len(sig.values), len(shift))] """ shifts = np.array(time_shifts / sig.dt, dtype=int) values = sig.values return join_values_w_shifts(values, shifts, jtype=jtype) def join_values_w_shifts(values, shifts, jtype='add'): """ Zero pads values by an array of shifts and joins it with the original values Parameters ---------- values: array_like values to be shifted shifts: array_like 
[int] Shifts to be performed jtype: str (default='add') if = 'add' then shifted and original values are added, if ='sub' then subtracted Returns ------- shifted_values: array_like [2D shape(len(values), len(shift))] """ a0 = np.pad(values, (0, np.max(shifts)), mode='constant', constant_values=0) # 1d a1 = put_array_in_2d_array(values, shifts) if jtype == 'add': return a1 + a0 elif jtype == 'sub': return -a1 + a0 def get_section_average(series, start=0, end=-1, index=False): """ Gets the average value of a part of series. Common use is so that it can be patched with another record. :param series: A TimeSeries object :param start: int or float, optional, Section start point :param end: int or float, optional, Section end point :param index: bool, optional, if False then start and end are considered values in time. :return float, The mean value of the section. """ s_index, e_index = time_indices(series.npts, series.dt, start, end, index) section_average = np.mean(series.values[s_index:e_index]) return section_average def time_indices(npts, dt, start, end, index): """ Determine the new start and end indices of the time series. :param npts: Number of points in original time series :param dt: Time step of original time series :param start: int or float, optional, New start point :param end: int or float, optional, New end point :param index: bool, optional, if False then start and end are considered values in time. 
:return: tuple, start index, end index """ if index is False: # Convert time values into indices if end != -1: e_index = int(end / dt) + 1 else: e_index = end s_index = int(start / dt) else: s_index = start e_index = end if e_index > npts: raise exceptions.SignalProcessingWarning("Cut point is greater than time series length") return s_index, e_index def calc_smooth_fa_spectrum(fa_frequencies, fa_spectrum, smooth_fa_frequencies=None, band=40): """ Calculates the smoothed Fourier Amplitude Spectrum using the method by Konno and Ohmachi (1998) Note: different order of inputs than generate_smooth_fa_spectrum Parameters ---------- smooth_fa_frequencies: array_like Frequencies to compute the smoothed amplitude fa_frequencies: array_like Frequencies of the Fourier amplitude spectrum fa_spectrum: array_like Amplitudes of the Fourier amplitude spectrum band: window parameter Returns ------- smoothed_fa_spectrum: array_like Amplitudes of smoothed Fourier spectrum at specified frequencies """ if fa_frequencies[0] == 0: fa_frequencies = fa_frequencies[1:] fa_spectrum = fa_spectrum[1:] if smooth_fa_frequencies is None: smooth_fa_frequencies = fa_frequencies amp_array = band * np.log10(fa_frequencies[:, np.newaxis] / smooth_fa_frequencies[np.newaxis, :]) wb_vals = (np.sin(amp_array) / amp_array) ** 4 wb_vals = np.where(amp_array == 0, 1, wb_vals) wb_vals /= np.sum(wb_vals, axis=0) return np.sum(abs(fa_spectrum)[:, np.newaxis] * wb_vals, axis=0) # return np.dot(abs(fa_spectrum), wb_vals) def generate_smooth_fa_spectrum(smooth_fa_frequencies, fa_frequencies, fa_spectrum, band=40): """Deprecated - use calc_smooth_fa_spectrum""" return calc_smooth_fa_spectrum(fa_frequencies, fa_spectrum, smooth_fa_frequencies, band=band) def calc_smoothing_matrix_konno_1998(fa_frequencies, smooth_fa_frequencies=None, band=40): """ Calculates the smoothing matrix for computing the smoothed Fourier Amplitude Spectrum using the method by Konno and Ohmachi 1998 Parameters ---------- fa_frequencies: 
array_like Frequencies of FAS smooth_fa_frequencies: array_like Frequencies that smooth FAS should be computed at band: int Bandwidth of smoothing function Returns ------- 2d-array_like """ if fa_frequencies[0] == 0: fa_frequencies = fa_frequencies[1:] if smooth_fa_frequencies is None: smooth_fa_frequencies = fa_frequencies amp_array = band * np.log10(fa_frequencies[:, np.newaxis] / smooth_fa_frequencies[np.newaxis, :]) wb_vals = (np.sin(amp_array) / amp_array) ** 4 wb_vals = np.where(amp_array == 0, 1, wb_vals) wb_vals /= np.sum(wb_vals, axis=0) return wb_vals def calc_smooth_fa_spectrum_w_custom_matrix(asig, smooth_matrix): """ Calculates the smoothed Fourier Amplitude Spectrum using a custom filter """ return np.dot(abs(asig.fa_spectrum[1:]), smooth_matrix) # def dep_generate_smooth_fa_spectrum(smooth_fa_frequencies, fa_frequencies, fa_spectrum, band=40): # if fa_frequencies[0] == 0: # fa_frequencies = fa_frequencies[1:] # fa_spectrum = fa_spectrum[1:] # smooth_fa_spectrum = np.zeros_like(smooth_fa_frequencies) # TODO: remove for loop # for i in range(smooth_fa_frequencies.size): # f_centre = smooth_fa_frequencies[i] # amp_array = band * np.log10(fa_frequencies / f_centre) # wb_vals = np.where(amp_array == 0, 1, (np.sin(amp_array) / amp_array) ** 4) # # smooth_fa_spectrum[i] = (np.sum(abs(fa_spectrum) * wb_vals) / np.sum(wb_vals)) # return smooth_fa_spectrum def calc_step_fn_vals_error(values, pow=1, dir=None): """ Calculates the error function generated by fitting a step function to the values Note: Assumes minimum error is at the minimum sum of the error, regardless of the `pow`. I.e. The best fit is taken as the mean of the values. 
Parameters ---------- values: array_like pow: int The power that the error should be raised to dir: str Desired direction of the step function if 'down', then all upward steps are set to 10x maximum error if 'up', then all downward steps are set to 10x maximum error else, no modification to error Returns ------- array_like (len same as input array) """ values = np.array(values) npts = len(values) pre_a = np.tril(values, k=0) post_a = np.triu(values, k=0) pre_n = np.arange(1, len(values) + 1) post_n = np.arange(len(values), 0, -1) pre_mean = np.sum(pre_a, axis=1) / pre_n post_mean = np.sum(post_a, axis=1) / post_n err_pre = np.sum(np.abs(pre_a - pre_mean[:, np.newaxis]) ** pow, axis=1) - (npts - pre_n) * pre_mean ** pow err_post = np.sum(np.abs(post_a - post_mean[:, np.newaxis]) ** pow, axis=1) - (npts - post_n) * post_mean ** pow # case of 0 has been remove, n + 1 options # consider cases where it happens in between two steps err = np.ones_like(values) err[:-1] = err_post[1:] + err_pre[:-1] err[-1] = np.sum(np.abs(values - np.mean(values)) ** pow) if dir == 'down': # if step has to be downward, then increase error for upward steps max_err = np.max(err) err = np.where(pre_mean < post_mean, max_err * 10, err) if dir == 'up': max_err = np.max(err) err = np.where(pre_mean > post_mean, max_err * 10, err) return err def calc_step_fn_steps_vals(values, ind=None): if ind is None: ind = np.argmin(calc_step_fn_vals_error(values)) pre = np.mean(values[:ind]) post = np.mean(values[ind + 1:]) return pre, post def calc_roll_av_vals(values, steps, mode='forward'): """ Calculates the rolling average of a series of values Parameters ---------- values: array_like steps: int size of window to average over mode: str (default='forward') if 'forward' current value at start of window if 'backward' current value at end of window if 'centre' or 'center' current value in centre of window Returns ------- array_like (len same as input array) """ values = np.array(values) steps = int(steps) if 
mode == 'forward': x_ext = np.concatenate([values, values[-1] * np.ones(steps - 1)]) elif mode == 'backward': x_ext = np.concatenate([values[0] * np.ones(steps - 1), values]) else: s = int(np.floor(steps / 2)) e = steps - s - 1 x_ext = np.concatenate([values[0] * np.ones(s), values, values[-1] * np.ones(e)]) csum = np.zeros(len(values) + steps) csum[1:] = np.cumsum(x_ext, dtype=float) return (csum[steps:] - csum[:-steps]) / steps def interp2d(x, xf, f): """ Can interpolate a table to get an array of values in 2D Parameters ---------- x: array_like 1d array of values to be interpolated xf: 1d array of values f: array_like 2d array of function values size=(len(x), n) Returns ------- Examples -------- >>> f = np.array([[0, 0, 0], >>> [0, 1, 4], >>> [2, 6, 2], >>> [10, 10, 10] >>> ]) >>> xf = np.array([0, 1, 2, 3]) >>> x = np.array([0.5, 1, 2.2, 2.5]) >>> f_interp = interp2d(x, xf, f) >>> print(f_interp[0][0]) 0.0 >>> print(f_interp[0][1]) 0.5 >>> print(f_interp[0][2]) 2.0 """ ind = np.argmin(np.abs(x[:, np.newaxis] - xf), axis=1) x_ind = xf[ind] ind0 = np.where(x_ind > x, ind - 1, ind) ind1 = np.where(x_ind > x, ind, ind + 1) ind0 = np.clip(ind0, 0, None) ind1 = np.clip(ind1, None, len(xf) - 1) f0 = f[ind0] f1 = f[ind1] a0 = xf[ind0] a1 = xf[ind1] denom = (a1 - a0) denom_adj = np.clip(denom, 1e-10, None) # to avoid divide by zero warning s0 = np.where(denom > 0, (x - a0) / denom_adj, 1) # if denom less than 0, then out of bounds s1 = 1 - s0 return s1[:, np.newaxis] * f0 + s0[:, np.newaxis] * f1 def interp_left(x0, x, y=None): """ Interpolation takes the lower value Parameters ---------- x0: array_like Values to be interpolated on x-axis x: array_like Existing values on x-axis y: array_like Existing y-axis values Returns ------- """ if y is None: y = np.arange(len(x)) else: y = np.array(y) is_scalar = False if not hasattr(x0, '__len__'): is_scalar = True x0 = [x0] assert min(x0) >= x[0], (min(x0), x[0]) inds = np.searchsorted(x, x0, side='right') - 1 if is_scalar: 
return y[inds][0] return y[inds] def remove_poly(values, poly_fit=0): """ Calculates best fit polynomial and removes it from the record """ x = np.linspace(0, 1.0, len(values)) cofs = np.polyfit(x, values, poly_fit) y_cor = 0 * x for co in range(len(cofs)): mods = x ** (poly_fit - co) y_cor += cofs[co] * mods return values - y_cor def gen_ricker_wavelet_asig(omega, t0, duration, dt): """ Generates an acceleration time series that is a Ricker wavelet Parameters ---------- omega t0 duration: float Total duration of motion dt: float Time step of motion Returns ------- """ t = np.arange(0, duration, dt) vel_amp = (2.0 * (np.pi ** 2.0) * (omega ** 2.0) * ((t - t0) ** 2.0) - 1.0) * np.exp( - (np.pi ** 2.0) * (omega ** 2.0) * (t - t0) ** 2.0) acc = np.zeros_like(vel_amp) acc[1:] = np.diff(vel_amp) / dt return eqsig.AccSignal(acc, dt) if __name__ == '__main__': x0 = [0, 1, 5] x = [0, 2, 6] y = [1.5, 2.5, 3.5] y_new = interp_left(x0, x, y) expected = np.array([1.5, 1.5, 2.5]) assert np.isclose(y_new, expected).all(), y_new x0 = [0, 2, 6] y_new = interp_left(x0, x, y) expected = np.array([1.5, 2.5, 3.5]) assert np.isclose(y_new, expected).all(), y_new x0 = [-1, 2, 6] y_new = interp_left(x0, x, y) expected = np.array([1.5, 2.5, 3.5]) # assert np.isclose(y_new, expected).all(), y_new print(y_new)
29.678076
116
0.628109
bbfab51ced21a16282ca8153189b3c749bcf1c3f
948
py
Python
scripts/numpytest.py
cscorley/triage
b7a2de8c5955418acf2b9bd93953a83327052257
[ "MIT" ]
3
2019-01-13T13:03:53.000Z
2020-05-03T21:34:43.000Z
scripts/numpytest.py
cscorley/triage
b7a2de8c5955418acf2b9bd93953a83327052257
[ "MIT" ]
null
null
null
scripts/numpytest.py
cscorley/triage
b7a2de8c5955418acf2b9bd93953a83327052257
[ "MIT" ]
1
2018-09-30T13:42:10.000Z
2018-09-30T13:42:10.000Z
import numpy as np import numpy.random as npr import time # modified from # https://hunseblog.wordpress.com/2014/09/15/installing-numpy-and-openblas/ np.show_config() # --- Test 1 N = 1 n = 1000 A = npr.randn(n,n) B = npr.randn(n,n) t = time.time() for i in range(N): C = np.dot(A, B) td = time.time() - t print("dotted two (%d,%d) matrices in %0.1f ms" % (n, n, 1e3*td/N)) # --- Test 2 N = 100 n = 4000 A = npr.randn(n) B = npr.randn(n) t = time.time() for i in range(N): C = np.dot(A, B) td = time.time() - t print("dotted two (%d) vectors in %0.2f us" % (n, 1e6*td/N)) # --- Test 3 m,n = (2000,1000) A = npr.randn(m,n) t = time.time() [U,s,V] = np.linalg.svd(A, full_matrices=False) td = time.time() - t print("SVD of (%d,%d) matrix in %0.3f s" % (m, n, td)) # --- Test 4 n = 1500 A = npr.randn(n,n) t = time.time() w, v = np.linalg.eig(A) td = time.time() - t print("Eigendecomp of (%d,%d) matrix in %0.3f s" % (n, n, td))
17.555556
75
0.583333
323eccad61557f1ed069af7dc0d1541ccef7f374
3,185
py
Python
testCases/client/clients_list/mobile/TC_209_101_mobile_create_private_list_test.py
harry-100/qa-automation-framework
5fbe03e930820537e53f2d26b1c2b2bd2b222bf5
[ "MIT" ]
null
null
null
testCases/client/clients_list/mobile/TC_209_101_mobile_create_private_list_test.py
harry-100/qa-automation-framework
5fbe03e930820537e53f2d26b1c2b2bd2b222bf5
[ "MIT" ]
null
null
null
testCases/client/clients_list/mobile/TC_209_101_mobile_create_private_list_test.py
harry-100/qa-automation-framework
5fbe03e930820537e53f2d26b1c2b2bd2b222bf5
[ "MIT" ]
null
null
null
import pytest from time import sleep from utilities import XLUtility from pageObjects.common_functions.common_methods import CommonMethods @pytest.mark.usefixtures("one_time_setup") class Test_TC209_001_MobileCreatePrivateList: @pytest.fixture(autouse=True) def class_setup(self, one_time_setup): self.driver.set_window_size(411, 823) self.logIn() def test_mobile_create_private_client_list(self): common = CommonMethods(self.driver) self.log.info("starting test {}...".format(__name__)) client_name = [] # get the required data list_name = XLUtility.readData(self.path_1, 'clients_list_mobile_data', 2, 2) tag_name = XLUtility.readData(self.path_1, 'clients_list_mobile_data', 2, 3) client_name.append(XLUtility.readData(self.path_1, 'clients_list_mobile_data', 2, 4)) client_name.append(XLUtility.readData(self.path_1, 'clients_list_mobile_data', 2, 5)) # Add tags self.login_page_obj.clk_navigation_btn() self.login_page_obj.clk_mobile_settings_practice_details() self.waitlist_tags_page_obj.clk_settings_tags() self.waitlist_tags_page_obj.input_settings_tag_title(tag_name) self.waitlist_tags_page_obj.clk_settings_add_tag() for i in range(2): self.login_page_obj.clk_navigation_btn() self.client_page_obj.clk_all_clients_mobile() self.client_page_obj. 
mobile_sel_client_name(client_name[i]) self.client_page_obj.clk_view_client_mobile() sleep(2) self.client_page_obj.clk_mobile_open_client_menu() sleep(1) #self.driver.refresh() #self.waitlist_tags_page_obj.clk_client_add_tags() self.waitlist_tags_page_obj.sel_mobile_client_tag(tag_name) sleep(2) self.waitlist_tags_page_obj.clk_mobile_client_profile_close() #self.driver.refresh() self.login_page_obj.clk_navigation_btn() self.client_page_obj.clk_all_clients_mobile() self.client_page_obj.clk_mobile_open_client_menu() # Create a Client List self.clients_list_page_obj.clk_add_custom_list() self.clients_list_page_obj.input_list_name(list_name) self.clients_list_page_obj.input_tag_name(tag_name) self.clients_list_page_obj.clk_table_columns() self.clients_list_page_obj.clk_column_first_name() self.clients_list_page_obj.clk_column_last_name() self.clients_list_page_obj.clk_column_city() self.clients_list_page_obj.clk_add_client_list() sleep(2) clients_list = self.clients_list_page_obj.get_clients_private_list() print("client lists ", clients_list) # delete the list created no_of_lists = self.clients_list_page_obj.get_no_of_mobile_client_lists() for i in range(no_of_lists): self.clients_list_page_obj.clk_mobile_client_list() sleep(1) self.clients_list_page_obj.clk_mobile_delete_client_list() self.clients_list_page_obj.clk_mobile_confirm_delete_client_list() # delete the tag created self.login_page_obj.clk_navigation_btn() self.login_page_obj.clk_mobile_settings_practice_details() self.waitlist_tags_page_obj.clk_settings_tags() # delete any existing tags in settings common.delete_settings_tags() if list_name in clients_list: self.log.info("{} passed!".format(__name__)) assert True else: self.log.info("{} failed!".format(__name__)) assert False
36.609195
87
0.807221
7f3f7f578f1a6252210f5331e0457b43b0fab8b9
8,234
py
Python
dejavu/database_handler/clickhouse_database.py
Saoqq/dejavu
8d76350bb754ed5246ab735e72680d4b8e00d507
[ "MIT" ]
1
2021-08-17T18:22:18.000Z
2021-08-17T18:22:18.000Z
dejavu/database_handler/clickhouse_database.py
Saoqq/dejavu
8d76350bb754ed5246ab735e72680d4b8e00d507
[ "MIT" ]
null
null
null
dejavu/database_handler/clickhouse_database.py
Saoqq/dejavu
8d76350bb754ed5246ab735e72680d4b8e00d507
[ "MIT" ]
null
null
null
import queue import uuid from typing import List, Tuple, Dict from clickhouse_driver import connect from dejavu.base_classes.common_database import CommonDatabase from dejavu.config.settings import (FIELD_FILE_SHA1, FIELD_FINGERPRINTED, FIELD_HASH, FIELD_OFFSET, FIELD_SONG_ID, FIELD_SONGNAME, FIELD_TOTAL_HASHES, FINGERPRINTS_TABLENAME, SONGS_TABLENAME) class ClickhouseDatabase(CommonDatabase): type = "clickhouse" CREATE_SONGS_TABLE = f''' CREATE TABLE IF NOT EXISTS {SONGS_TABLENAME}( {FIELD_SONG_ID} UUID, {FIELD_SONGNAME} String, {FIELD_FINGERPRINTED} Int8 DEFAULT 1, {FIELD_FILE_SHA1} String, `date_created` DateTime DEFAULT now(), {FIELD_TOTAL_HASHES} Int32 ) ENGINE = MergeTree() PARTITION BY toYYYYMM(date_created) ORDER BY tuple() ''' CREATE_FINGERPRINTS_TABLE = f''' CREATE TABLE IF NOT EXISTS {FINGERPRINTS_TABLENAME} ( {FIELD_HASH} String, {FIELD_SONG_ID} UUID, {FIELD_OFFSET} Int32, `date_created` DateTime DEFAULT now() ) ENGINE = MergeTree() PARTITION BY toYYYYMM(date_created) ORDER BY (`date_created`, {FIELD_SONG_ID}) ''' INSERT_FINGERPRINT = f''' INSERT INTO {FINGERPRINTS_TABLENAME} ( {FIELD_SONG_ID}, {FIELD_HASH}, {FIELD_OFFSET} ) VALUES ''' INSERT_SONG = f''' INSERT INTO {SONGS_TABLENAME} ( {FIELD_SONG_ID}, {FIELD_SONGNAME}, {FIELD_FILE_SHA1}, {FIELD_TOTAL_HASHES} ) VALUES (%(0)s, %(1)s, %(2)s, %(3)s) ''' SELECT = f''' SELECT {FIELD_SONG_ID}, {FIELD_OFFSET} FROM {FINGERPRINTS_TABLENAME} WHERE {FIELD_HASH} = (%(0)s) ''' SELECT_MULTIPLE = f''' SELECT upper({FIELD_HASH}), {FIELD_SONG_ID}, {FIELD_OFFSET} FROM {FINGERPRINTS_TABLENAME} WHERE upper({FIELD_HASH}) IN (%(0)s) ''' SELECT_ALL = f'SELECT {FIELD_SONG_ID}, {FIELD_OFFSET} FROM {FINGERPRINTS_TABLENAME}' SELECT_SONG = f''' SELECT {FIELD_SONGNAME}, upper({FIELD_FILE_SHA1}) AS {FIELD_FILE_SHA1}, {FIELD_TOTAL_HASHES} FROM {SONGS_TABLENAME} WHERE {FIELD_SONG_ID} = toUUID(%(0)s) ''' SELECT_NUM_FINGERPRINTS = f'SELECT count() AS n FROM {FINGERPRINTS_TABLENAME}' SELECT_UNIQUE_SONG_IDS = f''' SELECT 
count({FIELD_SONG_ID}) AS n FROM {SONGS_TABLENAME} WHERE {FIELD_FINGERPRINTED} = 1; ''' SELECT_SONGS = f''' SELECT {FIELD_SONG_ID}, {FIELD_SONGNAME}, upper({FIELD_FILE_SHA1}) AS {FIELD_FILE_SHA1}, {FIELD_TOTAL_HASHES}, date_created FROM {SONGS_TABLENAME} WHERE {FIELD_FINGERPRINTED} = 1; ''' IN_MATCH = f'%s' DROP_FINGERPRINTS = f'DROP TABLE IF EXISTS {FINGERPRINTS_TABLENAME}' DROP_SONGS = f'DROP TABLE IF EXISTS {SONGS_TABLENAME}' DELETE_UNFINGERPRINTED = 'SELECT now()' def __init__(self, **options): super().__init__() self.cursor = cursor_factory(**options) self._options = options def after_fork(self) -> None: # Clear the cursor cache, we don't want any stale connections from # the previous process. Cursor.clear_cache() def insert_song(self, song_name: str, file_hash: str, total_hashes: int) -> str: """ Inserts a song name into the database, returns the new identifier of the song. :param song_name: The name of the song. :param file_hash: Hash from the fingerprinted file. :param total_hashes: amount of hashes to be inserted on fingerprint table. :return: the inserted id. 
""" song_id = str(uuid.uuid4()) with self.cursor() as cur: params = [song_id, song_name, file_hash, total_hashes] keys = [str(x) for x in range(len(params))] cur.execute(self.INSERT_SONG, dict(zip(keys, params))) # cur.execute(self.INSERT_SONG, [song_id, song_name, file_hash, total_hashes]) return song_id def __getstate__(self): return self._options, def __setstate__(self, state): self._options, = state self.cursor = cursor_factory(**self._options) def delete_unfingerprinted_songs(self) -> None: raise NotSupportedException('Not supported') def delete_songs_by_id(self, song_ids: List[int], batch_size: int = 1000) -> None: raise NotSupportedException('Not supported') def set_song_fingerprinted(self, song_id): raise NotSupportedException('Not supported') def insert_hashes(self, song_id: int, hashes: List[Tuple[str, int]], batch_size: int = 10000) -> None: super().insert_hashes(song_id, hashes, batch_size) def return_matches(self, hashes: List[Tuple[str, int]], batch_size: int = 5000) -> Tuple[ List[Tuple[int, int]], Dict[int, int]]: # Create a dictionary of hash => offset pairs for later lookups mapper = {} for hsh, offset in hashes: if hsh.upper() in mapper.keys(): mapper[hsh.upper()].append(offset) else: mapper[hsh.upper()] = [offset] values = list(mapper.keys()) # in order to count each hash only once per db offset we use the dic below dedup_hashes = {} results = [] with self.cursor() as cur: for index in range(0, len(values), batch_size): # Create our IN part of the query cur.execute(self.SELECT_MULTIPLE, {"0": values[index: index + batch_size]}) for hsh, sid, offset in cur: if sid not in dedup_hashes.keys(): dedup_hashes[sid] = 1 else: dedup_hashes[sid] += 1 # we now evaluate all offset for each hash matched for song_sampled_offset in mapper[hsh]: results.append((sid, offset - song_sampled_offset)) return results, dedup_hashes def get_song_by_id(self, song_id: int) -> Dict[str, str]: with self.cursor(dictionary=True) as cur: cur.execute(self.SELECT_SONG, {"0": 
song_id}) res = cur.fetchone() return { f'{FIELD_SONGNAME}': res[0], f'{FIELD_FILE_SHA1}': res[1], f'{FIELD_TOTAL_HASHES}': res[2] } def cursor_factory(**factory_options): def cursor(**options): options.update(factory_options) return Cursor(**options) return cursor class NotSupportedException(Exception): def __init__(self, *args: object) -> None: super().__init__(*args) class Cursor(object): """ Establishes a connection to the database and returns an open cursor. # Use as context manager with Cursor() as cur: cur.execute(query) ... """ def __init__(self, dictionary=False, **options): super().__init__() self._cache = queue.Queue(maxsize=5) try: conn = self._cache.get_nowait() # Ping the connection before using it from the cache. conn.ping(True) except queue.Empty: conn = connect(**options) self.conn = conn self.dictionary = dictionary @classmethod def clear_cache(cls): cls._cache = queue.Queue(maxsize=5) def __enter__(self): if self.dictionary: self.cursor = self.conn.cursor() else: self.cursor = self.conn.cursor() return self.cursor def __exit__(self, extype, exvalue, traceback): # if we had a PostgreSQL related error we try to rollback the cursor. self.conn.commit() # Put it back on the queue try: self._cache.put_nowait(self.conn) except queue.Full: self.conn.close()
31.914729
106
0.585742
c130335d45f056052863c454ca7b73a3a54b7527
1,509
py
Python
calibration-tools/wiringpi_test_linear.py
mrwunderbar666/rpi-vumonitor-python
47e80b2811e513b9011b9ba8d0f1b1ab2c87c915
[ "MIT" ]
2
2017-10-15T23:21:48.000Z
2020-02-17T07:09:37.000Z
calibration-tools/wiringpi_test_linear.py
mrwunderbar666/rpi-vumonitor-python
47e80b2811e513b9011b9ba8d0f1b1ab2c87c915
[ "MIT" ]
null
null
null
calibration-tools/wiringpi_test_linear.py
mrwunderbar666/rpi-vumonitor-python
47e80b2811e513b9011b9ba8d0f1b1ab2c87c915
[ "MIT" ]
2
2020-02-17T07:09:40.000Z
2021-05-17T19:32:07.000Z
#!/usr/bin/python """ == VU Meter Calibration Toolkit == ==== Pulse Width Modulation and WiringPi GPIO Library ==== Hardware PWM Loop trough the min and max value uses linear function Requires: - Wiring Pi MIT License """ from __future__ import division import wiringpi import time import math # PWM Maximum Duty Cycle S = 200 # math stuff # Growth function constants B0 = 0 """ Configure your pin here """ vu_pin1 = 18 # Physical Pin 12 vu_pin2 = 13 # Physical Pin 33 OUTPUT = 2 wiringpi.wiringPiSetupGpio() # Linear function def B(t): return (S / 100) * t try: wiringpi.pinMode(vu_pin1, OUTPUT) wiringpi.pinMode(vu_pin2, OUTPUT) wiringpi.pwmWrite(vu_pin1, 0) # Setup PWM using Pin, Initial Value wiringpi.pwmWrite(vu_pin2, 0) while True: for i in range(100): pwm_float = B(i) wiringpi.pwmWrite(vu_pin1, int(pwm_float)) wiringpi.pwmWrite(vu_pin2, int(pwm_float)) print("i = {}, pwm_float = {}" .format(i, pwm_float)) time.sleep(0.1) for i in range(100, 0, -1): pwm_float = B(i) wiringpi.pwmWrite(vu_pin1, int(pwm_float)) wiringpi.pwmWrite(vu_pin2, int(pwm_float)) print("i = {}, pwm_float = {}" .format(i, pwm_float)) time.sleep(0.1) except KeyboardInterrupt: # manual cleanup wiringpi.pwmWrite(vu_pin1, 0) wiringpi.pwmWrite(vu_pin2, 0) wiringpi.pinMode(vu_pin1, 0) wiringpi.pinMode(vu_pin2, 0) pass
20.12
71
0.636846
130938f0b22b0908b0935b0bf7c92aa4b7a8e4e5
4,951
py
Python
core/src/autogluon/core/searcher/local_searcher.py
daobook/autogluon
7309118f2ab1c9519f25acf61a283a95af95842b
[ "Apache-2.0" ]
1
2020-09-02T01:10:25.000Z
2020-09-02T01:10:25.000Z
core/src/autogluon/core/searcher/local_searcher.py
daobook/autogluon
7309118f2ab1c9519f25acf61a283a95af95842b
[ "Apache-2.0" ]
3
2021-12-30T20:28:01.000Z
2022-02-09T20:19:21.000Z
core/src/autogluon/core/searcher/local_searcher.py
engsarah365/autogluon
bdbaac2d13d14d075b7aa751561f0bbd39927789
[ "Apache-2.0" ]
null
null
null
import logging import pickle from collections import OrderedDict from ..space import Categorical, Space __all__ = ['LocalSearcher'] logger = logging.getLogger(__name__) class LocalSearcher(object): """Local Searcher (virtual class to inherit from if you are creating a custom Searcher). Parameters ---------- search_space: dict The configuration space to sample from. It contains the full specification of the Hyperparameters with their priors """ def __init__(self, search_space, reward_attribute=None, **kwargs): """ :param search_space: Configuration space to sample from or search in :param reward_attribute: Reward attribute passed to update. Default: 'accuracy' """ self.search_space = search_space self._results = OrderedDict() if reward_attribute is None: reward_attribute = 'accuracy' self._reward_attribute = reward_attribute self._params_default = self._get_params_default() def _get_params_default(self) -> dict: params_default = dict() for key, val in self.search_space.items(): if isinstance(val, Space): if isinstance(val, Categorical): # FIXME: Don't do this, fix the outer code to not require this d = val.data[0] else: d = val.default params_default[key] = d return params_default # FIXME: Consider removing def configure_scheduler(self, scheduler): """ Some searchers need to obtain information from the scheduler they are used with, in order to configure themselves. This method has to be called before the searcher can be used. The implementation here sets _reward_attribute for schedulers which specify it. Args: scheduler: TaskScheduler Scheduler the searcher is used with. 
""" from ..scheduler.seq_scheduler import LocalSequentialScheduler if isinstance(scheduler, LocalSequentialScheduler): self._reward_attribute = scheduler._reward_attr @staticmethod def _reward_while_pending(): """Defines the reward value which is assigned to config, while it is pending.""" return float("-inf") def get_config(self, **kwargs): """Function to sample a new configuration This function is called inside TaskScheduler to query a new configuration Args: kwargs: Extra information may be passed from scheduler to searcher returns: (config, info_dict) must return a valid configuration and a (possibly empty) info dict """ raise NotImplementedError(f'This function needs to be overwritten in {self.__class__.__name__}.') def update(self, config, **kwargs): """Update the searcher with the newest metric report""" reward = kwargs.get(self._reward_attribute, None) assert reward is not None, "Missing reward attribute '{}'".format(self._reward_attribute) config_pkl = pickle.dumps(config) self._results[config_pkl] = reward def register_pending(self, config, milestone=None): """ Signals to searcher that evaluation for config has started, but not yet finished, which allows model-based searchers to register this evaluation as pending. For multi-fidelity schedulers, milestone is the next milestone the evaluation will attend, so that model registers (config, milestone) as pending. In general, the searcher may assume that update is called with that config at a later time. """ pass def evaluation_failed(self, config, **kwargs): """ Called by scheduler if an evaluation job for config failed. The searcher should react appropriately (e.g., remove pending evaluations for this config, and blacklist config). """ pass def get_best_reward(self): """Calculates the reward (i.e. validation performance) produced by training under the best configuration identified so far. Assumes higher reward values indicate better performance. 
""" if self._results: return max(self._results.values()) return self._reward_while_pending() def get_reward(self, config): """Calculates the reward (i.e. validation performance) produced by training with the given configuration. """ k = pickle.dumps(config) assert k in self._results return self._results[k] def get_best_config(self): """Returns the best configuration found so far. """ if self._results: config_pkl = max(self._results, key=self._results.get) return pickle.loads(config_pkl) else: return dict()
36.404412
131
0.647142
6bca191e6311ab97e50dc2fa57c9bbc1bff84809
4,743
py
Python
nix_update/__init__.py
jonringer/nix-update
4d35310c51739407c9831ba5ffb8c3de5871ed65
[ "MIT" ]
null
null
null
nix_update/__init__.py
jonringer/nix-update
4d35310c51739407c9831ba5ffb8c3de5871ed65
[ "MIT" ]
null
null
null
nix_update/__init__.py
jonringer/nix-update
4d35310c51739407c9831ba5ffb8c3de5871ed65
[ "MIT" ]
null
null
null
import argparse import os import sys import tempfile from typing import NoReturn, Optional from .eval import Package from .options import Options from .update import update from .utils import run, is_nix_flakes def die(msg: str) -> NoReturn: print(msg, file=sys.stderr) sys.exit(1) def parse_args() -> Options: parser = argparse.ArgumentParser() help = "File to import rather than default.nix. Examples, ./release.nix" parser.add_argument("-f", "--file", default="./.", help=help) parser.add_argument("--build", action="store_true", help="build the package") parser.add_argument( "--commit", action="store_true", help="Commit the updated package" ) parser.add_argument( "--run", action="store_true", help="provide a shell based on `nix run` with the package in $PATH", ) parser.add_argument( "--shell", action="store_true", help="provide a shell with the package" ) parser.add_argument( "--version", nargs="?", help="Version to update to", default="auto" ) parser.add_argument("attribute", help="Attribute name within the file evaluated") args = parser.parse_args() return Options( import_path=args.file, build=args.build, commit=args.commit, run=args.run, shell=args.shell, version=args.version, attribute=args.attribute, ) def nix_shell(options: Options) -> None: import_path = os.path.realpath(options.import_path) expr = f"with import {import_path} {{}}; mkShell {{ buildInputs = [ {options.attribute} ]; }}" with tempfile.TemporaryDirectory() as d: path = os.path.join(d, "default.nix") with open(path, "w") as f: f.write(expr) run(["nix-shell", path], stdout=None, check=False) def git_commit(git_dir: str, attribute: str, package: Package) -> None: run(["git", "-C", git_dir, "add", package.filename], stdout=None) diff = run(["git", "-C", git_dir, "diff", "--staged"]) if len(diff.stdout) == 0: print("No changes made, skip commit", file=sys.stderr) return new_version = package.new_version if new_version and package.old_version != new_version: if new_version.startswith("v"): new_version = 
new_version[1:] msg = f"{attribute}: {package.old_version} -> {new_version}" run( ["git", "-C", git_dir, "commit", "--verbose", "--message", msg], stdout=None ) else: with tempfile.NamedTemporaryFile(mode="w") as f: f.write(f"{attribute}:") f.flush() run( ["git", "-C", git_dir, "commit", "--verbose", "--template", f.name], stdout=None, ) def find_git_root(path: str) -> Optional[str]: prefix = [path] release_nix = [".git"] while True: root_path = os.path.join(*prefix) release_nix_path = os.path.join(root_path, *release_nix) if os.path.exists(release_nix_path): return root_path if os.path.abspath(root_path) == "/": return None prefix.append("..") def validate_git_dir(import_path: str) -> str: if os.path.isdir(import_path): git_dir = find_git_root(import_path) else: git_dir = find_git_root(os.path.basename(import_path)) if git_dir is None: die(f"Could not find a git repository relative to {import_path}") output = run(["git", "-C", git_dir, "diff", "--staged"]) if output.stdout != "": die( f"Please remove staged files before running {sys.argv[0]} with the commit flag" ) return git_dir def nix_run(options: Options) -> None: if is_nix_flakes(): cmd = ["nix", "shell", "--experimental-features", "nix-command"] else: cmd = ["nix", "run"] run( cmd + ["-f", options.import_path, options.attribute], stdout=None, check=False, ) def nix_build(options: Options) -> None: cmd = ["nix", "build"] if is_nix_flakes(): cmd += ["--experimental-features", "nix-command"] run( cmd + ["-f", options.import_path, options.attribute], stdout=None, check=False, ) def main() -> None: options = parse_args() if not os.path.exists(options.import_path): die(f"path {options.import_path} does not exists") if options.commit: git_dir = validate_git_dir(options.import_path) package = update(options) if options.build: nix_build(options) if options.run: nix_run(options) if options.shell: nix_shell(options) if options.commit: git_commit(git_dir, options.attribute, package) if __name__ == "__main__": main()
29.64375
98
0.608897
75e868eb23df6378e98d42ac9ce39571627b83f4
5,181
py
Python
auralib/welldb.py
whamlyn/auralib
01d64e25018fa249b3f901700428e9cb211d803c
[ "Apache-2.0" ]
20
2016-09-12T23:10:39.000Z
2021-05-06T01:29:45.000Z
auralib/welldb.py
kwinkunks/auralib
8300bb0c4d20156b9539df6d6c5e380f52572c4c
[ "Apache-2.0" ]
6
2016-12-02T01:56:49.000Z
2020-10-27T23:55:58.000Z
auralib/welldb.py
kwinkunks/auralib
8300bb0c4d20156b9539df6d6c5e380f52572c4c
[ "Apache-2.0" ]
8
2017-11-09T20:30:06.000Z
2021-03-26T23:45:43.000Z
# -*- coding: utf-8 -*- """ Created on Sun Oct 4 14:46:29 2020 @author: wesha """ #%% import numpy as np import matplotlib.pyplot as plt import auralib as aura import os import shutil def create_well(welldb, wellid, overwrite=False): """ Function to create a new well """ curwellpath = os.path.join(welldb, wellid) if os.path.exists(curwellpath): if overwrite == False: print('Path already exists:\n%s' % curwellpath) ans = input('Overwrite? (Y/[N]): ') if (len(ans)==0) | (ans.lower()=='n'): print('Skipping well %s' % curwellpath) else: overwrite = True elif overwrite==True: shutil.rmtree(curwellpath) os.mkdir(curwellpath) os.mkdir(os.path.join(curwellpath, 'logs')) os.mkdir(os.path.join(curwellpath, 'dev')) os.mkdir(os.path.join(curwellpath, 'td')) os.mkdir(os.path.join(curwellpath, 'markers')) os.mkdir(os.path.join(curwellpath, 'synth')) os.mkdir(os.path.join(curwellpath, 'gathers')) well_header = {'WELLID': wellid, 'TOPX': 0.0, 'TOPY': 0.0, 'KB': 0.0, 'GL': 0.0} with open(os.path.join(curwellpath, 'well_header.csv'), 'w') as fd: for key in well_header: line = '%s,%s\n' % (key, well_header[key]) fd.write(line) # initialize markers file with open(os.path.join(curwellpath, 'markers', 'markers.csv'), 'w') as fd: fd.write('WELLID,%s\n' % wellid) fd.write('#NAME,MD\n') def create_las_logs(welldb, lasfile, setname='LAS', ztype='md', wellidfield='WELL', create_missing_well=True): """ Function to load logs from LAS to a database well """ # create LASReader object buf = aura.las.LASReader(lasfile) # get some important constants wellid = buf.well_info[wellidfield]['data'] strt = buf.well_info['STRT']['data'] stop = buf.well_info['STOP']['data'] step = buf.well_info['STEP']['data'] zunit = buf.well_info['STRT']['unit'].lower() ztype = ztype.lower() # If the well doesn't exist in the database, create it now if os.path.exists(os.path.join(welldb, wellid)) == False: if create_missing_well == True: print('Well %s does not exist in DB. 
Creating now...\n' % wellid) create_well(welldb, wellid) else: print('Well %s does not exist in DB. Skipping...\n' % wellid) # If the log SET directory doesn't exist, create it now too logsdir = os.path.join(welldb, wellid, 'logs') setdir = os.path.join(logsdir, setname) if os.path.exists(setdir) == False: os.mkdir(setdir) # Write logs containg in LAS file to the log SET directory for logname in buf.curves.keys(): logunit = buf.curve_info[logname]['unit'] logdesc = buf.curve_info[logname]['desc'] logfile = os.path.join(setdir, logname+'.csv') with open(logfile, 'w') as fd: fd.write('#WELL,%s\n' % wellid) fd.write('#ZTYPE,%s\n' % ztype) fd.write('#ZUNIT,%s\n' % zunit) fd.write('#STRT,%f\n' % strt) fd.write('#STOP,%f\n' % stop) fd.write('#STEP,%f\n' % step) fd.write('#UNIT,%s\n' % logunit) fd.write('#DESC,%s\n' % logdesc) fd.write('~DATA\n') zlog = np.arange(strt, stop+step, step) for z, data in zip(zlog, buf.curves[logname]): fd.write('%f,%f\n' % (z, data)) def add_top(welldb, wellid, z, name): """ Add well markers for well. 
""" well_exists = os.path.exists(os.path.join(welldb, wellid)) if well_exists: tops_file = os.path.join(welldb, wellid, 'markers', 'markers.csv') with open(tops_file, 'a') as fd: fd.seek(0, 2) if type(name) == str: line = '%s,%f\n' % (name, z) fd.write(line) else: for i in range(len(z)): line = '%s,%f\n' % (name[i], z[i]) fd.write(line) else: print('Well %s does not exist in database, skipping...\n' % wellid) def update_well_header(welldb, wellid, hdr_name, hdr_value): """ Update and/or add new well header fields """ curwellpath = os.path.join(welldb, wellid) well_exists = os.path.exists(curwellpath) if well_exists: with open(os.path.join(curwellpath, 'well_header.csv'), 'a') as fd: buf = fd.readlines() header = {} for line in buf: line = line.split() header[line[0]] = line[1] for i in range(len(hdr_name)): if type(hdr_value[i]) == str: header[hdr_name[i]] = hdr_value[i] else: header[hdr_name[i]] = '%f' % hdr_value[i] with open(os.path.join(curwellpath, 'well_header.csv'), 'w') as fd: for key in header.keys(): line = '%s,%s\n' % (key, header[key]) fd.write(line)
31.210843
83
0.541787
86b21f2b039009597a9a5987faf0c36b69e628e0
1,210
py
Python
tests/test_utils.py
do-py-together/mocking-utils
b962391a5e2b768afc2e0b6fbb67cbe382de7ae4
[ "MIT" ]
1
2020-12-03T06:16:25.000Z
2020-12-03T06:16:25.000Z
tests/test_utils.py
timdaviss/mocking-utils
b962391a5e2b768afc2e0b6fbb67cbe382de7ae4
[ "MIT" ]
3
2020-12-15T13:24:53.000Z
2021-09-09T22:47:08.000Z
tests/test_utils.py
do-py-together/mocking-utils
b962391a5e2b768afc2e0b6fbb67cbe382de7ae4
[ "MIT" ]
null
null
null
""" :date_created: 2020-12-06 """ import pytest from mocking_utils.utils import powerset, powerset_concat @pytest.mark.parametrize('iterable, empty_is_valid, expected_output', [ ([1, 2, 3], True, [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]), ([1, 2, 3], False, [(1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]), ]) def test_powerset(iterable, empty_is_valid, expected_output): """ :type iterable: list :type empty_is_valid: bool :type expected_output: list of tuple """ assert list(powerset(iterable, empty_is_valid=empty_is_valid)) == expected_output @pytest.mark.parametrize('iterable, empty_is_valid, expected_output', [ (['a', 'b', 'c'], True, ['', 'a', 'b', 'c', 'a,b', 'a,c', 'b,c', 'a,b,c']), (['a', 'b', 'c'], False, ['a', 'b', 'c', 'a,b', 'a,c', 'b,c', 'a,b,c']), pytest.param([1, 2, 3], False, [], marks=pytest.mark.xfail(raises=TypeError)) ]) def test_powerset_concat(iterable, empty_is_valid, expected_output): """ :type iterable: list of str :type empty_is_valid: bool :type expected_output: list of str """ assert list(powerset_concat(iterable, empty_is_valid=empty_is_valid)) == expected_output
35.588235
92
0.601653
b2dae2511ec87ae3e7e3860b05d41621b82b13de
16,900
py
Python
bigquery/google/cloud/bigquery/magics.py
beittatt/cloud-python
cdb4cc4f3c568ff32acf35c34910d23f2d3800a0
[ "Apache-2.0" ]
2
2021-11-26T07:08:43.000Z
2022-03-07T20:20:04.000Z
bigquery/google/cloud/bigquery/magics.py
beittatt/cloud-python
cdb4cc4f3c568ff32acf35c34910d23f2d3800a0
[ "Apache-2.0" ]
null
null
null
bigquery/google/cloud/bigquery/magics.py
beittatt/cloud-python
cdb4cc4f3c568ff32acf35c34910d23f2d3800a0
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """IPython Magics .. function:: %%bigquery IPython cell magic to run a query and display the result as a DataFrame .. code-block:: python %%bigquery [<destination_var>] [--project <project>] [--use_legacy_sql] [--verbose] [--params <params>] <query> Parameters: * ``<destination_var>`` (optional, line argument): variable to store the query results. The results are not displayed if this parameter is used. * ``--project <project>`` (optional, line argument): Project to use for running the query. Defaults to the context :attr:`~google.cloud.bigquery.magics.Context.project`. * ``--use_bqstorage_api`` (optional, line argument): Downloads the DataFrame using the BigQuery Storage API. To use this option, install the ``google-cloud-bigquery-storage`` and ``fastavro`` packages, and `enable the BigQuery Storage API <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`_. * ``--use_legacy_sql`` (optional, line argument): Runs the query using Legacy SQL syntax. Defaults to Standard SQL if this argument not used. * ``--verbose`` (optional, line argument): If this flag is used, information including the query job ID and the amount of time for the query to complete will not be cleared after the query is finished. By default, this information will be displayed but will be cleared after the query is finished. 
* ``--params <params>`` (optional, line argument): If present, the argument following the ``--params`` flag must be either: * :class:`str` - A JSON string representation of a dictionary in the format ``{"param_name": "param_value"}`` (ex. ``{"num": 17}``). Use of the parameter in the query should be indicated with ``@param_name``. See ``In[5]`` in the Examples section below. * :class:`dict` reference - A reference to a ``dict`` in the format ``{"param_name": "param_value"}``, where the value types must be JSON serializable. The variable reference is indicated by a ``$`` before the variable name (ex. ``$my_dict_var``). See ``In[6]`` and ``In[7]`` in the Examples section below. * ``<query>`` (required, cell argument): SQL query to run. Returns: A :class:`pandas.DataFrame` with the query results. .. note:: All queries run using this magic will run using the context :attr:`~google.cloud.bigquery.magics.Context.credentials`. Examples: The following examples can be run in an IPython notebook after loading the bigquery IPython extension (see ``In[1]``) and setting up Application Default Credentials. .. 
code-block:: none In [1]: %load_ext google.cloud.bigquery In [2]: %%bigquery ...: SELECT name, SUM(number) as count ...: FROM `bigquery-public-data.usa_names.usa_1910_current` ...: GROUP BY name ...: ORDER BY count DESC ...: LIMIT 3 Out[2]: name count ...: ------------------- ...: 0 James 4987296 ...: 1 John 4866302 ...: 2 Robert 4738204 In [3]: %%bigquery df --project my-alternate-project --verbose ...: SELECT name, SUM(number) as count ...: FROM `bigquery-public-data.usa_names.usa_1910_current` ...: WHERE gender = 'F' ...: GROUP BY name ...: ORDER BY count DESC ...: LIMIT 3 Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b Query executing: 2.61s Query complete after 2.92s In [4]: df Out[4]: name count ...: ---------------------- ...: 0 Mary 3736239 ...: 1 Patricia 1568495 ...: 2 Elizabeth 1519946 In [5]: %%bigquery --params {"num": 17} ...: SELECT @num AS num Out[5]: num ...: ------- ...: 0 17 In [6]: params = {"num": 17} In [7]: %%bigquery --params $params ...: SELECT @num AS num Out[7]: num ...: ------- ...: 0 17 """ from __future__ import print_function import ast import sys import time from concurrent import futures try: import IPython from IPython import display from IPython.core import magic_arguments except ImportError: # pragma: NO COVER raise ImportError("This module can only be loaded in IPython.") from google.api_core import client_info import google.auth from google.cloud import bigquery from google.cloud.bigquery.dbapi import _helpers import six IPYTHON_USER_AGENT = "ipython-{}".format(IPython.__version__) class Context(object): """Storage for objects to be used throughout an IPython notebook session. A Context object is initialized when the ``magics`` module is imported, and can be found at ``google.cloud.bigquery.magics.context``. 
""" def __init__(self): self._credentials = None self._project = None self._connection = None self._use_bqstorage_api = None self._default_query_job_config = bigquery.QueryJobConfig() @property def credentials(self): """google.auth.credentials.Credentials: Credentials to use for queries performed through IPython magics Note: These credentials do not need to be explicitly defined if you are using Application Default Credentials. If you are not using Application Default Credentials, manually construct a :class:`google.auth.credentials.Credentials` object and set it as the context credentials as demonstrated in the example below. See `auth docs`_ for more information on obtaining credentials. Example: Manually setting the context credentials: >>> from google.cloud.bigquery import magics >>> from google.oauth2 import service_account >>> credentials = (service_account ... .Credentials.from_service_account_file( ... '/path/to/key.json')) >>> magics.context.credentials = credentials .. _auth docs: http://google-auth.readthedocs.io /en/latest/user-guide.html#obtaining-credentials """ if self._credentials is None: self._credentials, _ = google.auth.default() return self._credentials @credentials.setter def credentials(self, value): self._credentials = value @property def project(self): """str: Default project to use for queries performed through IPython magics Note: The project does not need to be explicitly defined if you have an environment default project set. If you do not have a default project set in your environment, manually assign the project as demonstrated in the example below. 
Example: Manually setting the context project: >>> from google.cloud.bigquery import magics >>> magics.context.project = 'my-project' """ if self._project is None: _, self._project = google.auth.default() return self._project @project.setter def project(self, value): self._project = value @property def use_bqstorage_api(self): """bool: [Beta] Set to True to use the BigQuery Storage API to download query results To use this option, install the ``google-cloud-bigquery-storage`` and ``fastavro`` packages, and `enable the BigQuery Storage API <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`_. """ return self._use_bqstorage_api @use_bqstorage_api.setter def use_bqstorage_api(self, value): self._use_bqstorage_api = value @property def default_query_job_config(self): """google.cloud.bigquery.job.QueryJobConfig: Default job configuration for queries. The context's :class:`~google.cloud.bigquery.job.QueryJobConfig` is used for queries. Some properties can be overridden with arguments to the magics. Example: Manually setting the default value for ``maximum_bytes_billed`` to 100 MB: >>> from google.cloud.bigquery import magics >>> magics.context.default_query_job_config.maximum_bytes_billed = 100000000 """ return self._default_query_job_config @default_query_job_config.setter def default_query_job_config(self, value): self._default_query_job_config = value context = Context() def _run_query(client, query, job_config=None): """Runs a query while printing status updates Args: client (google.cloud.bigquery.client.Client): Client to bundle configuration needed for API requests. query (str): SQL query to be executed. Defaults to the standard SQL dialect. Use the ``job_config`` parameter to change dialects. job_config (google.cloud.bigquery.job.QueryJobConfig, optional): Extra configuration options for the job. 
Returns: google.cloud.bigquery.job.QueryJob: the query job created Example: >>> client = bigquery.Client() >>> _run_query(client, "SELECT 17") Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b Query executing: 1.66s Query complete after 2.07s 'bf633912-af2c-4780-b568-5d868058632b' """ start_time = time.time() query_job = client.query(query, job_config=job_config) if job_config and job_config.dry_run: return query_job print("Executing query with job ID: {}".format(query_job.job_id)) while True: print("\rQuery executing: {:0.2f}s".format(time.time() - start_time), end="") try: query_job.result(timeout=0.5) break except futures.TimeoutError: continue print("\nQuery complete after {:0.2f}s".format(time.time() - start_time)) return query_job @magic_arguments.magic_arguments() @magic_arguments.argument( "destination_var", nargs="?", help=("If provided, save the output to this variable instead of displaying it."), ) @magic_arguments.argument( "--project", type=str, default=None, help=("Project to use for executing this query. Defaults to the context project."), ) @magic_arguments.argument( "--maximum_bytes_billed", default=None, help=( "maximum_bytes_billed to use for executing this query. Defaults to " "the context default_query_job_config.maximum_bytes_billed." ), ) @magic_arguments.argument( "--dry_run", action="store_true", default=False, help=( "Sets query to be a dry run to estimate costs. " "Defaults to executing the query instead of dry run if this argument is not used." ), ) @magic_arguments.argument( "--use_legacy_sql", action="store_true", default=False, help=( "Sets query to use Legacy SQL instead of Standard SQL. Defaults to " "Standard SQL if this argument is not used." ), ) @magic_arguments.argument( "--use_bqstorage_api", action="store_true", default=False, help=( "[Beta] Use the BigQuery Storage API to download large query results. 
" "To use this option, install the google-cloud-bigquery-storage and " "fastavro packages, and enable the BigQuery Storage API." ), ) @magic_arguments.argument( "--verbose", action="store_true", default=False, help=( "If set, print verbose output, including the query job ID and the " "amount of time for the query to finish. By default, this " "information will be displayed as the query runs, but will be " "cleared after the query is finished." ), ) @magic_arguments.argument( "--params", nargs="+", default=None, help=( "Parameters to format the query string. If present, the --params " "flag should be followed by a string representation of a dictionary " "in the format {'param_name': 'param_value'} (ex. {\"num\": 17}), " "or a reference to a dictionary in the same format. The dictionary " "reference can be made by including a '$' before the variable " "name (ex. $my_dict_var)." ), ) def _cell_magic(line, query): """Underlying function for bigquery cell magic Note: This function contains the underlying logic for the 'bigquery' cell magic. This function is not meant to be called directly. Args: line (str): "%%bigquery" followed by arguments as required query (str): SQL query to run Returns: pandas.DataFrame: the query results. 
""" args = magic_arguments.parse_argstring(_cell_magic, line) params = [] if args.params is not None: try: params = _helpers.to_query_parameters( ast.literal_eval("".join(args.params)) ) except Exception: raise SyntaxError( "--params is not a correctly formatted JSON string or a JSON " "serializable dictionary" ) project = args.project or context.project client = bigquery.Client( project=project, credentials=context.credentials, default_query_job_config=context.default_query_job_config, client_info=client_info.ClientInfo(user_agent=IPYTHON_USER_AGENT), ) if context._connection: client._connection = context._connection bqstorage_client = _make_bqstorage_client( args.use_bqstorage_api or context.use_bqstorage_api, context.credentials ) job_config = bigquery.job.QueryJobConfig() job_config.query_parameters = params job_config.use_legacy_sql = args.use_legacy_sql job_config.dry_run = args.dry_run if args.maximum_bytes_billed == "None": job_config.maximum_bytes_billed = 0 elif args.maximum_bytes_billed is not None: value = int(args.maximum_bytes_billed) job_config.maximum_bytes_billed = value error = None try: query_job = _run_query(client, query, job_config) except Exception as ex: error = str(ex) if not args.verbose: display.clear_output() if error: if args.destination_var: print( "Could not save output to variable '{}'.".format(args.destination_var), file=sys.stderr, ) print("\nERROR:\n", error, file=sys.stderr) return if args.dry_run and args.destination_var: IPython.get_ipython().push({args.destination_var: query_job}) return elif args.dry_run: print( "Query validated. 
This query will process {} bytes.".format( query_job.total_bytes_processed ) ) return query_job result = query_job.to_dataframe(bqstorage_client=bqstorage_client) if args.destination_var: IPython.get_ipython().push({args.destination_var: result}) else: return result def _make_bqstorage_client(use_bqstorage_api, credentials): if not use_bqstorage_api: return None try: from google.cloud import bigquery_storage_v1beta1 except ImportError as err: customized_error = ImportError( "Install the google-cloud-bigquery-storage and pyarrow packages " "to use the BigQuery Storage API." ) six.raise_from(customized_error, err) try: from google.api_core.gapic_v1 import client_info as gapic_client_info except ImportError as err: customized_error = ImportError( "Install the grpcio package to use the BigQuery Storage API." ) six.raise_from(customized_error, err) return bigquery_storage_v1beta1.BigQueryStorageClient( credentials=credentials, client_info=gapic_client_info.ClientInfo(user_agent=IPYTHON_USER_AGENT), )
34.141414
90
0.63716
98b0311d450e6355d2d66f737c2e8a10f6038e4f
313
py
Python
ezflow/data/dataset/__init__.py
NeelayS/ezflow
b93a48c4adf5021f7eacbfc43220c7efa5ae55cd
[ "MIT" ]
94
2021-11-18T18:31:18.000Z
2022-03-04T02:30:13.000Z
ezflow/data/dataset/__init__.py
NeelayS/ezflow
b93a48c4adf5021f7eacbfc43220c7efa5ae55cd
[ "MIT" ]
72
2021-11-19T16:59:10.000Z
2022-03-02T14:39:10.000Z
ezflow/data/dataset/__init__.py
neu-vig/ezflow
1eb6f675e72b1de6db7b35d61ca4ef0082bae890
[ "MIT" ]
5
2021-11-18T18:42:38.000Z
2022-03-03T11:35:26.000Z
from .autoflow import AutoFlow from .base_dataset import BaseDataset from .driving import Driving from .flying_chairs import FlyingChairs from .flying_things3d import FlyingThings3D, FlyingThings3DSubset from .hd1k import HD1K from .kitti import Kitti from .monkaa import Monkaa from .mpi_sintel import MPISintel
31.3
65
0.84984
e1d459d48ee615fc0d22fb08f8b69ce431c3f966
5,569
py
Python
predict.py
Spencerfar/djin-aging
f6513226e879e6061996d819b4de0e2873860fbc
[ "MIT" ]
3
2021-08-24T08:33:45.000Z
2022-01-18T23:50:33.000Z
predict.py
Spencerfar/djin-aging
f6513226e879e6061996d819b4de0e2873860fbc
[ "MIT" ]
null
null
null
predict.py
Spencerfar/djin-aging
f6513226e879e6061996d819b4de0e2873860fbc
[ "MIT" ]
1
2021-08-24T08:34:30.000Z
2021-08-24T08:34:30.000Z
import argparse import torch from torch.nn import functional as F import numpy as np from scipy.stats import sem from pandas import read_csv from torch.utils import data from Model.model import Model from Utils.record import record from DataLoader.dataset import Dataset from DataLoader.collate import custom_collate parser = argparse.ArgumentParser('Predict') parser.add_argument('--job_id', type=int) parser.add_argument('--epoch', type=int) parser.add_argument('--gamma_size', type=int, default = 25) parser.add_argument('--z_size', type=int, default = 20) parser.add_argument('--decoder_size', type=int, default = 65) parser.add_argument('--Nflows', type=int, default = 3) parser.add_argument('--flow_hidden', type=int, default = 24) parser.add_argument('--f_nn_size', type=int, default = 12) parser.add_argument('--W_prior_scale', type=float, default = 0.1) args = parser.parse_args() torch.set_num_threads(6) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') N = 29 sims = 250 dt = 0.5 length = 50 pop_avg = np.load('Data/Population_averages.npy') pop_avg_env = np.load('Data/Population_averages_env.npy') pop_std = np.load('Data/Population_std.npy') pop_avg_ = torch.from_numpy(pop_avg[...,1:]).float() pop_avg_env = torch.from_numpy(pop_avg_env).float() pop_std = torch.from_numpy(pop_std[...,1:]).float() pop_avg_bins = np.arange(40, 105, 3)[:-2] test_name = 'Data/test.csv' test_set = Dataset(test_name, N, pop=False, min_count=10) num_test = 400 test_generator = data.DataLoader(test_set, batch_size = num_test, shuffle = False, collate_fn = lambda x: custom_collate(x, pop_avg_, pop_avg_env, pop_std, 1.0)) mean_T = test_set.mean_T std_T = test_set.std_T mean_deficits = torch.Tensor(read_csv('Data/mean_deficits.txt', index_col=0,sep=',',header=None).values[1:-3].flatten()) std_deficits = torch.Tensor(read_csv('Data/std_deficits.txt', index_col=0,sep=',',header=None, names = ['variable']).values[1:-3].flatten()) model = Model(device, N, args.gamma_size, 
args.z_size, args.decoder_size, args.Nflows, args.flow_hidden, args.f_nn_size, mean_T, std_T, dt, length).to(device) model.load_state_dict(torch.load('Parameters/train%d_Model_DJIN_epoch%d.params'%(args.job_id, args.epoch),map_location=device)) model = model.eval() mean_results = np.zeros((test_set.__len__(), 100, N+1)) * np.nan std_results = np.zeros((test_set.__len__(), 100, N+1)) * np.nan S_results = np.zeros((test_set.__len__(), 100, 3)) * np.nan with torch.no_grad(): sigma_posterior = torch.distributions.gamma.Gamma(model.logalpha.exp(), model.logbeta.exp()) start = 0 for data in test_generator: size = data['Y'].shape[0] X = torch.zeros(sims, size, int(length/dt), N).to(device) X_std = torch.zeros(sims, size, int(length/dt), N).to(device) S = torch.zeros(sims, size, int(length/dt)).to(device) alive = torch.ones(sims, size, int(length/dt)).to(device) for s in range(sims): sigma_y = sigma_posterior.sample((data['Y'].shape[0], length*2)) pred_X, t, pred_S, pred_logGamma, pred_sigma_X, context, y, times, mask, survival_mask, dead_mask, after_dead_mask, censored, sample_weights, med, env, z_sample, prior_entropy, log_det, recon_mean_x0, drifts, mask0, W = model(data, sigma_y, test=True) X[s] = pred_X X_std[s] = pred_X + sigma_y*torch.randn_like(pred_X) S[s] = pred_S.exp() alive[s,:,1:] = torch.cumprod(torch.bernoulli(torch.exp(-1*pred_logGamma.exp()[:,:-1]*dt)), dim=1) t0 = t[:,0] record_times = [torch.from_numpy(np.arange(t0[b].cpu(), 121, 1)).to(device) for b in range(size)] X_record, S_record = record(t, X, S, record_times, dt) X_std_record, alive_record = record(t, X_std, alive, record_times, dt) t0 = t0.cpu() X_sum = [] X_sum_std = [] X_sum2 = [] X_count = [] for b in range(size): X_sum.append(torch.sum(X_record[b].permute(2,0,1)*alive_record[b], dim = 1).cpu()) X_sum_std.append(torch.sum(X_std_record[b].permute(2,0,1)*alive_record[b], dim = 1).cpu()) X_sum2.append(torch.sum(X_std_record[b].pow(2).permute(2,0,1)*alive_record[b], dim = 1).cpu()) 
X_count.append(torch.sum(alive_record[b], dim = 0).cpu()) for b in range(size): mean_results[start+b, :len(np.arange(t0[b], 121, 1)), 0] = np.arange(t0[b], 121, 1) std_results[start+b, :len(np.arange(t0[b], 121, 1)), 0] = np.arange(t0[b], 121, 1) S_results[start+b, :len(np.arange(t0[b], 121, 1)), 0] = np.arange(t0[b], 121, 1) mean_results[start+b, :X_sum[b].shape[1], 1:] = (X_sum[b]/X_count[b]).permute(1,0).numpy() std_results[start+b, :X_sum_std[b].shape[1], 1:] = np.sqrt((X_sum2[b]/X_count[b] - (X_sum_std[b]/X_count[b]).pow(2)).permute(1,0).numpy()) S_results[start+b, :len(np.arange(t0[b], 121, 1)), 1] = torch.mean(S_record[b], dim = 0) S_results[start+b, :len(np.arange(t0[b], 121, 1)), 2] = torch.std(S_record[b], dim = 0) start += size np.save('Analysis_Data/Mean_trajectories_job_id%d_epoch%d_DJIN.npy'%(args.job_id, args.epoch), mean_results) np.save('Analysis_Data/Std_trajectories_job_id%d_epoch%d_DJIN.npy'%(args.job_id, args.epoch), std_results) np.save('Analysis_Data/Survival_trajectories_job_id%d_epoch%d_DJIN.npy'%(args.job_id, args.epoch), S_results)
45.276423
263
0.659544
a48d6beb11518df9a085486a5f73193f6c3ab418
2,379
py
Python
openingangle/123.py
DingdingLuan/PycharmProjects
055944cd4bf66637e40d81d8ccf05216006dc0a3
[ "Apache-2.0" ]
null
null
null
openingangle/123.py
DingdingLuan/PycharmProjects
055944cd4bf66637e40d81d8ccf05216006dc0a3
[ "Apache-2.0" ]
null
null
null
openingangle/123.py
DingdingLuan/PycharmProjects
055944cd4bf66637e40d81d8ccf05216006dc0a3
[ "Apache-2.0" ]
null
null
null
import pandas as pd import numpy as np from Calculatefunction import* import matplotlib.pyplot as plt df=pd.read_excel("/Users/dingding/Desktop/calculate/3333.xlsx") grbname=df['grb'] z = df['z'] alpha=df['alpha'] beta=df['beta'] ep=df['ep'] s=df['s'] # print(type(s)) # print(type(df['s'])) i=0 grbname1=[] z1=[] alpha1=[] beta1=[] ep1=[] s1=[] luminositydistance1=[] egamma1=[] eiso1=[] k1=[] seita1=[] for i in range(6): seita1=np.append(seita1,seita(float(z[i]),float(ep[i]),float(s[i]),float(alpha[i]),float(beta[i]),15,350)) grbname1=np.append(grbname1,grbname[i]) z1=np.append(z1,z[i]) alpha1=np.append(alpha1,alpha[i]) beta1=np.append(beta1,beta[i]) ep1=np.append(ep1,ep[i]) s1=np.append(s1,s[i]) luminositydistance1=np.append(luminositydistance1,dl(z[i])) egamma1=np.append(egamma1,egamma(z[i],ep[i])) eiso1=np.append(eiso1,eiso(float(z[i]),float(ep[i]),float(s[i]),float(alpha[i]),float(beta[i]),15,350)) k1=np.append(k1,k(ep[i],z[i],alpha[i],beta[i],15,350)) print(k1) # dataframename=pd.DataFrame(grbname1) # dataframename.to_csv('/users/dingding/desktop/grbname.csv',sep=',') # # dataframename=pd.DataFrame(z1) # dataframename.to_csv('/users/dingding/desktop/z.csv',sep=',') # # dataframename=pd.DataFrame(alpha1) # dataframename.to_csv('/users/dingding/desktop/alpha.csv',sep=',') # # dataframename=pd.DataFrame(beta1) # dataframename.to_csv('/users/dingding/desktop/beta.csv',sep=',') # # dataframename=pd.DataFrame(ep1) # dataframename.to_csv('/users/dingding/desktop/ep.csv',sep=',') # # dataframename=pd.DataFrame(s1) # dataframename.to_csv('/users/dingding/desktop/s.csv',sep=',') # # dataframename=pd.DataFrame(luminositydistance1) # dataframename.to_csv('/users/dingding/desktop/ld.csv',sep=',') # # dataframename=pd.DataFrame(egamma1) # dataframename.to_csv('/users/dingding/desktop/egamma.csv',sep=',') # # dataframename=pd.DataFrame(eiso1) # dataframename.to_csv('/users/dingding/desktop/eiso.csv',sep=',') # # dataframename=pd.DataFrame(k1) # 
dataframename.to_csv('/users/dingding/desktop/k.csv',sep=',') # # dataframename=pd.DataFrame(seita1) # dataframename.to_csv('/users/dingding/desktop/seita.csv',sep=',') # plt.figure(figsize=(10, 5)) # plt.scatter(z, seita1, s=5, alpha=1, marker='o', c='r') # plt.xlim(0,3) # plt.title("Z-Eisocalculate") # plt.xlabel("z") # plt.ylabel('seita') # # plt.show()
27.662791
110
0.69567
5bac61dfa67f5c621a240a47a5eaeda3be3ecaf4
37,966
py
Python
Web/main.py
apratimshukla6/PharmaCat
1755155220cceb85febd9a142f12387ff522c604
[ "Apache-2.0" ]
15
2019-12-19T09:57:09.000Z
2022-02-25T09:13:17.000Z
Web/main.py
PharmaCat/Application
e050cf5812f0ea860c6359868bb532d49d3236cc
[ "Apache-2.0" ]
6
2020-03-24T17:45:14.000Z
2022-03-12T00:02:07.000Z
Web/main.py
Codebotics/PharmaCat
e050cf5812f0ea860c6359868bb532d49d3236cc
[ "Apache-2.0" ]
6
2020-06-02T03:12:30.000Z
2022-03-09T08:35:52.000Z
""" This script handles the execution of the Flask Web Server(Web Application + JSON API) """ from flask import Flask, render_template, request, redirect, url_for, session, jsonify, flash from flaskext.mysql import MySQL from sklearn.feature_extraction.text import CountVectorizer from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import train_test_split from sklearn import tree from sklearn import model_selection from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import cross_val_score from googleplaces import GooglePlaces, types, lang from flask_socketio import SocketIO import pandas as pd import numpy as np import pickle import re import os import random import hashlib import bcrypt import json import requests import nltk import pybase64 from datetime import date from sklearn.preprocessing import normalize app = Flask(__name__) port = int(os.environ.get('PORT', 5000)) # Change this to your secret key (can be anything, it's for extra protection) app.secret_key = 'canada$God7972#' # Enter your database connection details below app.config['MYSQL_DATABASE_HOST'] = 'localhost' app.config['MYSQL_DATABASE_USER'] = 'root' app.config['MYSQL_DATABASE_PASSWORD '] = '' app.config['MYSQL_DATABASE_DB'] = 'pharmacat' # Intialize MySQL mysql = MySQL(autocommit=True) mysql.init_app(app) """-------------------------------Start of Pharmacat API for developers-------------------------------""" @app.route('/api/details/<token>',methods=['GET']) def detailsapi(token): tkn = pybase64.b64decode(token) r = tkn.decode('utf-8') str1 = r.split("(~)") username = str1[0] password = str1[1] cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM users WHERE Username = %s', [username]) account = cursor.fetchone() details = [ { 'ID': account[0], 'Username': username, 'Email': account[3], 'FullName': account[4], 'Address': account[5], 'BloodGroup': account[6], 'Age': account[7] }] return jsonify({'Details': details}) 
@app.route('/api/login/<code>',methods=['GET']) def loginapi(code): code1 = code.split('~') username = code1[0] password = code1[1] cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM users WHERE Username = %s', [username]) account = cursor.fetchone() if bcrypt.checkpw(password.encode('utf-8'), account[2].encode('utf-8')): token = account[8] return jsonify({'Token': token}) return jsonify({'Token': "Invalid Credentials"}) @app.route('/api/diagnosetext/<code>',methods=['GET']) def diagnosetextapi(code): code1 = code.split('~') rf="" for i in code1: rf=rf+i+" " filename = 'disease_predict.sav' feel = rf data = [feel] cv = pickle.load(open("vectorizer.pickle", 'rb')) #Load vectorizer loaded_model = pickle.load(open(filename, 'rb')) vect=cv.transform(data).toarray() p=loaded_model.predict(vect) return jsonify({'Disease': p[0]}) @app.route('/api/hospital/<token>',methods=['GET']) def hospital(token): tkn = pybase64.b64decode(token) r = tkn.decode('utf-8') str1 = r.split("(~)") username = str1[0] password = str1[1] cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM users WHERE Username = %s', [username]) account = cursor.fetchone() API_KEY = 'your google api key' str1 = str(account[5]).split(",") l="" for i in range(0,len(str1)): l=l+str1[i]+"+" send_url = 'https://maps.googleapis.com/maps/api/geocode/json?address='+l+'&key='+API_KEY r = requests.get(send_url) j = json.loads(r.text) lat = j['results'][0]['geometry']['location']['lat'] lon = j['results'][0]['geometry']['location']['lng'] # Initialising the GooglePlaces constructor google_places = GooglePlaces(API_KEY) query_result = google_places.nearby_search( lat_lng ={'lat': lat, 'lng': lon}, radius = 5000, types =[types.TYPE_HOSPITAL]) places = [] # Iterate over the search results for place in query_result.places: places.append(place.name) return jsonify({'Hospitals': places}) @app.route('/api/symptoms/',methods=['GET']) def symptoms(): cursor = mysql.get_db().cursor() cursor.execute('SELECT * 
FROM symptoms ORDER BY Symptom_Name ASC') sym = cursor.fetchall() sym1=[] for i in sym: sym1.append(i) symptoms=[] for i in range(0,len(sym1)): symptoms.append(sym1[i][1]) return jsonify({'Symptoms': symptoms}) @app.route('/api/register/<code>',methods=['GET']) def registerapi(code): code1 = code.split('~') username = code1[0] password = code1[1] email = code1[2] full_name = code1[3] address = code1[4] blood = code1[5] age = code1[6] msg = '' # Check if account exists using MySQL cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM users WHERE Username = %s', (username)) account = cursor.fetchone() # If account exists show error and validation checks if account: msg = 'Account already exists!' return jsonify({'Message': msg}) elif not re.match(r'[^@]+@[^@]+\.[^@]+', email): msg = 'Invalid email address!' return jsonify({'Message': msg}) elif not re.match(r'[A-Za-z0-9]+', username): msg = 'Username must contain only characters and numbers!' return jsonify({'Message': msg}) elif not username or not password or not email: msg = 'Please fill out the form!' return jsonify({'Message': msg}) else: comb = username+'(~)'+password s = comb.encode() s1 = pybase64.b64encode(s) api=s1.decode('utf-8') hashed_password = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt()) cursor.execute('INSERT INTO users VALUES (NULL, %s, %s, %s, %s, %s, %s, %s, %s)', (username, hashed_password, email, full_name, address, blood, age, api)) msg = 'You have successfully registered!' 
return jsonify({'Message': msg}) @app.route('/api/diagnosesym/<code>',methods=['GET']) #n~symptoms def diagnosesym(code): code1 = code.split('~') n = int(code1[0]) l=[] for i in range(1,n): l.append(code1[i]) data = pd.read_csv("Manual-Data/Training.csv") df = pd.DataFrame(data) cols = df.columns cols = cols[:-1] x = df[cols] y = df['prognosis'] features = cols feature_dict = {} filename = 'finalized_model.sav' for i,f in enumerate(features): feature_dict[f] = i for i in l: s=i m=feature_dict[s] if (m!=0): sample_x = [i/m if i ==m else i*0 for i in range(len(features))] loaded_model = pickle.load(open(filename, 'rb')) sample_x = np.array(sample_x).reshape(1,len(sample_x)) p_disease=loaded_model.predict(sample_x) answer = p_disease[0] cursor1 = mysql.get_db().cursor() cursor1.execute('SELECT * FROM medicine WHERE Disease = %s', [answer]) medicine = cursor1.fetchone() cursor2 = mysql.get_db().cursor() cursor2.execute('SELECT * FROM doctor_fields WHERE Disease = %s', [answer]) special = cursor2.fetchone() return jsonify({'Disease': answer, 'Medicine': medicine[2], 'Doctor': special[2]}) """-------------------------------End of Pharmacat API for developers-------------------------------""" """-------------------------------Start of Web Application-------------------------------""" #Homepage @app.route('/') def index(): if 'loggedin' not in session: return render_template('index.html') else: return home() #Dashboard @app.route('/dashboard') def home(): # Check if user is loggedin if 'loggedin' in session: # User is loggedin show them the home page if(session['isdoctor']==0): cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM users WHERE ID = %s', [session['id']]) else: cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM doctors WHERE ID = %s', [session['id']]) account = cursor.fetchone() cursor1 = mysql.get_db().cursor() records = cursor.execute('SELECT * FROM users') return render_template('dashboard.html', account = account, num = 
records,isdoctor=session['isdoctor']) # User is not loggedin redirect to login page return redirect(url_for('login')) #Patient Login @app.route('/login', methods=['GET', 'POST']) def login(): if 'loggedin' not in session: # Output message if something goes wrong... msg = None # Check if "username" and "password" POST requests exist (user submitted form) if request.method == 'POST' and 'username' in request.form and 'password' in request.form: # Create variables for easy access username = request.form['username'] password = request.form['password'] if(username and password): # Check if account exists using MySQL cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM users WHERE Username = %s', (username)) # Fetch one record and return result account = cursor.fetchone() # If account exists in accounts table in out database if account: if bcrypt.checkpw(password.encode('utf-8'), account[2].encode('utf-8')): # Create session data, we can access this data in other routes session['loggedin'] = True session['id'] = account[0] session['username'] = account[1] session['api'] = account[8] session['isdoctor'] = 0 # Redirect to dashboard return home() else: # Account doesnt exist or username/password incorrect msg = 'Incorrect username/password!' flash(msg) else: # Account doesnt exist or username/password incorrect msg = 'Incorrect username/password!' flash(msg) else: msg = 'Please provide both username and password!' flash(msg) # Show the login form with message (if any) else: return home() return render_template('patientlogin.html', msg=msg) #Patient Register @app.route('/register', methods=['GET', 'POST']) def register(): # Output message if something goes wrong... 
msg = '' if('loggedin' not in session): # Check if "username", "password" and "email" POST requests exist (user submitted form) if request.method == 'POST' and 'username' in request.form and 'password' in request.form and 'email' in request.form: # Create variables for easy access username = request.form['username'] password = request.form['password'] email = request.form['email'] full_name = request.form['full_name'] address = request.form['address'] age = request.form['age'] blood = request.form['blood'] if(username and password and email and full_name and address and age and blood): # Check if account exists using MySQL cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM users WHERE Username = %s', (username)) account = cursor.fetchone() # If account exists show error and validation checks if account: msg = 'Account already exists!' flash(msg) elif not re.match(r'[^@]+@[^@]+\.[^@]+', email): msg = 'Invalid email address!' flash(msg) elif not re.match(r'[A-Za-z0-9]+', username): msg = 'Username must contain only characters and numbers!' flash(msg) else: # Account doesnt exists and the form data is valid, now insert new account into users table apistr = username; result = hashlib.md5(apistr.encode()) comb = username+'(~)'+password s = comb.encode() s1 = pybase64.b64encode(s) api=s1.decode('utf-8') hashed_password = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt()) cursor.execute('INSERT INTO users VALUES (NULL, %s, %s, %s, %s, %s, %s, %s, %s)', (username, hashed_password, email, full_name, address, blood, age, api)) cursor.execute('SELECT * FROM users WHERE Username = %s', (username)) # Fetch one record and return result account = cursor.fetchone() session['loggedin'] = True session['id'] = account[0] session['username'] = account[1] session['api'] = account[8] session['isdoctor'] = 0 msg = 'You have successfully registered!' return home() else: msg = 'Please fill out the form!' flash(msg) elif request.method == 'POST': # Form is empty... 
(no POST data) msg = 'Please fill out the form!' # Show registration form with message (if any) else: return home() return render_template('patientlogin.html', msg=msg) #Doctor Register @app.route('/docregister', methods=['GET', 'POST']) def docregister(): if 'loggedin' not in session: # Output message if something goes wrong... msg = '' # Check if "username", "password" and "email" POST requests exist (user submitted form) if request.method == 'POST' and 'username' in request.form and 'password' in request.form and 'email' in request.form: # Create variables for easy access username = request.form['username'] password = request.form['password'] email = request.form['email'] full_name = request.form['full_name'] registration_number = request.form['registration_number'] contact_number = request.form['contact_number'] spec = request.form['specialization'] address = request.form['address'] if(username and password and email and full_name and registration_number and contact_number and spec and address): # Check if account exists using MySQL cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM doctors WHERE Username = %s', (username)) account = cursor.fetchone() # If account exists show error and validation checks if account: msg = 'Account already exists!' flash(msg) elif not re.match(r'[^@]+@[^@]+\.[^@]+', email): msg = 'Invalid email address!' flash(msg) elif not re.match(r'[A-Za-z0-9]+', username): msg = 'Username must contain only characters and numbers!' flash(msg) else: # Account doesnt exists and the form data is valid, now insert new account into users table hashed_password = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt()) cursor.execute('INSERT INTO doctors VALUES (NULL, %s, %s, %s, %s, %s, %s, %s, %s, %s)', ( username, hashed_password, email, full_name, registration_number, contact_number, "Default Hospital" , spec, address )) msg = 'You have successfully registered!' 
cursor.execute('SELECT * FROM doctors WHERE Username = %s', (username)) # Fetch one record and return result account = cursor.fetchone() session['loggedin'] = True session['id'] = account[0] session['username'] = account[1] session['isdoctor'] = 1 return home() else: msg = 'Please fill out the form!' flash(msg) elif request.method == 'POST': # Form is empty... (no POST data) msg = 'Please fill out the form!' else: return home() # Show registration form with message (if any) return render_template('doctorlogin.html', msg=msg) #Doctor Login @app.route('/doclogin', methods=['GET', 'POST']) def doclogin(): if 'loggedin' not in session: # Output message if something goes wrong... msg = '' # Check if "username" and "password" POST requests exist (user submitted form) if request.method == 'POST' and 'username' in request.form and 'password' in request.form: # Create variables for easy access username = request.form['username'] password = request.form['password'] if(username and password): # Check if account exists using MySQL cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM doctors WHERE Username = %s', (username)) # Fetch one record and return result account = cursor.fetchone() # If account exists in accounts table in out database if account: if bcrypt.checkpw(password.encode('utf-8'), account[2].encode('utf-8')): # Create session data, we can access this data in other routes session['loggedin'] = True session['id'] = account[0] session['username'] = account[1] session['isdoctor'] = 1 # Redirect to home page return home() else: # Account doesnt exist or username/password incorrect msg = 'Incorrect username/password!' flash(msg) else: # Account doesnt exist or username/password incorrect msg = 'Incorrect username/password!' flash(msg) else: msg = 'Please provide both username and password!' 
flash(msg) else: return home() # Show the login form with message (if any) return render_template('doctorlogin.html', msg=msg) #BMI for the dashboard(Written by Mayank) @app.route('/bmi',methods=['GET', 'POST']) def bmi(): if 'loggedin' in session: result=0 cursor = mysql.get_db().cursor() if session["isdoctor"]: cursor.execute('SELECT * FROM doctors WHERE ID = %s', [session['id']]) else: cursor.execute('SELECT * FROM users WHERE ID = %s', [session['id']]) account = cursor.fetchone() if request.method=='POST': h=request.form["height"] w=request.form["weight"] if h and w: h=float(h) h = h/100 w=float(w) result=w/(h*h) result=round(result,2) return render_template('bmi.html',ans=result,account=account, height=h, weight=w) else: msg = 'Please provide height and weight' flash(msg) return render_template('bmi.html',ans=result,account=account) return redirect(url_for('login')) #Diagnose based on Symptoms First Step @app.route('/diagnose') def diagnose(): # Check if user is loggedin if 'loggedin' in session: cursor = mysql.get_db().cursor() if session["isdoctor"]: cursor.execute('SELECT * FROM doctors WHERE ID = %s', [session['id']]) else: cursor.execute('SELECT * FROM users WHERE ID = %s', [session['id']]) account = cursor.fetchone() return render_template('diagnose.html', account=account) # User is not loggedin redirect to login page return redirect(url_for('login')) #Diagnose based on Symptoms Second Step @app.route('/diagnoseproceed',methods=['GET','POST']) def diagnoseproceed(): # Check if user is loggedin if 'loggedin' in session: cursor = mysql.get_db().cursor() if session["isdoctor"]: cursor.execute('SELECT * FROM doctors WHERE ID = %s', [session['id']]) else: cursor.execute('SELECT * FROM users WHERE ID = %s', [session['id']]) account = cursor.fetchone() cursor.execute('SELECT * FROM symptoms ORDER BY Symptom_Name ASC') sym = cursor.fetchall() sym1=[] for i in sym: sym1.append(i) symptoms=[] #return str(sym1[0]) for i in sym1: #return str(i[1]) 
k=str(i[1]).split("_") l="" if(len(k)>1): for i in k: l=l+i.capitalize()+" " symptoms.append(l) else: l=l+k[0].capitalize() symptoms.append(l) if(request.method == 'POST'): n = int(request.form['n']) return render_template('diagnoseproceed.html', account=account,n=n,symptoms=symptoms,sym1=sym1) # User is not loggedin redirect to login page return redirect(url_for('login')) #Diagnose based on Symptoms Third Step @app.route('/diagnosefinal',methods=['GET','POST']) def diagnosefinal(): # Check if user is loggedin if 'loggedin' in session: cursor = mysql.get_db().cursor() if session["isdoctor"]: cursor.execute('SELECT * FROM doctors WHERE ID = %s', [session['id']]) else: cursor.execute('SELECT * FROM users WHERE ID = %s', [session['id']]) account = cursor.fetchone() if(request.method == 'POST'): n = int(request.form['n']) l=[] data = pd.read_csv("Manual-Data/Training.csv") df = pd.DataFrame(data) cols = df.columns cols = cols[:-1] x = df[cols] y = df['prognosis'] features = cols feature_dict = {} filename = 'finalized_model.sav' for i,f in enumerate(features): feature_dict[f] = i for i in range(0,n): l.append(request.form['sym'+str(i)]) for i in l: s=i m=feature_dict[s] if (m!=0): sample_x = [i/m if i ==m else i*0 for i in range(len(features))] loaded_model = pickle.load(open(filename, 'rb')) sample_x = np.array(sample_x).reshape(1,len(sample_x)) p_disease=loaded_model.predict(sample_x) answer = p_disease[0] cursor1 = mysql.get_db().cursor() cursor1.execute('SELECT * FROM medicine WHERE Disease = %s', [answer]) medicine = cursor1.fetchone() cursor2 = mysql.get_db().cursor() cursor2.execute('SELECT * FROM doctor_fields WHERE Disease = %s', [answer]) special = cursor2.fetchone() return render_template('diagnosefinal.html', account=account,n=n,symptoms=l,answer=answer,medicine=medicine[2],special=special[2]) # User is not loggedin redirect to login page return redirect(url_for('login')) #Diagnose based on Natural Language 
@app.route('/diagnosedetails',methods=['GET','POST']) def diagnosedetails(): # Check if user is loggedin if 'loggedin' in session: cursor = mysql.get_db().cursor() if session["isdoctor"]: cursor.execute('SELECT * FROM doctors WHERE ID = %s', [session['id']]) else: cursor.execute('SELECT * FROM users WHERE ID = %s', [session['id']]) account = cursor.fetchone() if(request.method == 'POST'): filename = 'disease_predict.sav' feel = request.form['feel'] if feel: data = [feel] cv = pickle.load(open("vectorizer.pickle", 'rb')) #Load vectorizer loaded_model = pickle.load(open(filename, 'rb')) vect=cv.transform(data).toarray() p=loaded_model.predict(vect) return render_template('diagnoseanswerNLP.html',account=account,ans=p[0]) else: msg = 'Please provide an input' flash(msg) return render_template('diagnoseNLP.html',account=account) else: return render_template('diagnoseNLP.html',account=account) # User is not loggedin redirect to login page return redirect(url_for('login')) # Diagnose Based on the Cardiovascular problems @app.route('/diagnosecardio',methods=['GET','POST']) def diagnosecardio(): # Check if user is loggedin if 'loggedin' in session: cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM users WHERE ID = %s', [session['id']]) account = cursor.fetchone() if(request.method == 'POST'): filename = 'fcardio.sav' Ageorig = 2021 - 50 Current_date = date(2021,6,1) DOB = date(Ageorig,1,1) delta = Current_date - DOB Age = delta.days gender = request.form['Gender'] height = request.form['Height'] weight= request.form['weight'] # in kilograms systolicbloodpressure= request.form['Sys'] # Systolic blood pressure diastolicbloodpressure= request.form['Dys'] # Diastolic blood pressure cholesterol= request.form['Chol'] # 1: normal, 2: above normal, 3: well above normal gluc= request.form['Gluc'] # 1: normal, 2: above normal, 3: well above normal smoke= request.form['Smoke'] # 1 if you smoke, 0 if not alco= request.form['Alco'] # 1 if you drink alcohol, 0 if not 
active= request.form['Active'] # 1 if you do physical activity, 0 if not agedayscale=(Age-10798)/(23713-10798) heightscale=(int(height)-55)/(250-55) weightscale=(int(weight)-10)/(200-10) sbpscale=(int(systolicbloodpressure)-(-150))/(16020-(-150)) dbpscale=(int(diastolicbloodpressure)-(-70))/(11000-(-70)) cholesterolscale=(int(cholesterol)-1)/(3-1) glucscale=(int(gluc)-1)/(3-1) single=np.array([agedayscale, gender, heightscale, weightscale, sbpscale, dbpscale, cholesterolscale, glucscale, smoke, alco, active ]) # single=np.array([0.9999423466430055, # 9.194872153039131e-05, # 0.007769666969318066, # 0.003310153975094087, # 0.0055169232918234785, # 0.0036779488612156525, # 4.5974360765195655e-05, # 4.5974360765195655e-05, # 0.0, # 0.0, # 4.5974360765195655e-05]) singledf=pd.DataFrame(single) data=singledf.transpose() loaded_model = pickle.load(open(filename, 'rb')) p=loaded_model.predict(data) if(p[0]==0.0): resultcardio = "Negative" else: resultcardio = "Positive" return render_template('cardioanswer.html',account=account,ans=resultcardio) else: return render_template('cardiodetails.html',account=account) # User is not loggedin redirect to login page return redirect(url_for('login')) # Account information visible inside dashboard @app.route('/myaccount') def myaccount(): if 'loggedin' in session: cursor = mysql.get_db().cursor() if session["isdoctor"]: cursor.execute('SELECT * FROM doctors WHERE ID = %s', [session['id']]) else: cursor.execute('SELECT * FROM users WHERE ID = %s', [session['id']]) account = cursor.fetchone() return render_template('myaccount.html', account=account, isDoctor = session["isdoctor"]) else: return redirect(url_for('login')) #Hospitals near to the Address using GeoCoding @app.route('/hospitals') def hospitals(): # Check if user is loggedin if 'loggedin' in session: cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM users WHERE ID = %s', [session['id']]) account = cursor.fetchone() if(account is None): cursor = 
mysql.get_db().cursor() cursor.execute('SELECT * FROM doctors WHERE ID = %s', [session['id']]) account = cursor.fetchone() address = account[9] else: address = account[5] # enter your api key here API_KEY = 'your google api key' str1 = str(address).split(",") l="" for i in range(0,len(str1)): l=l+str1[i]+"+" send_url = 'https://maps.googleapis.com/maps/api/geocode/json?address='+l+'&key='+API_KEY r = requests.get(send_url) j = json.loads(r.text) lat = j['results'][0]['geometry']['location']['lat'] lon = j['results'][0]['geometry']['location']['lng'] # Initialising the GooglePlaces constructor google_places = GooglePlaces(API_KEY) query_result = google_places.nearby_search( lat_lng ={'lat': lat, 'lng': lon}, radius = 5000, types =[types.TYPE_HOSPITAL]) places = [] # Iterate over the search results for place in query_result.places: # print(type(place)) # place.get_details() places.append(place.name) #print("Latitude", place.geo_location['lat']) #print("Longitude", place.geo_location['lng']) return render_template('hospitals.html', places=places, account=account) # User is not loggedin redirect to login page return redirect(url_for('login')) #Set Hospital @app.route('/hospitalset',methods=['GET', 'POST']) def hospitalset(): # Check if user is loggedin if 'loggedin' in session: cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM doctors WHERE ID = %s', [session['id']]) account = cursor.fetchone() # enter your api key here API_KEY = 'your google api key' str1 = str(account[9]).split(",") l="" for i in range(0,len(str1)): l=l+str1[i]+"+" send_url = 'https://maps.googleapis.com/maps/api/geocode/json?address='+l+'&key='+API_KEY r = requests.get(send_url) j = json.loads(r.text) lat = j['results'][0]['geometry']['location']['lat'] lon = j['results'][0]['geometry']['location']['lng'] # Initialising the GooglePlaces constructor google_places = GooglePlaces(API_KEY) query_result = google_places.nearby_search( lat_lng ={'lat': lat, 'lng': lon}, radius = 5000, types 
=[types.TYPE_HOSPITAL]) places = [] # Iterate over the search results for place in query_result.places: places.append(place.name) if(request.method == 'POST'): hname = request.form['hname'] cursor = mysql.get_db().cursor() cursor.execute('UPDATE doctors SET Hospital_Name= %s WHERE ID= %s', [hname,session['id']]) return render_template('dashboard.html', account=account) return render_template('hospitalset.html', places=places, account=account) # User is not loggedin redirect to login page return redirect(url_for('login')) #Book an Appointment @app.route('/book',methods=['GET', 'POST']) def book(): # Check if user is loggedin if 'loggedin' in session: cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM users WHERE ID = %s', [session['id']]) account = cursor.fetchone() if(account is None): cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM doctors WHERE ID = %s', [session['id']]) account = cursor.fetchone() address = account[9] else: address = account[5] # enter your api key here API_KEY = 'your google api key' str1 = str(address).split(",") l="" for i in range(0,len(str1)): l=l+str1[i]+"+" send_url = 'https://maps.googleapis.com/maps/api/geocode/json?address='+l+'&key='+API_KEY r = requests.get(send_url) j = json.loads(r.text) lat = j['results'][0]['geometry']['location']['lat'] lon = j['results'][0]['geometry']['location']['lng'] # Initialising the GooglePlaces constructor google_places = GooglePlaces(API_KEY) query_result = google_places.nearby_search( lat_lng ={'lat': lat, 'lng': lon}, radius = 5000, types =[types.TYPE_HOSPITAL]) places = [] # Iterate over the search results for place in query_result.places: # print(type(place)) # place.get_details() places.append(place.name) #print("Latitude", place.geo_location['lat']) #print("Longitude", place.geo_location['lng']) if(request.method == 'POST'): hname = request.form['hname'] time = request.form['time'] cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM doctors WHERE 
Hospital_Name= %s', [hname]) doc = cursor.fetchone() cursor1 = mysql.get_db().cursor() cursor1.execute('INSERT INTO booking VALUES (NULL, %s, %s, %s, %s)', ( doc[0], session['id'], time, 0)) cursor2 = mysql.get_db().cursor() cursor2.execute('SELECT * FROM booking WHERE Patient_ID= %s', [session['id']]) l = cursor2.fetchall() print(l) return render_template('appointments.html', account=account,doc=doc,l=l) return render_template('book.html', places=places, account=account) # User is not loggedin redirect to login page return redirect(url_for('login')) #Appointments page for Patients @app.route('/appointments',methods=['GET', 'POST']) def appointments(): # Check if user is loggedin if 'loggedin' in session: cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM users WHERE ID = %s', [session['id']]) account = cursor.fetchone() if(account is None): cursor = mysql.get_db().cursor() cursor.execute('SELECT * FROM doctors WHERE ID = %s', [session['id']]) account = cursor.fetchone() address = account[9] else: address = account[5] cursor2 = mysql.get_db().cursor() cursor2.execute('SELECT * FROM booking WHERE Patient_ID= %s', [session['id']]) l = cursor2.fetchall() arr = [] for i in l: cursor3 = mysql.get_db().cursor() cursor3.execute('SELECT * FROM doctors WHERE ID= %s', [i[1]]) doc = cursor3.fetchone() arr.append([doc[1],doc[9]]) return render_template('appointments.html', account=account,l=l,arr=arr) # User is not loggedin redirect to login page return redirect(url_for('login')) """ Code for the Chat App which is based on Sockets.io """ socketio = SocketIO(app) #Main Chat Interface @app.route('/chat') def sessions(): return render_template('chat.html') #Log Success of Messages def messageReceived(methods=['GET', 'POST']): print('message was received!!!') #Handles sending and receiving of Messages @socketio.on('my event') def handle_my_custom_event(json, methods=['GET', 'POST']): print('received my event: ' + str(json)) socketio.emit('my response', json, 
callback=messageReceived) # http://localhost:5000/logout - this will be the logout page @app.route('/logout') def logout(): # Remove session data, this will log the user out session.pop('loggedin', None) session.pop('id', None) session.pop('username', None) # Redirect to login page return redirect(url_for('index')) #run the Flask Server if __name__ == '__main__': socketio.run(app, debug=True) """-------------------------------End of Web Application-------------------------------"""
39.838405
230
0.553179
db144d06380b4543c24214daec54d9b6c3a6900b
3,594
py
Python
home/migrations/0002_guess_logs_search_history_singup.py
licface-django/licshop
ad15782fd32a970b6c0818b91f652197422a8c2a
[ "Apache-2.0" ]
null
null
null
home/migrations/0002_guess_logs_search_history_singup.py
licface-django/licshop
ad15782fd32a970b6c0818b91f652197422a8c2a
[ "Apache-2.0" ]
null
null
null
home/migrations/0002_guess_logs_search_history_singup.py
licface-django/licshop
ad15782fd32a970b6c0818b91f652197422a8c2a
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Generated by Django 1.11.7 on 2017-11-27 03:44 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('home', '0001_initial'), ] operations = [ migrations.CreateModel( name='guess', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('access_time', models.DateTimeField(auto_now_add=True)), ('download_times', models.IntegerField()), ('ipaddress', models.GenericIPAddressField()), ('user_agent', models.CharField(max_length=200)), ], options={ 'db_table': 'guess', }, ), migrations.CreateModel( name='logs', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date_create', models.DateTimeField(auto_now_add=True)), ('level', models.IntegerField()), ('facility', models.IntegerField(blank=True, null=True)), ('ipaddress', models.GenericIPAddressField()), ('message', models.TextField(blank=True, null=True)), ('tag', models.CharField(max_length=50)), ], options={ 'db_table': 'logs', }, ), migrations.CreateModel( name='search_history', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('access_time', models.DateTimeField(auto_now_add=True)), ('download_times', models.IntegerField()), ('ipaddress', models.GenericIPAddressField()), ('user_agent', models.CharField(blank=True, max_length=200, null=True)), ], options={ 'db_table': 'search_history', }, ), migrations.CreateModel( name='singup', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(blank=True, max_length=50, null=True)), ('last_name', models.CharField(blank=True, max_length=50, null=True)), ('street', 
models.CharField(blank=True, max_length=50, null=True)), ('city', models.CharField(blank=True, max_length=20, null=True)), ('district', models.CharField(blank=True, max_length=20, null=True)), ('country', models.CharField(blank=True, max_length=20, null=True)), ('zipcode', models.CharField(blank=True, max_length=10, null=True)), ('facebook', models.CharField(blank=True, max_length=50, null=True)), ('twitter', models.CharField(blank=True, max_length=50, null=True)), ('image', models.ImageField(height_field=300, upload_to=b'', verbose_name='image_profile', width_field=300)), ('user_id', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'db_table': 'singup', }, ), ]
43.829268
132
0.565109
94c57c098b2d523504224a21da202a73c7e4d9fc
5,513
py
Python
contrib/seeds/makeseeds.py
larrycameron80/1UP
c60cf6a01cb73a7e27a78264681f0794f470d06c
[ "MIT" ]
6
2018-02-02T13:57:06.000Z
2018-03-14T08:51:19.000Z
contrib/seeds/makeseeds.py
ezaruba/1UP
c60cf6a01cb73a7e27a78264681f0794f470d06c
[ "MIT" ]
1
2018-01-09T17:02:21.000Z
2018-01-09T17:02:21.000Z
contrib/seeds/makeseeds.py
ezaruba/1UP
c60cf6a01cb73a7e27a78264681f0794f470d06c
[ "MIT" ]
14
2017-12-29T00:50:51.000Z
2022-03-11T21:17:27.000Z
#!/usr/bin/env python3 # Copyright (c) 2013-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Generate seeds.txt from Pieter's DNS seeder # NSEEDS=512 MAX_SEEDS_PER_ASN=2 MIN_BLOCKS = 615801 # These are hosts that have been observed to be behaving strangely (e.g. # aggressively connecting to every node). SUSPICIOUS_HOSTS = { "" } import re import sys import dns.resolver import collections PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$") PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$") PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$") PATTERN_AGENT = re.compile(r"^(/1UPCore:2.2.(0|1|99)/)$") def parseline(line): sline = line.split() if len(sline) < 11: return None m = PATTERN_IPV4.match(sline[0]) sortkey = None ip = None if m is None: m = PATTERN_IPV6.match(sline[0]) if m is None: m = PATTERN_ONION.match(sline[0]) if m is None: return None else: net = 'onion' ipstr = sortkey = m.group(1) port = int(m.group(2)) else: net = 'ipv6' if m.group(1) in ['::']: # Not interested in localhost return None ipstr = m.group(1) sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds port = int(m.group(2)) else: # Do IPv4 sanity check ip = 0 for i in range(0,4): if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255: return None ip = ip + (int(m.group(i+2)) << (8*(3-i))) if ip == 0: return None net = 'ipv4' sortkey = ip ipstr = m.group(1) port = int(m.group(6)) # Skip bad results. if sline[1] == 0: return None # Extract uptime %. uptime30 = float(sline[7][:-1]) # Extract Unix timestamp of last success. lastsuccess = int(sline[2]) # Extract protocol version. version = int(sline[10]) # Extract user agent. if len(sline) > 11: agent = sline[11][1:] + sline[12][:-1] else: agent = sline[11][1:-1] # Extract service flags. 
service = int(sline[9], 16) # Extract blocks. blocks = int(sline[8]) # Construct result. return { 'net': net, 'ip': ipstr, 'port': port, 'ipnum': ip, 'uptime': uptime30, 'lastsuccess': lastsuccess, 'version': version, 'agent': agent, 'service': service, 'blocks': blocks, 'sortkey': sortkey, } def filtermultiport(ips): '''Filter out hosts with more nodes per IP''' hist = collections.defaultdict(list) for ip in ips: hist[ip['sortkey']].append(ip) return [value[0] for (key,value) in list(hist.items()) if len(value)==1] # Based on Greg Maxwell's seed_filter.py def filterbyasn(ips, max_per_asn, max_total): # Sift out ips by type ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4'] ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6'] ips_onion = [ip for ip in ips if ip['net'] == 'onion'] # Filter IPv4 by ASN result = [] asn_count = {} for ip in ips_ipv4: if len(result) == max_total: break try: asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0]) if asn not in asn_count: asn_count[asn] = 0 if asn_count[asn] == max_per_asn: continue asn_count[asn] += 1 result.append(ip) except: sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n') # TODO: filter IPv6 by ASN # Add back non-IPv4 result.extend(ips_ipv6) result.extend(ips_onion) return result def main(): lines = sys.stdin.readlines() ips = [parseline(line) for line in lines] # Skip entries with valid address. ips = [ip for ip in ips if ip is not None] # Skip entries from suspicious hosts. ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS] # Enforce minimal number of blocks. ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS] # Require service bit 1. ips = [ip for ip in ips if (ip['service'] & 1) == 1] # Require at least 50% 30-day uptime. ips = [ip for ip in ips if ip['uptime'] > 50] # Require a known and recent user agent. 
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))] # Sort by availability (and use last success as tie breaker) ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True) # Filter out hosts with multiple bitcoin ports, these are likely abusive ips = filtermultiport(ips) # Look up ASNs and limit results, both per ASN and globally. ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS) # Sort the results by IP address (for deterministic output). ips.sort(key=lambda x: (x['net'], x['sortkey'])) for ip in ips: if ip['net'] == 'ipv6': print('[%s]:%i' % (ip['ip'], ip['port'])) else: print('%s:%i' % (ip['ip'], ip['port'])) if __name__ == '__main__': main()
32.052326
186
0.566842
c2596f5e5e4477e11b4fc144e0249d5ea6aed3ff
1,000
py
Python
setup.py
oksumoron/locust
fddfefe7ef1082bc5284cd2dd8477221484dfb0c
[ "MIT" ]
2
2021-02-14T22:33:59.000Z
2021-02-14T23:36:18.000Z
setup.py
oksumoron/locust
fddfefe7ef1082bc5284cd2dd8477221484dfb0c
[ "MIT" ]
1
2021-03-01T13:47:36.000Z
2021-03-01T13:47:36.000Z
setup.py
oksumoron/locust
fddfefe7ef1082bc5284cd2dd8477221484dfb0c
[ "MIT" ]
1
2020-07-21T10:49:05.000Z
2020-07-21T10:49:05.000Z
# -*- coding: utf-8 -*- import ast import os import re import sys from setuptools import find_packages, setup ROOT_PATH = os.path.abspath(os.path.dirname(__file__)) # parse version from locust/__init__.py _version_re = re.compile(r"__version__\s+=\s+(.*)") _init_file = os.path.join(ROOT_PATH, "locust", "__init__.py") with open(_init_file, "rb") as f: version = str(ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1))) setup( name="locust", version=version, install_requires=[ "gevent>=20.9.0", "flask>=1.1.2", "Werkzeug>=1.0.1", "requests>=2.9.1", "msgpack>=0.6.2", "pyzmq>=16.0.2", "geventhttpclient>=1.4.4", "ConfigArgParse>=1.0", "psutil>=5.6.7", "Flask-BasicAuth>=0.2.0", ], test_suite="locust.test", tests_require=[ "cryptography", "mock", "pyquery", ], extras_require={ ":sys_platform == 'win32'": ["pywin32"], }, )
23.809524
90
0.581
2f6c42d47942e4bac3f9eb2a2627df6605e22aac
3,160
py
Python
pretrainer.py
yishayahu/covid19_weak_supervision
9f2b2b5f13ac0fd69ad2f54304954f8039315760
[ "Apache-2.0" ]
null
null
null
pretrainer.py
yishayahu/covid19_weak_supervision
9f2b2b5f13ac0fd69ad2f54304954f8039315760
[ "Apache-2.0" ]
null
null
null
pretrainer.py
yishayahu/covid19_weak_supervision
9f2b2b5f13ac0fd69ad2f54304954f8039315760
[ "Apache-2.0" ]
null
null
null
import libauc import segmentation_models_pytorch from libauc.datasets import CheXpert from libauc.models import DenseNet121 import torch from PIL import Image import numpy as np import torchvision.transforms as transforms from torch import nn from libauc.losses import AUCM_MultiLabel, CrossEntropyLoss from torch.optim import Adam from torch.utils.data import Dataset from sklearn.metrics import roc_auc_score from tqdm import tqdm def set_all_seeds(SEED): # REPRODUCIBILITY torch.manual_seed(SEED) np.random.seed(SEED) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False # dataloader # paramaters SEED = 123 BATCH_SIZE = 16 lr = 1e-4 weight_decay = 1e-5 device = 'cuda:0' if torch.cuda.is_available() else 'cpu' root = '/mnt/dsi_vol1/shaya/' # Index: -1 denotes multi-label mode including 5 diseases traindSet = CheXpert(csv_path=root+'train.csv', image_root_path=root, use_upsampling=False, use_frontal=True, image_size=384, mode='train', class_index=-1) testSet = CheXpert(csv_path=root+'valid.csv', image_root_path=root, use_upsampling=False, use_frontal=True, image_size=384, mode='valid', class_index=-1) trainloader = torch.utils.data.DataLoader(traindSet, batch_size=BATCH_SIZE, shuffle=True) testloader = torch.utils.data.DataLoader(testSet, batch_size=BATCH_SIZE, shuffle=False) # model set_all_seeds(SEED) model = segmentation_models_pytorch.Unet( encoder_name="densenet121", encoder_weights="imagenet", classes=1,aux_params ={'classes':5} ) model = model.to(device) # yy = DenseNet121(num_classes=5) # define loss & optimizer CELoss = CrossEntropyLoss() optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay) # training best_val_auc = 0 for epoch in range(1): for idx, data in tqdm(enumerate(trainloader),desc='train',total=len(trainloader)): train_data, train_labels = data train_data, train_labels = train_data.to(device), train_labels.to(device) y_pred = model(train_data)[1] loss = CELoss(y_pred, train_labels) optimizer.zero_grad() 
loss.backward() optimizer.step() # validation if idx % 400 == 0: model.eval() with torch.no_grad(): test_pred = [] test_true = [] for jdx, data in tqdm(enumerate(testloader),desc='val'): test_data, test_labels = data test_data = test_data.to(device) y_pred = model(test_data)[1] test_pred.append(y_pred.cpu().detach().numpy()) test_true.append(test_labels.numpy()) test_true = np.concatenate(test_true) test_pred = np.concatenate(test_pred) val_auc_mean = roc_auc_score(test_true, test_pred) model.train() if best_val_auc < val_auc_mean: best_val_auc = val_auc_mean torch.save(model.state_dict(), 'ce_pretrained_model.pth') print ('Epoch=%s, BatchID=%s, Val_AUC=%.4f, Best_Val_AUC=%.4f'%(epoch, idx, val_auc_mean, best_val_auc ))
35.505618
155
0.679114
0bf3ed3fbc178e1fd5a5349e04b723b9ef7fe3c7
1,876
py
Python
kaolin/metrics/voxelgrid.py
mcx/kaolin
abe006921b5d522ecd0f7c5e30abe760a4459dc7
[ "ECL-2.0", "Apache-2.0" ]
1
2022-03-09T12:18:41.000Z
2022-03-09T12:18:41.000Z
kaolin/metrics/voxelgrid.py
mcx/kaolin
abe006921b5d522ecd0f7c5e30abe760a4459dc7
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
kaolin/metrics/voxelgrid.py
mcx/kaolin
abe006921b5d522ecd0f7c5e30abe760a4459dc7
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# Copyright (c) 2019,20 NVIDIA CORPORATION & AFFILIATES. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch def iou(pred, gt): r"""Computes IoU across two voxelgrids Arguments: pred (torch.Tensor): predicted (binary) voxelgrids, of shape :math:`(\text{batch_size}, \text{X}, \text{Y}, \text{Z})`. gt (torch.Tensor): ground-truth (binary) voxelgrids, of shape :math:`(\text{batch_size}, \text{X}, \text{Y}, \text{Z})`. Returns: (torch.FloatTensor): the intersection over union value. Example: >>> pred = torch.tensor([[[[0., 0.], ... [1., 1.]], ... [[1., 1.], ... [1., 1.]]]]) >>> gt = torch.ones((1,2,2,2)) >>> iou(pred, gt) tensor([0.7500]) """ if pred.shape != gt.shape: raise ValueError( f"Expected predicted voxelgrids and ground truth voxelgrids to have " f"the same shape, but got {pred.shape} for predicted and {gt.shape} for ground truth.") pred = pred.bool() gt = gt.bool() intersection = torch.sum(torch.logical_and(pred, gt), dim=(1, 2, 3)).float() union = torch.sum(torch.logical_or(pred, gt), dim=(1, 2, 3)).float() return intersection / union
36.784314
99
0.592751
4b60bdfb2468eba863b282e9bf26951034ead395
2,724
py
Python
salt/grains/esxi.py
jeremysnyder/salt
b95213ec903402f25c1e0aeb3990fe8452ab63ce
[ "Apache-2.0" ]
1
2020-07-06T01:33:23.000Z
2020-07-06T01:33:23.000Z
salt/grains/esxi.py
jeremysnyder/salt
b95213ec903402f25c1e0aeb3990fe8452ab63ce
[ "Apache-2.0" ]
null
null
null
salt/grains/esxi.py
jeremysnyder/salt
b95213ec903402f25c1e0aeb3990fe8452ab63ce
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """ Generate baseline proxy minion grains for ESXi hosts. .. versionadded:: 2015.8.4 """ # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt Libs import salt.utils.platform from salt.exceptions import SaltSystemExit if salt.utils.platform.is_proxy() and __opts__["proxy"]["proxytype"] == "esxi": import salt.modules.vsphere __proxyenabled__ = ["esxi"] __virtualname__ = "esxi" log = logging.getLogger(__file__) GRAINS_CACHE = {} def __virtual__(): try: if salt.utils.platform.is_proxy() and __opts__["proxy"]["proxytype"] == "esxi": return __virtualname__ except KeyError: pass return False def esxi(): return _grains() def kernel(): return {"kernel": "proxy"} def os(): if not GRAINS_CACHE: GRAINS_CACHE.update(_grains()) try: return {"os": GRAINS_CACHE.get("fullName")} except AttributeError: return {"os": "Unknown"} def os_family(): return {"os_family": "proxy"} def _find_credentials(host): """ Cycle through all the possible credentials and return the first one that works. """ user_names = [__pillar__["proxy"].get("username", "root")] passwords = __pillar__["proxy"]["passwords"] for user in user_names: for password in passwords: try: # Try to authenticate with the given user/password combination ret = salt.modules.vsphere.system_info( host=host, username=user, password=password ) except SaltSystemExit: # If we can't authenticate, continue on to try the next password. continue # If we have data returned from above, we've successfully authenticated. if ret: return user, password # We've reached the end of the list without successfully authenticating. raise SaltSystemExit( "Cannot complete login due to an incorrect user name or password." ) def _grains(): """ Get the grains from the proxied device. 
""" try: host = __pillar__["proxy"]["host"] if host: username, password = _find_credentials(host) protocol = __pillar__["proxy"].get("protocol") port = __pillar__["proxy"].get("port") ret = salt.modules.vsphere.system_info( host=host, username=username, password=password, protocol=protocol, port=port, ) GRAINS_CACHE.update(ret) except KeyError: pass return GRAINS_CACHE
24.540541
87
0.608664
209f45a9894f176f23d4c3223d3b78297dd5e612
994
py
Python
isi_sdk_8_0_1/test/test_file_filter_settings_extended.py
mohitjain97/isilon_sdk_python
a371f438f542568edb8cda35e929e6b300b1177c
[ "Unlicense" ]
24
2018-06-22T14:13:23.000Z
2022-03-23T01:21:26.000Z
isi_sdk_8_0_1/test/test_file_filter_settings_extended.py
mohitjain97/isilon_sdk_python
a371f438f542568edb8cda35e929e6b300b1177c
[ "Unlicense" ]
46
2018-04-30T13:28:22.000Z
2022-03-21T21:11:07.000Z
isi_sdk_8_0_1/test/test_file_filter_settings_extended.py
mohitjain97/isilon_sdk_python
a371f438f542568edb8cda35e929e6b300b1177c
[ "Unlicense" ]
29
2018-06-19T00:14:04.000Z
2022-02-08T17:51:19.000Z
# coding: utf-8 """ Isilon SDK Isilon SDK - Language bindings for the OneFS API # noqa: E501 OpenAPI spec version: 4 Contact: sdk@isilon.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import isi_sdk_8_0_1 from isi_sdk_8_0_1.models.file_filter_settings_extended import FileFilterSettingsExtended # noqa: E501 from isi_sdk_8_0_1.rest import ApiException class TestFileFilterSettingsExtended(unittest.TestCase): """FileFilterSettingsExtended unit test stubs""" def setUp(self): pass def tearDown(self): pass def testFileFilterSettingsExtended(self): """Test FileFilterSettingsExtended""" # FIXME: construct object with mandatory attributes with example values # model = isi_sdk_8_0_1.models.file_filter_settings_extended.FileFilterSettingsExtended() # noqa: E501 pass if __name__ == '__main__': unittest.main()
24.243902
111
0.732394
316df7ae05c7793abab993e1f943268ba9daac3e
8,957
py
Python
test_scripts/ns_instance/duan/service/vfc/nfvo/lcm/lcm/ns/biz/ns_heal.py
lremember/VFC
837559db1396091811382359100bfc60e1aab5b2
[ "MIT" ]
4
2018-08-29T02:51:38.000Z
2021-11-16T11:36:11.000Z
test_scripts/ns_instance/duan/service/vfc/nfvo/lcm/lcm/ns/biz/ns_heal.py
lremember/VFC-Files
837559db1396091811382359100bfc60e1aab5b2
[ "MIT" ]
null
null
null
test_scripts/ns_instance/duan/service/vfc/nfvo/lcm/lcm/ns/biz/ns_heal.py
lremember/VFC-Files
837559db1396091811382359100bfc60e1aab5b2
[ "MIT" ]
1
2019-05-12T08:21:19.000Z
2019-05-12T08:21:19.000Z
# Copyright 2017 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import logging import threading import time import traceback from lcm.ns.enum import NS_INST_STATUS from lcm.pub.database.models import JobModel, NSInstModel, NfInstModel, VNFCInstModel, VmInstModel from lcm.pub.exceptions import NSLCMException from lcm.pub.utils.jobutil import JobUtil from lcm.jobs.enum import JOB_MODEL_STATUS, JOB_PROGRESS from lcm.pub.utils.values import ignore_case_get from lcm.ns_vnfs.biz.heal_vnfs import NFHealService from lcm.ns.biz.ns_lcm_op_occ import NsLcmOpOcc logger = logging.getLogger(__name__) class NSHealService(threading.Thread): def __init__(self, ns_instance_id, request_data, job_id): super(NSHealService, self).__init__() self.ns_instance_id = ns_instance_id self.request_data = request_data self.job_id = job_id self.occ_id = NsLcmOpOcc.create(ns_instance_id, "HEAL", "PROCESSING", False, request_data) self.heal_vnf_data = '' self.heal_ns_data = '' def run(self): try: self.do_biz() except NSLCMException as e: JobUtil.add_job_status(self.job_id, JOB_PROGRESS.ERROR, e.args[0]) NsLcmOpOcc.update(self.occ_id, operationState="FAILED", error=e.args[0]) except Exception as e: logger.error(traceback.format_exc()) JobUtil.add_job_status(self.job_id, JOB_PROGRESS.ERROR, 'ns heal fail') NsLcmOpOcc.update(self.occ_id, operationState="FAILED", error=e.args[0]) def do_biz(self): self.update_job(1, desc='ns heal start') self.get_and_check_params() 
self.update_ns_status(NS_INST_STATUS.HEALING) self.do_heal() self.update_ns_status(NS_INST_STATUS.ACTIVE) self.update_job(100, desc='ns heal success') NsLcmOpOcc.update(self.occ_id, "COMPLETED") def get_and_check_params(self): ns_info = NSInstModel.objects.filter(id=self.ns_instance_id) if not ns_info: errmsg = 'NS [id=%s] does not exist' % self.ns_instance_id raise NSLCMException(errmsg) self.heal_ns_data = ignore_case_get(self.request_data, 'healNsData') self.heal_vnf_data = ignore_case_get(self.request_data, 'healVnfData') if self.heal_ns_data and self.heal_vnf_data: errmsg = 'healNsData and healVnfData can not exist together' logger.error(errmsg) raise NSLCMException(errmsg) if not self.heal_ns_data and not self.heal_vnf_data: errmsg = 'healNsData and healVnfData parameters does not exist or value is incorrect.' raise NSLCMException(errmsg) def do_heal(self): if self.heal_vnf_data: vnf_heal_params = self.prepare_vnf_heal_params(self.heal_vnf_data) status = self.do_vnf_or_ns_heal(vnf_heal_params, 15) if status is JOB_MODEL_STATUS.FINISHED: logger.info('nf[%s] heal handle end' % vnf_heal_params.get('vnfInstanceId')) self.update_job(90, desc='nf[%s] heal handle end' % vnf_heal_params.get('vnfInstanceId')) else: errmsg = 'nf heal failed' raise NSLCMException(errmsg) else: ns_heal_params = self.prepare_ns_heal_params(self.heal_ns_data) for ns_heal_param in ns_heal_params: status = self.do_vnf_or_ns_heal(ns_heal_param, 15) if status is JOB_MODEL_STATUS.FINISHED: logger.info('nf[%s] heal handle end' % ns_heal_param.get('vnfInstanceId')) self.update_job(90, desc='nf[%s] heal handle end' % ns_heal_param.get('vnfInstanceId')) else: errmsg = 'nf heal failed' logger.error(errmsg) raise NSLCMException(errmsg) def do_vnf_or_ns_heal(self, heal_param, progress): instance_id = heal_param.get('vnfInstanceId') nf_service = NFHealService(instance_id, heal_param) nf_service.start() self.update_job( progress, desc='nf[%s] heal handle start' % instance_id) status = 
self.wait_job_finish(nf_service.job_id) return status def prepare_ns_heal_params(self, ns_data): degree_healing = ignore_case_get(ns_data, 'degreeHealing') if not degree_healing: errmsg = 'degreeHealing does not exist.' logger.error(errmsg) raise NSLCMException(errmsg) ns_instance_id = self.ns_instance_id cause = 'vm is down' # action = ignore_case_get(ns_data, 'actionsHealing') if degree_healing == "HEAL_RESTORE": ns_inst_infos = NfInstModel.objects.filter(ns_inst_id=self.ns_instance_id) if not ns_inst_infos.exists(): raise NSLCMException('NSInsts(%s) does not exist' % self.ns_instance_id) result_arr = [] for ns_inst_info in ns_inst_infos: vnfc_insts = VNFCInstModel.objects.filter(nfinstid=ns_inst_info.nfinstid) # If a condition is not met, will it all terminate? if not vnfc_insts.exists(): raise NSLCMException('vnfcinsts(%s) does not exist' % ns_inst_info.nfinstid) for vnfc_inst in vnfc_insts: vm_id = vnfc_inst.vmid vdu_id = vnfc_inst.vduid vm_inst_info = VmInstModel.objects.filter(vmid=vm_id) if not vm_inst_info.exists(): raise NSLCMException('vminstinfo(%s) does not exist' % vm_id) vm_name = vm_inst_info[0].vmname result = { "vnfInstanceId": ns_instance_id, "cause": cause, "additionalParams": { "action": "restartvm", "actionvminfo": { "vmid": vm_id, "vduid": vdu_id, "vmname": vm_name } } } result_arr.append(result) return result_arr else: errmsg = 'The degree of healing dose not exist or value is incorrect.' logger.error(errmsg) raise NSLCMException(errmsg) def prepare_vnf_heal_params(self, vnf_data): vnf_instance_id = ignore_case_get(vnf_data, 'vnfInstanceId') if not vnf_instance_id: errmsg = 'vnfinstanceid does not exist or value is incorrect.' 
logger.error(errmsg) raise NSLCMException(errmsg) cause = ignore_case_get(vnf_data, 'cause') additional_params = ignore_case_get(vnf_data, 'additionalParams') action = ignore_case_get(additional_params, 'action') action_vm_info = ignore_case_get(additional_params, 'actionvminfo') vm_id = ignore_case_get(action_vm_info, 'vmid') vdu_id = ignore_case_get(action_vm_info, 'vduid') vm_name = ignore_case_get(action_vm_info, 'vmname') result = { "vnfInstanceId": vnf_instance_id, "cause": cause, "additionalParams": { "action": action, "actionvminfo": { "vmid": vm_id, "vduid": vdu_id, "vmname": vm_name } } } return result @staticmethod def wait_job_finish(sub_job_id, timeout=3600): query_interval = 2 start_time = end_time = datetime.datetime.now() while (end_time - start_time).seconds < timeout: job_result = JobModel.objects.get(jobid=sub_job_id) time.sleep(query_interval) end_time = datetime.datetime.now() if job_result.progress == JOB_PROGRESS.FINISHED: return JOB_MODEL_STATUS.FINISHED elif job_result.progress > JOB_PROGRESS.FINISHED: return JOB_MODEL_STATUS.ERROR else: continue return JOB_MODEL_STATUS.TIMEOUT def update_job(self, progress, desc=''): JobUtil.add_job_status(self.job_id, progress, desc) def update_ns_status(self, status): NSInstModel.objects.filter( id=self.ns_instance_id).update(status=status)
43.0625
107
0.621413
85da8e7ae7746f53e78ea46a85a8dd127f77c50e
16,936
py
Python
server.py
matanhaller/Pype
fd39944a9cc25020c55c42f6205bb10e4e0fce4b
[ "MIT" ]
1
2018-01-03T08:55:38.000Z
2018-01-03T08:55:38.000Z
server.py
matanhaller/Pype
fd39944a9cc25020c55c42f6205bb10e4e0fce4b
[ "MIT" ]
2
2018-01-03T08:51:46.000Z
2018-01-03T08:54:43.000Z
server.py
matanhaller/Pype
fd39944a9cc25020c55c42f6205bb10e4e0fce4b
[ "MIT" ]
1
2018-01-03T08:50:32.000Z
2018-01-03T08:50:32.000Z
"""Central server of app. """ # Imports import logging import socket import select import json from task import Task from user import User from call import Call class PypeServer(object): """App server class. Attributes: ADDR (tuple): Address to which the server is bound. call_dct (dict): Dictionary mapping call master to call object. conn_dct (dict): Dictionary mapping all active connections to their addresses. LISTEN_QUEUE_SIZE (int): Number of connections that server can queue before accepting (5 is typically enough). (static) logger (logging.Logger): Logging object. MAX_RECV_SIZE (int): Maximum number of bytes to receive at once. multicast_addr_counter (int): Counter of the number of used multicast addresses. multicast_addr_lst (list): List of already used multicast addresses. server_listener (socket.socket): Server socket. (static) task_lst (list): List of all pending tasks. user_dct (dict): Dictionary mapping username to user object. """ ADDR = ('', 5050) LISTEN_QUEUE_SIZE = 5 MAX_RECV_SIZE = 65536 def __init__(self): """Constructor method. """ # Configuring logger logging.basicConfig( format='[%(asctime)s]%(levelname)s: %(message)s', datefmt='%d-%m-%Y %H:%M:%S') self.logger = logging.getLogger(__name__) self.logger.setLevel(logging.INFO) self.server_listener = socket.socket( socket.AF_INET, socket.SOCK_STREAM) self.server_listener.bind(PypeServer.ADDR) self.server_listener.listen(PypeServer.LISTEN_QUEUE_SIZE) self.conn_dct = {} self.task_lst = [] self.user_dct = {} self.call_dct = {} self.multicast_addr_lst = [] self.multicast_addr_counter = 0 def run(self): """Server mainloop method. """ while True: read_lst, write_lst, err_lst = select.select( self.conn_dct.keys() + [self.server_listener], *((self.conn_dct.keys(),) * 2)) # Handling readables self.handle_readables(read_lst) # Handling tasks self.handle_tasks(write_lst) def handle_readables(self, read_lst): """Handles all readable connections in mainloop. Args: read_lst (list): Readable connections list. 
""" for conn in read_lst: # Handling new connections if conn is self.server_listener: new_conn, addr = self.server_listener.accept() self.conn_dct[new_conn] = addr self.logger.info('{} connected.'.format(addr)) else: raw_data = conn.recv(PypeServer.MAX_RECV_SIZE) # Closing socket if disconnected if not raw_data: user = self.get_user_from_conn(conn) if user: del self.user_dct[user.name] # Notifying other users that user has left self.report_user_update( subtype='leave', name=user.name) # Removing user from call if participated if user.call: self.handle_call_user_leave(user) self.logger.info('{} left.'.format(user.name)) self.logger.info( '{} disconnected.'.format(self.conn_dct[conn])) del self.conn_dct[conn] conn.close() else: # Parsing JSON data data_lst = self.get_jsons(raw_data) # Handling messages for data in data_lst: # print data # Join request/response if data['type'] == 'join': # Checking if username already exists if data['name'] in self.user_dct: self.task_lst.append(Task(conn, { 'type': 'join', 'subtype': 'response', 'status': 'no' })) else: # Creating new user self.user_dct[data['name']] = User( conn=conn, **data) # Sending relevant info to new user user_info_lst, call_info_lst = [], [] for username in self.user_dct: if username != data['name']: user_info_lst.append({ 'name': username, 'status': self.user_dct[username].status, }) for master in self.call_dct: call_info_lst.append({ 'master': master, 'user_lst': self.call_dct[master].user_lst }) self.task_lst.append(Task(conn, { 'type': 'join', 'subtype': 'response', 'status': 'ok', 'name': data['name'], 'user_info_lst': user_info_lst, 'call_info_lst': call_info_lst })) self.logger.info( '{} joined.'.format(data['name'])) # Reporting user join to other users self.report_user_update(subtype='join', **data) # Call request/response elif data['type'] == 'call': # Call request if data['subtype'] == 'request': caller = self.get_user_from_conn(conn) for username in [caller.name, data['callee']]: if 
self.user_dct[username].status == 'available': self.user_dct[username].switch_status() self.report_user_update( subtype='status', name=username) self.task_lst.append(Task(self.user_dct[ data['callee']].conn, { 'type': 'call', 'subtype': 'participate', 'caller': caller.name })) # Call response elif data['subtype'] == 'callee_response': caller = data['caller'] callee = self.get_user_from_conn(conn) response_msg = { 'type': 'call', 'subtype': 'callee_response', 'status': data['status'] } # Accept if data['status'] == 'accept': if self.user_dct[caller].call or callee.call: # Adding user to existing call if self.user_dct[caller].call: call = self.user_dct[caller].call callee.join_call(call) self.report_call_update( subtype='user_join', master=call.master, name=callee.name, addr=self.conn_dct[callee.conn]) self.logger.info( '{} joined a call.'.format(callee.name)) else: call = callee.call self.user_dct[ caller].join_call(call) self.report_call_update( subtype='user_join', master=call.master, name=caller, addr=self.conn_dct[self.user_dct[caller].conn]) self.logger.info( '{} joined a call.'.format(self.user_dct[caller].name)) else: # Creating new call call = Call({ 'audio': self.get_multicast_addr(), 'video': self.get_multicast_addr(), 'chat': self.get_multicast_addr() }) for user in [self.user_dct[caller], callee]: user.join_call(call) self.call_dct[caller] = call self.report_call_update( subtype='call_add', master=caller, user_lst=call.user_lst) self.logger.info('Call started, participants: {}.'.format( ', '.join(call.user_lst))) # Composing response message self.user_dct[caller].call = call response_msg['master'] = call.master response_msg['user_lst'] = call.user_lst response_msg['unicast_addrs'] = {user: self.conn_dct[ self.user_dct[user].conn] for user in call.user_lst} # Adding multicast addresses to response response_msg['addrs'] = call.addr_dct self.task_lst.append( Task(conn, response_msg)) # Reject else: callee = self.get_user_from_conn(conn) for username in 
[callee.name, data['caller']]: self.user_dct[username].switch_status() self.report_user_update( subtype='status', name=username) self.task_lst.append( Task(self.user_dct[caller].conn, response_msg)) # Session messages elif data['type'] == 'session': user = self.get_user_from_conn(conn) self.handle_call_user_leave(user) def handle_tasks(self, write_lst): """Iterates over tasks and sends messages if possible. Args: write_lst (list): Writable connections list. """ for task in self.task_lst: if task.conn in write_lst: task.send_msg() self.task_lst.remove(task) def get_jsons(self, raw_data): """Retreives JSON objects string. and parses it. Args: raw_data (str): Data to parse. Returns: list: Parsed JSON objects list. """ decoder = json.JSONDecoder() json_lst = [] while True: try: json_obj, end_index = decoder.raw_decode(raw_data) json_lst.append(json_obj) raw_data = raw_data[end_index:] except ValueError: break return json_lst def get_user_from_conn(self, conn): """Retreives user corresponding to connection (if exists). Args: conn (socket.socket): The connection to check. Returns: User: The user object corresponding to the connection (None if doesn't exist). """ for username in self.user_dct: if self.user_dct[username].conn is conn: return self.user_dct[username] return None def report_user_update(self, **kwargs): """Reports user join/leave/status change to other users. Args: **kwargs: Keyword arguments supplied in dictionary form. """ # Tweaking kwargs before message update if 'type' in kwargs: del kwargs['type'] if 'status' not in kwargs: kwargs['status'] = 'available' for active_user in self.user_dct: if kwargs['subtype'] in ['join', 'status']: if active_user == kwargs['name']: continue user_update_msg = { 'type': 'user_update', 'timestamp': None } user_update_msg.update(kwargs) self.task_lst.append( Task(self.user_dct[active_user].conn, user_update_msg)) def report_call_update(self, **kwargs): """Notifies users of changes in active calls. 
Args: **kwargs: Keyword arguments passed in dictionary form. """ # Tweaking kwargs before message update if 'type' in kwargs: del kwargs['type'] for active_user in self.user_dct: call_update_msg = { 'type': 'call_update', 'timestamp': None } call_update_msg.update(kwargs) self.task_lst.append( Task(self.user_dct[active_user].conn, call_update_msg)) def handle_call_user_leave(self, user): """Removes user from call and reports to other users. Args: user (User): User that left the call. """ # Changing call master if necessary call = user.call prev_master = call.master user.leave_call() del self.call_dct[prev_master] self.call_dct[call.master] = call self.logger.info('{} left a call.'.format(user.name)) # Removing call if user number reduced to 1 if len(call.user_lst) == 1: # Returning allocated addresses to list self.multicast_addr_lst += call.addr_dct.values() # Reporting call removal self.report_call_update( subtype='call_remove', master=prev_master) # Removing users from call del self.call_dct[call.master] for participant in call.user_lst: if participant in self.user_dct: self.user_dct[participant].switch_status() self.user_dct[participant].leave_call() self.report_user_update( subtype='status', name=participant) self.logger.info('A call ended.') else: self.report_call_update( subtype='user_leave', master=prev_master, new_master=call.master, name=user.name) # Changing user status if user.name in self.user_dct: user.switch_status() self.report_user_update( subtype='status', name=user.name) def get_multicast_addr(self): """Retreives a vacant multicast IP address. Returns: str: The given IP address. 
""" # If address list isn't empty, take address from there if self.multicast_addr_lst: return self.multicast_addr_lst.pop() # Else, take the next address in range self.multicast_addr_counter += 1 return '239.{}.{}.{}'.format(self.multicast_addr_counter / 65536, self.multicast_addr_counter / 256, self.multicast_addr_counter % 256) # Running server if __name__ == '__main__': PypeServer().run()
40.420048
108
0.447449
ee9e64defe6d9ee945a6537355e52804316761a7
935
py
Python
okaeri_sdk/aicensor/aicensor.py
OkaeriPoland/okaeri-sdk-python
6ecf04477f61587599ad4570bc27eee555be0817
[ "MIT" ]
null
null
null
okaeri_sdk/aicensor/aicensor.py
OkaeriPoland/okaeri-sdk-python
6ecf04477f61587599ad4570bc27eee555be0817
[ "MIT" ]
null
null
null
okaeri_sdk/aicensor/aicensor.py
OkaeriPoland/okaeri-sdk-python
6ecf04477f61587599ad4570bc27eee555be0817
[ "MIT" ]
null
null
null
from .model import AiCensorPredictionInfo from ..client import OkaeriClient, tostring, resolve_token, resolve_timeout, resolve_base_url @tostring class AiCensorError(BaseException): type: str message: str class AiCensor: def __init__(self, token: str, timeout: int = None, base_url: str = None): _token = resolve_token("OKAERI_SDK_AICENSOR_TOKEN", token) _base_url = base_url if base_url else resolve_base_url("OKAERI_SDK_AICENSOR_BASE_PATH", "https://ai-censor.okaeri.eu") _timeout = timeout if timeout else resolve_timeout("OKAERI_SDK_TIMEOUT", 5000) self._client = OkaeriClient({'Token': _token}, _base_url, timeout) def is_swear(self, phrase) -> bool: return self.get_prediction(phrase).general.swear def get_prediction(self, phrase) -> AiCensorPredictionInfo: return self._client.post("/predict", {'phrase': phrase}, AiCensorPredictionInfo, AiCensorError)
40.652174
126
0.737968
6f25996bec25ab39e2ac75e1047c888cd30e0b6b
8,123
py
Python
sdk/python/pulumi_azure_nextgen/solutions/v20200821preview/get_jit_request.py
test-wiz-sec/pulumi-azure-nextgen
20a695af0d020b34b0f1c336e1b69702755174cc
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_nextgen/solutions/v20200821preview/get_jit_request.py
test-wiz-sec/pulumi-azure-nextgen
20a695af0d020b34b0f1c336e1b69702755174cc
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_nextgen/solutions/v20200821preview/get_jit_request.py
test-wiz-sec/pulumi-azure-nextgen
20a695af0d020b34b0f1c336e1b69702755174cc
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs __all__ = [ 'GetJitRequestResult', 'AwaitableGetJitRequestResult', 'get_jit_request', ] @pulumi.output_type class GetJitRequestResult: """ Information about JIT request definition. """ def __init__(__self__, application_resource_id=None, created_by=None, jit_authorization_policies=None, jit_request_state=None, jit_scheduling_policy=None, location=None, name=None, provisioning_state=None, publisher_tenant_id=None, tags=None, type=None, updated_by=None): if application_resource_id and not isinstance(application_resource_id, str): raise TypeError("Expected argument 'application_resource_id' to be a str") pulumi.set(__self__, "application_resource_id", application_resource_id) if created_by and not isinstance(created_by, dict): raise TypeError("Expected argument 'created_by' to be a dict") pulumi.set(__self__, "created_by", created_by) if jit_authorization_policies and not isinstance(jit_authorization_policies, list): raise TypeError("Expected argument 'jit_authorization_policies' to be a list") pulumi.set(__self__, "jit_authorization_policies", jit_authorization_policies) if jit_request_state and not isinstance(jit_request_state, str): raise TypeError("Expected argument 'jit_request_state' to be a str") pulumi.set(__self__, "jit_request_state", jit_request_state) if jit_scheduling_policy and not isinstance(jit_scheduling_policy, dict): raise TypeError("Expected argument 'jit_scheduling_policy' to be a dict") pulumi.set(__self__, "jit_scheduling_policy", jit_scheduling_policy) if location and not isinstance(location, str): raise TypeError("Expected argument 'location' to be a str") pulumi.set(__self__, "location", location) if name and 
not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if provisioning_state and not isinstance(provisioning_state, str): raise TypeError("Expected argument 'provisioning_state' to be a str") pulumi.set(__self__, "provisioning_state", provisioning_state) if publisher_tenant_id and not isinstance(publisher_tenant_id, str): raise TypeError("Expected argument 'publisher_tenant_id' to be a str") pulumi.set(__self__, "publisher_tenant_id", publisher_tenant_id) if tags and not isinstance(tags, dict): raise TypeError("Expected argument 'tags' to be a dict") pulumi.set(__self__, "tags", tags) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) if updated_by and not isinstance(updated_by, dict): raise TypeError("Expected argument 'updated_by' to be a dict") pulumi.set(__self__, "updated_by", updated_by) @property @pulumi.getter(name="applicationResourceId") def application_resource_id(self) -> str: """ The parent application id. """ return pulumi.get(self, "application_resource_id") @property @pulumi.getter(name="createdBy") def created_by(self) -> 'outputs.ApplicationClientDetailsResponse': """ The client entity that created the JIT request. """ return pulumi.get(self, "created_by") @property @pulumi.getter(name="jitAuthorizationPolicies") def jit_authorization_policies(self) -> Sequence['outputs.JitAuthorizationPoliciesResponse']: """ The JIT authorization policies. """ return pulumi.get(self, "jit_authorization_policies") @property @pulumi.getter(name="jitRequestState") def jit_request_state(self) -> str: """ The JIT request state. """ return pulumi.get(self, "jit_request_state") @property @pulumi.getter(name="jitSchedulingPolicy") def jit_scheduling_policy(self) -> 'outputs.JitSchedulingPolicyResponse': """ The JIT request properties. 
""" return pulumi.get(self, "jit_scheduling_policy") @property @pulumi.getter def location(self) -> Optional[str]: """ Resource location """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> str: """ Resource name """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ The JIT request provisioning state. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publisherTenantId") def publisher_tenant_id(self) -> str: """ The publisher tenant id. """ return pulumi.get(self, "publisher_tenant_id") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ Resource tags """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> str: """ Resource type """ return pulumi.get(self, "type") @property @pulumi.getter(name="updatedBy") def updated_by(self) -> 'outputs.ApplicationClientDetailsResponse': """ The client entity that last updated the JIT request. """ return pulumi.get(self, "updated_by") class AwaitableGetJitRequestResult(GetJitRequestResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetJitRequestResult( application_resource_id=self.application_resource_id, created_by=self.created_by, jit_authorization_policies=self.jit_authorization_policies, jit_request_state=self.jit_request_state, jit_scheduling_policy=self.jit_scheduling_policy, location=self.location, name=self.name, provisioning_state=self.provisioning_state, publisher_tenant_id=self.publisher_tenant_id, tags=self.tags, type=self.type, updated_by=self.updated_by) def get_jit_request(jit_request_name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetJitRequestResult: """ Use this data source to access information about an existing resource. :param str jit_request_name: The name of the JIT request. 
:param str resource_group_name: The name of the resource group. The name is case insensitive. """ __args__ = dict() __args__['jitRequestName'] = jit_request_name __args__['resourceGroupName'] = resource_group_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:solutions/v20200821preview:getJitRequest', __args__, opts=opts, typ=GetJitRequestResult).value return AwaitableGetJitRequestResult( application_resource_id=__ret__.application_resource_id, created_by=__ret__.created_by, jit_authorization_policies=__ret__.jit_authorization_policies, jit_request_state=__ret__.jit_request_state, jit_scheduling_policy=__ret__.jit_scheduling_policy, location=__ret__.location, name=__ret__.name, provisioning_state=__ret__.provisioning_state, publisher_tenant_id=__ret__.publisher_tenant_id, tags=__ret__.tags, type=__ret__.type, updated_by=__ret__.updated_by)
38.866029
275
0.674997
dc8a7748b334210478a58677b45df1a3bc404ade
13,492
py
Python
ncc/tasks/translation/plbart_translation.py
iwangyuezhang/naturalcc
e9d9b4a296b61199fc35779b062db2205935a608
[ "MIT" ]
1
2022-03-30T14:45:42.000Z
2022-03-30T14:45:42.000Z
ncc/tasks/translation/plbart_translation.py
hrshy0629/naturalcc
9c3329dd8387c8242deb52bf590ebe3ac795f8de
[ "MIT" ]
null
null
null
ncc/tasks/translation/plbart_translation.py
hrshy0629/naturalcc
9c3329dd8387c8242deb52bf590ebe3ac795f8de
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import json import os from functools import lru_cache import numpy as np import torch import itertools from ncc import ( tokenizers, LOGGER, ) from ncc.data import ( indexed_dataset, ) from ncc.data.dictionary import Dictionary from ncc.data.ncc_dataset import NccDataset from ncc.data.wrappers.append_token_dataset import AppendTokenDataset from ncc.data.wrappers.portion_dataset import PortionDataset from ncc.data.wrappers.prepend_token_dataset import PrependTokenDataset from ncc.data.wrappers.strip_token_dataset import StripTokenDataset from ncc.data.wrappers.truncate_dataset import TruncateDataset from ncc.eval.summarization import summarization_metrics from ncc.tasks import register_task from ncc.tasks.ncc_task import NccTask from ncc.tokenizers import tokenization from ncc.utils import utils from ncc.utils.logging import metrics from ncc.data import constants from ncc.data.translation.plbart_pair_dataset import PLBartPairDataset from ncc.utils.file_ops import ( file_io, json_io, ) EVAL_BLEU_ORDER = 4 def _load_dataset(path, impl, dict): if impl == 'mmap': # mmap dataset has been numberized, no need for dict src_dataset = indexed_dataset.MMapIndexedDataset(path=path) elif impl == 'pkl': src_dataset = file_io.open(f"{path}.pkl", 'rb') else: raise NotImplementedError("No such {} dataset implementation.".format(impl)) return src_dataset def load_langpair_dataset( data_path, split, src_dict, tgt_dict, src_lang, tgt_lang, dataset_impl, max_source_positions=None, max_target_positions=None, ): # load source dataset src_path = os.path.join(data_path, src_lang, f'{split}.code_tokens') src_tokens = _load_dataset(path=src_path, impl=dataset_impl, dict=src_dict) src_code = None if split != 'train': src_code = [src_dict.string(src_tokens[idx], bpe_symbol='sentencepiece') for idx in range(len(src_tokens))] src_tokens = AppendTokenDataset( TruncateDataset( StripTokenDataset(src_tokens, src_dict.eos()), max_source_positions - 2, ), src_dict.eos() ) 
src_tokens = AppendTokenDataset(src_tokens, src_dict.index('[{}]'.format(src_lang))) LOGGER.info('truncate {}/{}.code_tokens to {}'.format(src_lang, split, max_source_positions)) # load target dataset tgt_path = os.path.join(data_path, tgt_lang, f'{split}.code_tokens') tgt_tokens = _load_dataset(path=tgt_path, impl=dataset_impl, dict=tgt_dict) tgt_code = None if split != 'train': tgt_code = [tgt_dict.string(tgt_tokens[idx], bpe_symbol='sentencepiece') for idx in range(len(tgt_tokens))] tgt_tokens = AppendTokenDataset( TruncateDataset( StripTokenDataset(tgt_tokens, tgt_dict.eos()), max_target_positions - 2, ), tgt_dict.eos() ) tgt_tokens = AppendTokenDataset(tgt_tokens, tgt_dict.index('[{}]'.format(tgt_lang))) LOGGER.info('truncate {}/{}.code_tokens to {}'.format(tgt_lang, split, max_target_positions)) return PLBartPairDataset( src_dict, tgt_dict, src_tokens, src_tokens.sizes, src_code=src_code, tgt=tgt_tokens, tgt_sizes=tgt_tokens.sizes, tgt_code=tgt_code, src_lang=src_lang, tgt_lang=tgt_lang, max_source_positions=max_source_positions, max_target_positions=max_target_positions, eos=tgt_dict.index('[{}]'.format(tgt_lang)), shuffle=(split == 'train'), ) @register_task('plbart_translation') class PLBartTranslationTask(NccTask): def __init__(self, args, src_dict, tgt_dict): super().__init__(args) self.src_dict = src_dict self.tgt_dict = tgt_dict @classmethod def setup_task(cls, args, **kwargs): """Setup the task (e.g., load dictionaries). 
Args: args (argparse.Namespace): parsed command-line arguments """ paths = utils.split_paths(args['task']['data']) assert len(paths) > 0 # load dictionaries dict = cls.load_dictionary(os.path.join(paths[0], 'dict.jsonl')) for l in ['java', 'python', 'en_XX']: dict.add_symbol("[{}]".format(l)) dict.add_symbol(constants.MASK) if f"[{args['task']['source_lang']}]" not in dict: dict.add_symbol(f"[{args['task']['source_lang']}]") if f"[{args['task']['target_lang']}]" not in dict: dict.add_symbol(f"[{args['task']['target_lang']}]") src_dict = tgt_dict = dict assert src_dict.pad() == tgt_dict.pad() assert src_dict.eos() == tgt_dict.eos() assert src_dict.unk() == tgt_dict.unk() LOGGER.info('[{}] dictionary: {} types'.format(args['task']['source_lang'], len(src_dict))) LOGGER.info('[{}] dictionary: {} types'.format(args['task']['target_lang'], len(tgt_dict))) return cls(args, src_dict, tgt_dict) @classmethod def build_dictionary( cls, filenames, tokenize_func, workers=1, threshold=-1, nwords=-1, padding_factor=1, **special_symbols, ): """Build the dictionary Args: filenames (list): list of filenames workers (int): number of concurrent workers threshold (int): defines the minimum word count nwords (int): defines the total number of words in the final dictionary, including special symbols padding_factor (int): can be used to pad the dictionary size to be a multiple of 8, which is important on some hardware (e.g., Nvidia Tensor Cores). 
""" from ncc.data import constants d = Dictionary( pad=special_symbols.get('pad', constants.PAD), bos=special_symbols.get('bos', constants.BOS), eos=special_symbols.get('eos', constants.EOS), unk=special_symbols.get('unk', constants.UNK), extra_special_symbols=special_symbols.get('extra_special_symbols', None), ) for filename in filenames: Dictionary.add_token_to_dictionary( filename, d, tokenize_func, workers ) d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor) return d def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ paths = utils.split_paths(self.args['task']['data']) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] self.datasets[split] = load_langpair_dataset( data_path, split, src_dict=self.src_dict, tgt_dict=self.tgt_dict, src_lang=self.args['task']['source_lang'], tgt_lang=self.args['task']['target_lang'], dataset_impl=self.args['dataset']['dataset_impl'], max_source_positions=self.args['task']['max_source_positions'], max_target_positions=self.args['task']['max_target_positions'], ) def build_model(self, args): model = super().build_model(args) if args['task']['eval_bleu']: assert args['task']['eval_bleu_detok'] is not None, ( '--eval-bleu-detok is required if using --eval-bleu; ' 'try --eval-bleu-detok=moses (or --eval-bleu-detok=space ' 'to disable detokenization, e.g., when using sentencepiece)' ) detok_args = json.loads( args['task']['eval_bleu_detok_args'] if args['task']['eval_bleu_detok_args'] else '{}' ) self.tokenizer = tokenizers.build_tokenizer( dict(tokenizer=args['task'].get('eval_bleu_detok', '{}'), **detok_args) ) gen_args = args['task']['eval_bleu_args'] or {} self.sequence_generator = self.build_generator( [model], args, **gen_args, eos=self.tgt_dict.index(f"[{self.args['task']['target_lang']}]") ) return model @property def target_dictionary(self): return self.src_dict @property def 
source_dictionary(self): return self.tgt_dict def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) mode = self.args['dataset']['valid_subset'] def decode(toks, escape_unk=False, trunc_eos=True): s = self.tgt_dict.string( toks.int().cpu(), self.args['task']['eval_bleu_remove_bpe'], extra_symbols_to_ignore=[self.dataset(mode).eos], escape_unk=escape_unk, trunc_eos=trunc_eos, ) if self.tokenizer: s = self.tokenizer.decode(s) if len(s) == 0: s = '0' # if predict sentence is null, use '0' return s if self.args['task']['eval_bleu']: gen_out = self.inference_step(self.sequence_generator, [model], sample, bos_token=self.dataset(mode).eos) ids = sample['id'].tolist() hyps, refs = [], [] for i in range(len(ids)): hyps.append(decode(gen_out[i][0]['tokens'])) # refs.append(decode( # utils.strip_pad(sample['target'][i], self.tgt_dict.pad()), # escape_unk=True, # don't count <unk> as matches to the hypo # )) refs.append( self.dataset(mode).tgt_code[sample['id'][i].item()] ) if self.args['task']['eval_with_sacrebleu']: import sacrebleu tokenize = sacrebleu.DEFAULT_TOKENIZER if not self.args['task']['eval_tokenized_bleu'] else 'none' bleu = sacrebleu.corpus_bleu(hyps, [refs], tokenize=tokenize) logging_output['_bleu_sys_len'] = bleu.sys_len logging_output['_bleu_ref_len'] = bleu.ref_len # we split counts into separate entries so that they can be # summed efficiently across workers using fast-stat-sync assert len(bleu.counts) == EVAL_BLEU_ORDER for i in range(EVAL_BLEU_ORDER): logging_output['_bleu_counts_' + str(i)] = bleu.counts[i] logging_output['_bleu_totals_' + str(i)] = bleu.totals[i] else: bleu, rouge_l, meteor = self._inference_score(hyps, refs, ids) logging_output['bleu'] = round(bleu, 4) logging_output['rouge_l'] = round(rouge_l, 4) logging_output['meteor'] = round(meteor, 4) return loss, sample_size, logging_output def _inference_score(self, hyps, refs, ids): hypotheses, references = dict(), 
dict() for key, pred, tgt in zip(ids, hyps, refs): hypotheses[key] = [pred] references[key] = tgt if isinstance(tgt, list) else [tgt] bleu, rouge_l, meteor = summarization_metrics.eval_accuracies(hypotheses, references) return bleu, rouge_l, meteor def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) if self.args['task']['eval_bleu']: if self.args['task']['eval_with_sacrebleu']: def sum_logs(key): import torch result = sum(log.get(key, 0) for log in logging_outputs) if torch.is_tensor(result): result = result.cpu() return result counts, totals = [], [] for i in range(EVAL_BLEU_ORDER): counts.append(sum_logs('_bleu_counts_' + str(i))) totals.append(sum_logs('_bleu_totals_' + str(i))) if max(totals) > 0: # log counts as numpy arrays -- log_scalar will sum them correctly metrics.log_scalar('_bleu_counts', np.array(counts)) metrics.log_scalar('_bleu_totals', np.array(totals)) metrics.log_scalar('_bleu_sys_len', sum_logs('_bleu_sys_len')) metrics.log_scalar('_bleu_ref_len', sum_logs('_bleu_ref_len')) def compute_bleu(meters): import inspect import sacrebleu fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0] if 'smooth_method' in fn_sig: smooth = {'smooth_method': 'exp'} else: smooth = {'smooth': 'exp'} bleu = sacrebleu.compute_bleu( correct=meters['_bleu_counts'].sum, total=meters['_bleu_totals'].sum, sys_len=meters['_bleu_sys_len'].sum, ref_len=meters['_bleu_ref_len'].sum, **smooth ) return round(bleu.score, 6) metrics.log_derived('bleu', compute_bleu) else: def sum_logs(key): return sum(log.get(key, 0) for log in logging_outputs) metrics.log_scalar('bleu', sum_logs('bleu'), round=6)
40.516517
117
0.596946
8f5e9208f2982e993596473bdb6f9944605c44ef
2,153
py
Python
setup.py
dbaelipro/CHAID
2f84e94c5bc1ed672ae0730e6e3477cb877acd2c
[ "Apache-2.0" ]
null
null
null
setup.py
dbaelipro/CHAID
2f84e94c5bc1ed672ae0730e6e3477cb877acd2c
[ "Apache-2.0" ]
null
null
null
setup.py
dbaelipro/CHAID
2f84e94c5bc1ed672ae0730e6e3477cb877acd2c
[ "Apache-2.0" ]
null
null
null
"""A setuptools based setup module. See: https://packaging.python.org/en/latest/distributing.html https://github.com/Rambatino/CHAID """ import re from os import path from setuptools import setup, find_packages def get_version(): """ Read version from __init__.py """ version_regex = re.compile( '__version__\\s*=\\s*(?P<q>[\'"])(?P<version>\\d+(\\.\\d+)*(-(alpha|beta|rc)(\\.\\d+)?)?)(?P=q)' ) here = path.abspath(path.dirname(__file__)) init_location = path.join(here, "CHAID/__init__.py") with open(init_location) as init_file: for line in init_file: match = version_regex.search(line) if not match: raise Exception( "Couldn't read version information from '{0}'".format(init_location) ) return match.group('version') setup( name='CHAID', version=get_version(), description='A CHAID tree building algorithm', long_description="This package provides a python implementation of the Chi-Squared Automatic Inference Detection (CHAID) decision tree", url='https://github.com/Rambatino/CHAID', author='Mark Ramotowski, Richard Fitzgerald', author_email='mark.tint.ramotowski@gmail.com', license='Apache License 2.0', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic :: Software Development :: Build Tools', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], keywords='CHAID pandas numpy scipy statistics statistical analysis', packages=find_packages(exclude=['contrib', 'docs', 'tests']), install_requires=[ 'cython', 'numpy', 'pandas', 'treelib', 'pytest', 'scipy', 'savReaderWriter', 'graphviz', 'plotly', 'colorlover', 'enum34; python_version == "2.7"' ], extras_require={ 'test': ['codecov', 'tox', 'tox-pyenv', 'detox', 'pytest', 'pytest-cov'], } )
30.757143
140
0.616349
5be99fc66edf4ba45427e8dcbaf3fc1e79196d25
2,706
py
Python
tensortrade/base/exceptions.py
cihilt/tensortrade
47b8f2f043d3cc430838aac02a915ab42dcc7b64
[ "Apache-2.0" ]
7
2020-09-28T23:36:40.000Z
2022-02-22T02:00:32.000Z
tensortrade/base/exceptions.py
cihilt/tensortrade
47b8f2f043d3cc430838aac02a915ab42dcc7b64
[ "Apache-2.0" ]
4
2020-11-13T18:48:52.000Z
2022-02-10T01:29:47.000Z
tensortrade/base/exceptions.py
cihilt/tensortrade
47b8f2f043d3cc430838aac02a915ab42dcc7b64
[ "Apache-2.0" ]
3
2020-11-23T17:31:59.000Z
2021-04-08T10:55:03.000Z
# =============================================================================
# Quantity Exceptions
# =============================================================================


class InvalidNegativeQuantity(Exception):
    """Raised when a quantity is constructed with a negative size."""

    def __init__(self, size, *args):
        super().__init__(
            "Invalid Quantity: {}. Amounts cannot be negative.".format(size),
            *args
        )


class InvalidNonNumericQuantity(Exception):
    """Raised when a quantity is constructed from a non-numeric value."""

    # BUG FIX: was misspelled ``__init`` so it never overrode __init__
    # and the formatted message was never produced.
    def __init__(self, size, *args):
        super().__init__(
            "Invalid Quantity: {}. Amounts cannot be non-numeric.".format(size),
            *args
        )


class QuantityOpPathMismatch(Exception):
    """Raised when two quantities with different path ids are combined."""

    # BUG FIX: same ``__init`` misspelling as above.
    def __init__(self, left_id, right_id, *args):
        super().__init__(
            "Invalid operation between quantities with unequal path id: {} {}.".format(left_id, right_id),
            *args
        )


# =============================================================================
# Instrument Exceptions
# =============================================================================


class IncompatibleInstrumentOperation(Exception):
    """Raised when an operation mixes two different instrument types."""

    def __init__(self, left, right, *args):
        super().__init__(
            "Instruments are not of the same type ({} and {}).".format(left, right),
            *args
        )


# =============================================================================
# Order Exceptions
# =============================================================================


class InvalidOrderQuantity(Exception):
    """Raised when an order is placed with a non-positive size."""

    def __init__(self, size, *args):
        super().__init__(
            "Invalid Quantity: {}. Order sizes must be positive.".format(size),
            *args
        )


class IncompatibleRecipePath(Exception):
    """Raised when an order is followed by a recipe it is incompatible with."""

    def __init__(self, order, recipe, *args):
        super().__init__(
            "Incompatible {} following {}.".format(order, recipe),
            *args
        )


# =============================================================================
# Wallet Exceptions
# =============================================================================


class InsufficientFundsForAllocation(Exception):
    """Raised when a wallet balance cannot cover a requested allocation."""

    def __init__(self, balance, size, *args):
        super().__init__(
            "Insufficient funds for allocating size {} with balance {}.".format(size, balance),
            *args
        )


# =============================================================================
# Trading Pair Exceptions
# =============================================================================


class InvalidTradingPair(Exception):
    """Raised when a base/quote instrument pair is invalid."""

    def __init__(self, base, quote, *args):
        super().__init__(
            "Invalid instrument pair {}/{}.".format(base, quote),
            *args
        )
30.75
106
0.423503
5c814666f74c7f50b5e62662d2bb00032cebb340
1,819
py
Python
src/objects.py
pylaligand/league_predictions
68b0b9534344a319a24088a4ffce1fd8ec053e8a
[ "Apache-2.0" ]
null
null
null
src/objects.py
pylaligand/league_predictions
68b0b9534344a319a24088a4ffce1fd8ec053e8a
[ "Apache-2.0" ]
null
null
null
src/objects.py
pylaligand/league_predictions
68b0b9534344a319a24088a4ffce1fd8ec053e8a
[ "Apache-2.0" ]
null
null
null
class Team(object):
    """A named team; identity is determined solely by its name."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name

    def __eq__(self, other):
        return self.name == other.name

    def __hash__(self):
        return hash(self.name)


class Game(object):
    """A single fixture between two teams, with optional scores.

    A score below zero means the game has not been played yet.
    """

    def __init__(self, team_1, score_1, team_2, score_2):
        self.team_1 = team_1
        self.score_1 = score_1
        self.team_2 = team_2
        self.score_2 = score_2

    def played(self):
        """Return True when both scores have been recorded (>= 0)."""
        return self.score_1 >= 0 and self.score_2 >= 0

    def update_scores(self, other):
        """Copy the scores from another game onto this one."""
        self.score_1 = other.score_1
        self.score_2 = other.score_2

    def __eq__(self, other):
        # Equality deliberately ignores scores: a fixture is identified
        # by its pairing of teams only.
        return self.team_1 == other.team_1 and self.team_2 == other.team_2

    def __hash__(self):
        return hash((self.team_1, self.team_2))

    def __str__(self):
        if self.played():
            return '%s(%d) v %s(%d)' % (self.team_1, self.score_1,
                                        self.team_2, self.score_2)
        else:
            return '%s v %s' % (self.team_1, self.team_2)

    def __repr__(self):
        return self.__str__()


class Gameday(object):
    """A named collection of games played on the same matchday."""

    def __init__(self, name):
        self.name = name
        self.games = []

    def add_game(self, game):
        self.games.append(game)

    def __str__(self):
        return self.name


class Season(object):
    """An ordered collection of gamedays."""

    def __init__(self):
        self.gamedays = []

    def __iter__(self):
        # BUG FIX: the original returned the list itself, which is not an
        # iterator, so ``for gameday in season`` raised
        # "TypeError: iter() returned non-iterator".
        return iter(self.gamedays)

    def __str__(self):
        return 'Season{%d}' % len(self.gamedays)

    def add_gameday(self, gameday):
        self.gamedays.append(gameday)

    def teams(self):
        """Map each team identifier seen on the first gameday to a Team.

        NOTE(review): assumes every team appears on the opening gameday —
        confirm against the fixture data format.
        """
        result = {}
        for g in self.gamedays[0].games:
            result[g.team_1] = Team(g.team_1)
            result[g.team_2] = Team(g.team_2)
        return result
23.623377
79
0.572842
b2e8b99688ee6925e623b5afa5e42d748889c648
1,692
py
Python
bgx/telebot/setup.py
DGT-Network/DGT-Kawartha-1.0
dfe177f1a10d260949075f96422df34620d0bd54
[ "Apache-2.0" ]
null
null
null
bgx/telebot/setup.py
DGT-Network/DGT-Kawartha-1.0
dfe177f1a10d260949075f96422df34620d0bd54
[ "Apache-2.0" ]
null
null
null
bgx/telebot/setup.py
DGT-Network/DGT-Kawartha-1.0
dfe177f1a10d260949075f96422df34620d0bd54
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 DGT NETWORK INC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------ from __future__ import print_function import os import subprocess from setuptools import setup, find_packages conf_dir = "/etc/bgx" data_files = [ (conf_dir, ['packaging/bot_api.toml.example']) ] if os.path.exists("/etc/default"): data_files.append( ('/etc/default', ['packaging/systemd/bgt-bot-api'])) if os.path.exists("/lib/systemd/system"): data_files.append(('/lib/systemd/system', ['packaging/systemd/bgt-bot-api.service'])) setup( name='bgt-bot-api', version=subprocess.check_output( ['../../bin/get_version']).decode('utf-8').strip(), description='BGT BOT API', author='Hyperledger Sawtooth BGT', url='https://github.com/hyperledger/sawtooth-core', packages=find_packages(), install_requires=[ 'aiodns', 'aiohttp>=2.3.2', 'cchardet', 'protobuf', 'sawtooth-sdk', 'pyformance','cbor' ], data_files=data_files, entry_points={ 'console_scripts': ['bgt-bot-api = bgt_bot_api.bot_api:main'] })
31.333333
80
0.6513
0ec8808e393e49adfdad8c19546a904621eaf3fb
86
py
Python
misc/parameters.py
krsnvss/beetstock
689d52c3e4dfa6533c59e5c4bf9e9f6f60cb688c
[ "MIT" ]
null
null
null
misc/parameters.py
krsnvss/beetstock
689d52c3e4dfa6533c59e5c4bf9e9f6f60cb688c
[ "MIT" ]
null
null
null
misc/parameters.py
krsnvss/beetstock
689d52c3e4dfa6533c59e5c4bf9e9f6f60cb688c
[ "MIT" ]
null
null
null
# Различные задержки и таймеры # Интервал обновления таблиц (ms) table_update = 5000
17.2
33
0.77907
d68fc8e2cd05d5458aec6a15754e2fb42978f87c
20
py
Python
__init__.py
dpk/ikwi
14073d935eb012594576d3715af27a796868cda6
[ "CC0-1.0" ]
4
2015-05-03T17:26:28.000Z
2015-05-28T18:19:07.000Z
__init__.py
dpk/ikwi
14073d935eb012594576d3715af27a796868cda6
[ "CC0-1.0" ]
null
null
null
__init__.py
dpk/ikwi
14073d935eb012594576d3715af27a796868cda6
[ "CC0-1.0" ]
null
null
null
from .ikwi import *
10
19
0.7
d7f1c1fc71d0bf49a46049413342747cb94b64c0
1,947
py
Python
tools/misc/print_config.py
Justice-Eternal/mmrotate
c5bf348562fd84cc17906c2cf370d1a49fcd3035
[ "Apache-2.0" ]
449
2022-02-18T08:26:58.000Z
2022-03-31T11:58:32.000Z
tools/misc/print_config.py
GamblerZSY/mmrotate
6519a3654e17b707c15d4aa2c5db1257587ea4c0
[ "Apache-2.0" ]
162
2022-02-18T09:54:46.000Z
2022-03-31T15:40:46.000Z
tools/misc/print_config.py
GamblerZSY/mmrotate
6519a3654e17b707c15d4aa2c5db1257587ea4c0
[ "Apache-2.0" ]
98
2022-02-18T08:28:48.000Z
2022-03-31T08:52:11.000Z
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings

from mmcv import Config, DictAction


def parse_args():
    """Parse command-line arguments for the config-printing tool."""
    parser = argparse.ArgumentParser(description='Print the whole config')
    parser.add_argument('config', help='config file path')
    # --options is the deprecated spelling of --cfg-options; both take
    # key=value pairs merged into the loaded config.
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()

    has_deprecated = bool(args.options)
    has_current = bool(args.cfg_options)
    if has_deprecated and has_current:
        # Refuse ambiguous invocations that pass both spellings.
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options')
    if has_deprecated:
        # Keep backward compatibility by mapping the old flag onto the new one.
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options
    return args


def main():
    """Load the config, apply any overrides, and print its pretty text."""
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # Some configs register custom modules via string import paths;
    # honor that before printing.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])

    print(f'Config:\n{cfg.pretty_text}')


if __name__ == '__main__':
    main()
33.568966
78
0.645609
edec14de2fc0dd239a9fb47cf70cd8d625177c07
8,620
py
Python
graph_objs/scatter3d/marker/colorbar/title/_font.py
wwwidonja/changed_plotly
1bda35a438539a97c84a3ab3952e95e8848467bd
[ "MIT" ]
null
null
null
graph_objs/scatter3d/marker/colorbar/title/_font.py
wwwidonja/changed_plotly
1bda35a438539a97c84a3ab3952e95e8848467bd
[ "MIT" ]
null
null
null
graph_objs/scatter3d/marker/colorbar/title/_font.py
wwwidonja/changed_plotly
1bda35a438539a97c84a3ab3952e95e8848467bd
[ "MIT" ]
null
null
null
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType import copy as _copy class Font(_BaseTraceHierarchyType): # class properties # -------------------- _parent_path_str = "scatter3d.marker.colorbar.title" _path_str = "scatter3d.marker.colorbar.title.font" _valid_props = {"color", "family", "size"} # color # ----- @property def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, 
rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val # family # ------ @property def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart- studio.new_plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". The 'family' property is a string and must be specified as: - A non-empty string Returns ------- str """ return self["family"] @family.setter def family(self, val): self["family"] = val # size # ---- @property def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] Returns ------- int|float """ return self["size"] @size.setter def size(self, val): self["size"] = val # Self properties description # --------------------------- @property def _prop_descriptions(self): return """\ color family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. 
The Chart Studio Cloud (at https://chart-studio.plotly.com or on- premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". size """ def __init__(self, arg=None, color=None, family=None, size=None, **kwargs): """ Construct a new Font object Sets this color bar's title font. Note that the title's font used to be set by the now deprecated `titlefont` attribute. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`new_plotly.graph_objs.scatter3d.mark er.colorbar.title.Font` color family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on- premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". 
size Returns ------- Font """ super(Font, self).__init__("font") if "_parent" in kwargs: self._parent = kwargs["_parent"] return # Validate arg # ------------ if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError( """\ The first argument to the new_plotly.graph_objs.scatter3d.marker.colorbar.title.Font constructor must be a dict or an instance of :class:`new_plotly.graph_objs.scatter3d.marker.colorbar.title.Font`""" ) # Handle skip_invalid # ------------------- self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) # Populate data dict with properties # ---------------------------------- _v = arg.pop("color", None) _v = color if color is not None else _v if _v is not None: self["color"] = _v _v = arg.pop("family", None) _v = family if family is not None else _v if _v is not None: self["family"] = _v _v = arg.pop("size", None) _v = size if size is not None else _v if _v is not None: self["size"] = _v # Process unknown kwargs # ---------------------- self._process_kwargs(**dict(arg, **kwargs)) # Reset skip_invalid # ------------------ self._skip_invalid = False
37.641921
85
0.569142
e19985ea4f8c1788461eef36fa808f13858029e2
1,647
py
Python
startup/ckpt.py
videoturingtest/vtt_qa_pipeline
83efc672fad0ca2356caede6f0a3875f054037fc
[ "MIT" ]
1
2019-09-19T07:49:21.000Z
2019-09-19T07:49:21.000Z
startup/ckpt.py
videoturingtest/vtt_qa_pipeline
83efc672fad0ca2356caede6f0a3875f054037fc
[ "MIT" ]
null
null
null
startup/ckpt.py
videoturingtest/vtt_qa_pipeline
83efc672fad0ca2356caede6f0a3875f054037fc
[ "MIT" ]
null
null
null
import os
import torch
from torch import nn
import torch.nn.functional as F

from dataset import get_iterator
from model import get_model
from utils import get_dirname_from_args


def get_ckpt_path(args, epoch, loss):
    # Checkpoints live under <args.ckpt_path>/<run-name>/, where the run
    # name is derived from the hyperparameters in ``args`` by
    # get_dirname_from_args.
    ckpt_name = get_dirname_from_args(args)
    ckpt_path = args.ckpt_path / ckpt_name
    args.ckpt_path.mkdir(exist_ok=True)
    ckpt_path.mkdir(exist_ok=True)
    # Embed the loss (4 decimal places) and the epoch in the file name so
    # checkpoints can be selected later by sorting file names.
    loss = '{:.4f}'.format(loss)
    ckpt_path = ckpt_path / \
        f'loss_{loss}_epoch_{epoch}.pickle'
    return ckpt_path


def save_ckpt(args, epoch, loss, model, vocab):
    # Persist everything needed to resume or evaluate: the CLI args, the
    # epoch number, the loss, the model weights and the vocabulary.
    print(f'saving epoch {epoch}')
    dt = {
        'args': args,
        'epoch': epoch,
        'loss': loss,
        'model': model.state_dict(),
        'vocab': vocab,
    }
    ckpt_path = get_ckpt_path(args, epoch, loss)
    print(f"Saving checkpoint {ckpt_path}")
    torch.save(dt, ckpt_path)


def get_model_ckpt(args):
    # Resolve a checkpoint by (prefix) name when one was requested, restore
    # args/vocab from it, then rebuild the data iterators and the model.
    ckpt_available = args.ckpt_name is not None
    if ckpt_available:
        name = f'{args.ckpt_name}'
        # Treat the name as a glob prefix unless it already ends with '*'.
        name = f'{name}*' if not name.endswith('*') else name
        ckpt_paths = sorted(args.ckpt_path.glob(f'{name}'), reverse=False)
        assert len(ckpt_paths) > 0, f"no ckpt candidate for {args.ckpt_path / args.ckpt_name}"
        # Ascending sort puts the lexicographically smallest file name
        # first, i.e. the lowest recorded loss.
        ckpt_path = ckpt_paths[0]  # monkey patch for choosing the best ckpt
        print(f"loading from {ckpt_path}")
        dt = torch.load(ckpt_path)
        args.update(dt['args'])
        vocab = dt['vocab']
    # NOTE(review): when no checkpoint name is given, ``vocab`` looks
    # unbound here -- presumably get_iterator tolerates that or callers
    # always pass a ckpt_name; confirm against dataset.get_iterator.
    iters, vocab = get_iterator(args, vocab)

    model = get_model(args, vocab)

    if ckpt_available:
        model.load_state_dict(dt['model'])
    return args, model, iters, vocab, ckpt_available
28.396552
94
0.656952
fb84a528f1a6c308974cbcb55a9595203bb47da2
54,019
py
Python
vspk/v6/nugateway.py
axxyhtrx/vspk-python
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
[ "BSD-3-Clause" ]
null
null
null
vspk/v6/nugateway.py
axxyhtrx/vspk-python
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
[ "BSD-3-Clause" ]
null
null
null
vspk/v6/nugateway.py
axxyhtrx/vspk-python
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- # # Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from .fetchers import NUL2DomainsFetcher from .fetchers import NUMACFilterProfilesFetcher from .fetchers import NUSAPEgressQoSProfilesFetcher from .fetchers import NUSAPIngressQoSProfilesFetcher from .fetchers import NUGatewaySecuritiesFetcher from .fetchers import NUPATNATPoolsFetcher from .fetchers import NUDeploymentFailuresFetcher from .fetchers import NUPermissionsFetcher from .fetchers import NUWANServicesFetcher from .fetchers import NUMetadatasFetcher from .fetchers import NUEgressProfilesFetcher from .fetchers import NUAlarmsFetcher from .fetchers import NUGlobalMetadatasFetcher from .fetchers import NUInfrastructureConfigsFetcher from .fetchers import NUIngressProfilesFetcher from .fetchers import NUEnterprisePermissionsFetcher from .fetchers import NUJobsFetcher from .fetchers import NULocationsFetcher from .fetchers import NUDomainsFetcher from .fetchers import NUBootstrapsFetcher from .fetchers import NUBootstrapActivationsFetcher from .fetchers import NUPortsFetcher from .fetchers import NURoutingPoliciesFetcher from .fetchers import NUIPFilterProfilesFetcher from .fetchers import NUIPv6FilterProfilesFetcher from .fetchers import NUSubnetsFetcher from .fetchers import NUEventLogsFetcher from bambou import NURESTObject class NUGateway(NURESTObject): """ Represents a Gateway in the VSD Notes: Represents Gateway object. 
""" __rest_name__ = "gateway" __resource_name__ = "gateways" ## Constants CONST_FAMILY_NSG_C = "NSG_C" CONST_PERMITTED_ACTION_ALL = "ALL" CONST_FAMILY_NSG_E = "NSG_E" CONST_PERSONALITY_EVDF = "EVDF" CONST_PERSONALITY_NUAGE_210_WBX_32_Q = "NUAGE_210_WBX_32_Q" CONST_ZFB_MATCH_ATTRIBUTE_MAC_ADDRESS = "MAC_ADDRESS" CONST_FAMILY_NSG_V = "NSG_V" CONST_VENDOR_NOKIA = "NOKIA" CONST_VENDOR_CISCO = "CISCO" CONST_BOOTSTRAP_STATUS_ACTIVE = "ACTIVE" CONST_FAMILY_NSG_X = "NSG_X" CONST_ZFB_MATCH_ATTRIBUTE_IP_ADDRESS = "IP_ADDRESS" CONST_FAMILY_VRS = "VRS" CONST_FAMILY_NSG_E200 = "NSG_E200" CONST_BOOTSTRAP_STATUS_NOTIFICATION_APP_REQ_SENT = "NOTIFICATION_APP_REQ_SENT" CONST_PERSONALITY_EVDFB = "EVDFB" CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_PERSONALITY_OTHER = "OTHER" CONST_ZFB_MATCH_ATTRIBUTE_HOSTNAME = "HOSTNAME" CONST_PERSONALITY_VDFG = "VDFG" CONST_BOOTSTRAP_STATUS_NOTIFICATION_APP_REQ_ACK = "NOTIFICATION_APP_REQ_ACK" CONST_PERMITTED_ACTION_EXTEND = "EXTEND" CONST_BOOTSTRAP_STATUS_QUARANTINED = "QUARANTINED" CONST_PERMITTED_ACTION_INSTANTIATE = "INSTANTIATE" CONST_PERSONALITY_DC7X50 = "DC7X50" CONST_BOOTSTRAP_STATUS_CERTIFICATE_SIGNED = "CERTIFICATE_SIGNED" CONST_FAMILY_NSG_AZ = "NSG_AZ" CONST_FAMILY_ANY = "ANY" CONST_ZFB_MATCH_ATTRIBUTE_NONE = "NONE" CONST_PERSONALITY_VSA = "VSA" CONST_PERSONALITY_VSG = "VSG" CONST_PERMITTED_ACTION_READ = "READ" CONST_PERSONALITY_UNMANAGED_GATEWAY = "UNMANAGED_GATEWAY" CONST_PERSONALITY_VRSB = "VRSB" CONST_PERMITTED_ACTION_USE = "USE" CONST_PERSONALITY_NETCONF_7X50 = "NETCONF_7X50" CONST_PERSONALITY_NUAGE_210_WBX_48_S = "NUAGE_210_WBX_48_S" CONST_FAMILY_NSG_X200 = "NSG_X200" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" CONST_PERSONALITY_SR_LINUX = "SR_LINUX" CONST_FAMILY_NSG_E300 = "NSG_E300" CONST_PERSONALITY_VRSG = "VRSG" CONST_ZFB_MATCH_ATTRIBUTE_SERIAL_NUMBER = "SERIAL_NUMBER" CONST_ZFB_MATCH_ATTRIBUTE_UUID = "UUID" CONST_PERSONALITY_HARDWARE_VTEP = "HARDWARE_VTEP" CONST_PERSONALITY_NETCONF_THIRDPARTY_HW_VTEP = 
"NETCONF_THIRDPARTY_HW_VTEP" CONST_FAMILY_NSG_AMI = "NSG_AMI" CONST_PERMITTED_ACTION_DEPLOY = "DEPLOY" CONST_BOOTSTRAP_STATUS_REVOKED = "REVOKED" CONST_BOOTSTRAP_STATUS_INACTIVE = "INACTIVE" def __init__(self, **kwargs): """ Initializes a Gateway instance Notes: You can specify all parameters while calling this methods. A special argument named `data` will enable you to load the object from a Python dictionary Examples: >>> gateway = NUGateway(id=u'xxxx-xxx-xxx-xxx', name=u'Gateway') >>> gateway = NUGateway(data=my_dict) """ super(NUGateway, self).__init__() # Read/Write Attributes self._mac_address = None self._zfb_match_attribute = None self._zfb_match_value = None self._bios_release_date = None self._bios_version = None self._cpu_type = None self._uuid = None self._name = None self._family = None self._management_id = None self._last_updated_by = None self._last_updated_date = None self._datapath_id = None self._patches = None self._gateway_config_raw_version = None self._gateway_config_version = None self._gateway_connected = None self._gateway_model = None self._gateway_version = None self._native_vlan = None self._redundancy_group_id = None self._peer = None self._template_id = None self._pending = None self._vendor = None self._serial_number = None self._permitted_action = None self._personality = None self._description = None self._libraries = None self._embedded_metadata = None self._enterprise_id = None self._entity_scope = None self._location_id = None self._bootstrap_id = None self._bootstrap_status = None self._creation_date = None self._product_name = None self._use_gateway_vlanvnid = None self._associated_gnmi_profile_id = None self._associated_gateway_security_id = None self._associated_gateway_security_profile_id = None self._associated_nsg_info_id = None self._associated_netconf_profile_id = None self._vtep = None self._auto_disc_gateway_id = None self._owner = None self._external_id = None self._system_id = None 
self.expose_attribute(local_name="mac_address", remote_name="MACAddress", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="zfb_match_attribute", remote_name="ZFBMatchAttribute", attribute_type=str, is_required=False, is_unique=False, choices=[u'HOSTNAME', u'IP_ADDRESS', u'MAC_ADDRESS', u'NONE', u'SERIAL_NUMBER', u'UUID']) self.expose_attribute(local_name="zfb_match_value", remote_name="ZFBMatchValue", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="bios_release_date", remote_name="BIOSReleaseDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="bios_version", remote_name="BIOSVersion", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="cpu_type", remote_name="CPUType", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="uuid", remote_name="UUID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name="family", remote_name="family", attribute_type=str, is_required=False, is_unique=False, choices=[u'ANY', u'NSG_AMI', u'NSG_AZ', u'NSG_C', u'NSG_E', u'NSG_E200', u'NSG_E300', u'NSG_V', u'NSG_X', u'NSG_X200', u'VRS']) self.expose_attribute(local_name="management_id", remote_name="managementID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="datapath_id", remote_name="datapathID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="patches", remote_name="patches", 
attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="gateway_config_raw_version", remote_name="gatewayConfigRawVersion", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="gateway_config_version", remote_name="gatewayConfigVersion", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="gateway_connected", remote_name="gatewayConnected", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="gateway_model", remote_name="gatewayModel", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="gateway_version", remote_name="gatewayVersion", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="native_vlan", remote_name="nativeVLAN", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="redundancy_group_id", remote_name="redundancyGroupID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="peer", remote_name="peer", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="template_id", remote_name="templateID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="pending", remote_name="pending", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="vendor", remote_name="vendor", attribute_type=str, is_required=False, is_unique=False, choices=[u'CISCO', u'NOKIA']) self.expose_attribute(local_name="serial_number", remote_name="serialNumber", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="permitted_action", remote_name="permittedAction", attribute_type=str, is_required=False, is_unique=False, choices=[u'ALL', u'DEPLOY', u'EXTEND', u'INSTANTIATE', u'READ', u'USE']) self.expose_attribute(local_name="personality", remote_name="personality", 
attribute_type=str, is_required=False, is_unique=False, choices=[u'DC7X50', u'EVDF', u'EVDFB', u'HARDWARE_VTEP', u'NETCONF_7X50', u'NETCONF_THIRDPARTY_HW_VTEP', u'NUAGE_210_WBX_32_Q', u'NUAGE_210_WBX_48_S', u'OTHER', u'SR_LINUX', u'UNMANAGED_GATEWAY', u'VDFG', u'VRSB', u'VRSG', u'VSA', u'VSG']) self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="libraries", remote_name="libraries", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False) self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="location_id", remote_name="locationID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="bootstrap_id", remote_name="bootstrapID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="bootstrap_status", remote_name="bootstrapStatus", attribute_type=str, is_required=False, is_unique=False, choices=[u'ACTIVE', u'CERTIFICATE_SIGNED', u'INACTIVE', u'NOTIFICATION_APP_REQ_ACK', u'NOTIFICATION_APP_REQ_SENT', u'QUARANTINED', u'REVOKED']) self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="product_name", remote_name="productName", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="use_gateway_vlanvnid", remote_name="useGatewayVLANVNID", attribute_type=bool, is_required=False, is_unique=False) 
self.expose_attribute(local_name="associated_gnmi_profile_id", remote_name="associatedGNMIProfileID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_gateway_security_id", remote_name="associatedGatewaySecurityID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_gateway_security_profile_id", remote_name="associatedGatewaySecurityProfileID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_nsg_info_id", remote_name="associatedNSGInfoID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_netconf_profile_id", remote_name="associatedNetconfProfileID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="vtep", remote_name="vtep", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="auto_disc_gateway_id", remote_name="autoDiscGatewayID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) self.expose_attribute(local_name="system_id", remote_name="systemID", attribute_type=str, is_required=False, is_unique=False) # Fetchers self.l2_domains = NUL2DomainsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.mac_filter_profiles = NUMACFilterProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.sap_egress_qo_s_profiles = NUSAPEgressQoSProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.sap_ingress_qo_s_profiles = NUSAPIngressQoSProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.gateway_securities = 
NUGatewaySecuritiesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.patnat_pools = NUPATNATPoolsFetcher.fetcher_with_object(parent_object=self, relationship="member") self.deployment_failures = NUDeploymentFailuresFetcher.fetcher_with_object(parent_object=self, relationship="child") self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.wan_services = NUWANServicesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.egress_profiles = NUEgressProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.alarms = NUAlarmsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.infrastructure_configs = NUInfrastructureConfigsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ingress_profiles = NUIngressProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.enterprise_permissions = NUEnterprisePermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.jobs = NUJobsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.locations = NULocationsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.domains = NUDomainsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.bootstraps = NUBootstrapsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.bootstrap_activations = NUBootstrapActivationsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ports = NUPortsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.routing_policies = NURoutingPoliciesFetcher.fetcher_with_object(parent_object=self, relationship="child") 
self.ip_filter_profiles = NUIPFilterProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ipv6_filter_profiles = NUIPv6FilterProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.subnets = NUSubnetsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child") self._compute_args(**kwargs) # Properties @property def mac_address(self): """ Get mac_address value. Notes: MAC Address of the first interface This attribute is named `MACAddress` in VSD API. """ return self._mac_address @mac_address.setter def mac_address(self, value): """ Set mac_address value. Notes: MAC Address of the first interface This attribute is named `MACAddress` in VSD API. """ self._mac_address = value @property def zfb_match_attribute(self): """ Get zfb_match_attribute value. Notes: The Zero Factor Bootstrapping (ZFB) Attribute that should be used to match the gateway on when it tries to bootstrap. This attribute is named `ZFBMatchAttribute` in VSD API. """ return self._zfb_match_attribute @zfb_match_attribute.setter def zfb_match_attribute(self, value): """ Set zfb_match_attribute value. Notes: The Zero Factor Bootstrapping (ZFB) Attribute that should be used to match the gateway on when it tries to bootstrap. This attribute is named `ZFBMatchAttribute` in VSD API. """ self._zfb_match_attribute = value @property def zfb_match_value(self): """ Get zfb_match_value value. Notes: The Zero Factor Bootstrapping (ZFB) value that needs to match with the gateway during the bootstrap attempt. This value needs to match with the ZFB Match Attribute. This attribute is named `ZFBMatchValue` in VSD API. """ return self._zfb_match_value @zfb_match_value.setter def zfb_match_value(self, value): """ Set zfb_match_value value. Notes: The Zero Factor Bootstrapping (ZFB) value that needs to match with the gateway during the bootstrap attempt. 
This value needs to match with the ZFB Match Attribute. This attribute is named `ZFBMatchValue` in VSD API. """ self._zfb_match_value = value @property def bios_release_date(self): """ Get bios_release_date value. Notes: Release Date of the BIOS. The format can vary based on the manufacturer but normally includes year/month/day or year/week details (eg. 01/01/2011 or 2018/06/15 or 2018/22) This attribute is named `BIOSReleaseDate` in VSD API. """ return self._bios_release_date @bios_release_date.setter def bios_release_date(self, value): """ Set bios_release_date value. Notes: Release Date of the BIOS. The format can vary based on the manufacturer but normally includes year/month/day or year/week details (eg. 01/01/2011 or 2018/06/15 or 2018/22) This attribute is named `BIOSReleaseDate` in VSD API. """ self._bios_release_date = value @property def bios_version(self): """ Get bios_version value. Notes: BIOS Version (eg. 0.5.1) This attribute is named `BIOSVersion` in VSD API. """ return self._bios_version @bios_version.setter def bios_version(self, value): """ Set bios_version value. Notes: BIOS Version (eg. 0.5.1) This attribute is named `BIOSVersion` in VSD API. """ self._bios_version = value @property def cpu_type(self): """ Get cpu_type value. Notes: The Processor Type as reported during bootstrapping. This attribute is named `CPUType` in VSD API. """ return self._cpu_type @cpu_type.setter def cpu_type(self, value): """ Set cpu_type value. Notes: The Processor Type as reported during bootstrapping. This attribute is named `CPUType` in VSD API. """ self._cpu_type = value @property def uuid(self): """ Get uuid value. Notes: UUID of the device This attribute is named `UUID` in VSD API. """ return self._uuid @uuid.setter def uuid(self, value): """ Set uuid value. Notes: UUID of the device This attribute is named `UUID` in VSD API. """ self._uuid = value @property def name(self): """ Get name value. 
Notes: Name of the Gateway """ return self._name @name.setter def name(self, value): """ Set name value. Notes: Name of the Gateway """ self._name = value @property def family(self): """ Get family value. Notes: The family type of the gateway based on common characteristics with other members of a particular variation of an NSG hardware or of a virtual deployment. """ return self._family @family.setter def family(self, value): """ Set family value. Notes: The family type of the gateway based on common characteristics with other members of a particular variation of an NSG hardware or of a virtual deployment. """ self._family = value @property def management_id(self): """ Get management_id value. Notes: The identifier of this gateway's management interface. This attribute is named `managementID` in VSD API. """ return self._management_id @management_id.setter def management_id(self, value): """ Set management_id value. Notes: The identifier of this gateway's management interface. This attribute is named `managementID` in VSD API. """ self._management_id = value @property def last_updated_by(self): """ Get last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API. """ return self._last_updated_by @last_updated_by.setter def last_updated_by(self, value): """ Set last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API. """ self._last_updated_by = value @property def last_updated_date(self): """ Get last_updated_date value. Notes: Time stamp when this object was last updated. This attribute is named `lastUpdatedDate` in VSD API. """ return self._last_updated_date @last_updated_date.setter def last_updated_date(self, value): """ Set last_updated_date value. Notes: Time stamp when this object was last updated. This attribute is named `lastUpdatedDate` in VSD API. 
""" self._last_updated_date = value @property def datapath_id(self): """ Get datapath_id value. Notes: Identifier of the Gateway, based on the systemID which is generated when the instance is created in VSD. This attribute is named `datapathID` in VSD API. """ return self._datapath_id @datapath_id.setter def datapath_id(self, value): """ Set datapath_id value. Notes: Identifier of the Gateway, based on the systemID which is generated when the instance is created in VSD. This attribute is named `datapathID` in VSD API. """ self._datapath_id = value @property def patches(self): """ Get patches value. Notes: Patches that have been installed on the NSG """ return self._patches @patches.setter def patches(self, value): """ Set patches value. Notes: Patches that have been installed on the NSG """ self._patches = value @property def gateway_config_raw_version(self): """ Get gateway_config_raw_version value. Notes: Release version of gateway, which is used to determine the feature capabilties of gateway. This attribute is named `gatewayConfigRawVersion` in VSD API. """ return self._gateway_config_raw_version @gateway_config_raw_version.setter def gateway_config_raw_version(self, value): """ Set gateway_config_raw_version value. Notes: Release version of gateway, which is used to determine the feature capabilties of gateway. This attribute is named `gatewayConfigRawVersion` in VSD API. """ self._gateway_config_raw_version = value @property def gateway_config_version(self): """ Get gateway_config_version value. Notes: Interpreted version of gateway, which is used to determine the feature capabilities of gateway. This attribute is named `gatewayConfigVersion` in VSD API. """ return self._gateway_config_version @gateway_config_version.setter def gateway_config_version(self, value): """ Set gateway_config_version value. Notes: Interpreted version of gateway, which is used to determine the feature capabilities of gateway. 
This attribute is named `gatewayConfigVersion` in VSD API. """ self._gateway_config_version = value @property def gateway_connected(self): """ Get gateway_connected value. Notes: A boolean flag indicating the status of the gateway. This attribute is named `gatewayConnected` in VSD API. """ return self._gateway_connected @gateway_connected.setter def gateway_connected(self, value): """ Set gateway_connected value. Notes: A boolean flag indicating the status of the gateway. This attribute is named `gatewayConnected` in VSD API. """ self._gateway_connected = value @property def gateway_model(self): """ Get gateway_model value. Notes: The model string of the gateway. Applicable to netconf managed gateways This attribute is named `gatewayModel` in VSD API. """ return self._gateway_model @gateway_model.setter def gateway_model(self, value): """ Set gateway_model value. Notes: The model string of the gateway. Applicable to netconf managed gateways This attribute is named `gatewayModel` in VSD API. """ self._gateway_model = value @property def gateway_version(self): """ Get gateway_version value. Notes: The Gateway Software Version as reported during bootstrapping. This attribute is named `gatewayVersion` in VSD API. """ return self._gateway_version @gateway_version.setter def gateway_version(self, value): """ Set gateway_version value. Notes: The Gateway Software Version as reported during bootstrapping. This attribute is named `gatewayVersion` in VSD API. """ self._gateway_version = value @property def native_vlan(self): """ Get native_vlan value. Notes: Default Native VLAN to carry untagged traffic on the ports of this gateway. Applicable for Cisco 9K only. Possible values are 1-3967. This attribute is named `nativeVLAN` in VSD API. """ return self._native_vlan @native_vlan.setter def native_vlan(self, value): """ Set native_vlan value. Notes: Default Native VLAN to carry untagged traffic on the ports of this gateway. Applicable for Cisco 9K only. 
Possible values are 1-3967. This attribute is named `nativeVLAN` in VSD API. """ self._native_vlan = value @property def redundancy_group_id(self): """ Get redundancy_group_id value. Notes: The Redundancy Gateway Group associated with this Gateway Instance. This is a read only attribute This attribute is named `redundancyGroupID` in VSD API. """ return self._redundancy_group_id @redundancy_group_id.setter def redundancy_group_id(self, value): """ Set redundancy_group_id value. Notes: The Redundancy Gateway Group associated with this Gateway Instance. This is a read only attribute This attribute is named `redundancyGroupID` in VSD API. """ self._redundancy_group_id = value @property def peer(self): """ Get peer value. Notes: The System ID of the peer gateway associated with this Gateway instance when it is discovered by the network manager (VSD) as being redundant. """ return self._peer @peer.setter def peer(self, value): """ Set peer value. Notes: The System ID of the peer gateway associated with this Gateway instance when it is discovered by the network manager (VSD) as being redundant. """ self._peer = value @property def template_id(self): """ Get template_id value. Notes: The ID of the template that this Gateway was created from. This should be set when instantiating a Gateway This attribute is named `templateID` in VSD API. """ return self._template_id @template_id.setter def template_id(self, value): """ Set template_id value. Notes: The ID of the template that this Gateway was created from. This should be set when instantiating a Gateway This attribute is named `templateID` in VSD API. """ self._template_id = value @property def pending(self): """ Get pending value. Notes: Indicates that this gateway is pending state or state. When in pending state it cannot be modified from REST. """ return self._pending @pending.setter def pending(self, value): """ Set pending value. Notes: Indicates that this gateway is pending state or state. 
When in pending state it cannot be modified from REST. """ self._pending = value @property def vendor(self): """ Get vendor value. Notes: The vendor of the gateway. Applicable to netconf managed gateways """ return self._vendor @vendor.setter def vendor(self, value): """ Set vendor value. Notes: The vendor of the gateway. Applicable to netconf managed gateways """ self._vendor = value @property def serial_number(self): """ Get serial_number value. Notes: The device's serial number This attribute is named `serialNumber` in VSD API. """ return self._serial_number @serial_number.setter def serial_number(self, value): """ Set serial_number value. Notes: The device's serial number This attribute is named `serialNumber` in VSD API. """ self._serial_number = value @property def permitted_action(self): """ Get permitted_action value. Notes: The permitted action to USE/EXTEND this Gateway. This attribute is named `permittedAction` in VSD API. """ return self._permitted_action @permitted_action.setter def permitted_action(self, value): """ Set permitted_action value. Notes: The permitted action to USE/EXTEND this Gateway. This attribute is named `permittedAction` in VSD API. """ self._permitted_action = value @property def personality(self): """ Get personality value. Notes: Personality of the Gateway, cannot be changed after creation. """ return self._personality @personality.setter def personality(self, value): """ Set personality value. Notes: Personality of the Gateway, cannot be changed after creation. """ self._personality = value @property def description(self): """ Get description value. Notes: A description of the Gateway """ return self._description @description.setter def description(self, value): """ Set description value. Notes: A description of the Gateway """ self._description = value @property def libraries(self): """ Get libraries value. Notes: Versions of monitored libraries currently installed on the Gateway. 
""" return self._libraries @libraries.setter def libraries(self, value): """ Set libraries value. Notes: Versions of monitored libraries currently installed on the Gateway. """ self._libraries = value @property def embedded_metadata(self): """ Get embedded_metadata value. Notes: Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration. This attribute is named `embeddedMetadata` in VSD API. """ return self._embedded_metadata @embedded_metadata.setter def embedded_metadata(self, value): """ Set embedded_metadata value. Notes: Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration. This attribute is named `embeddedMetadata` in VSD API. """ self._embedded_metadata = value @property def enterprise_id(self): """ Get enterprise_id value. Notes: The enterprise associated with this Gateway. This is a read only attribute This attribute is named `enterpriseID` in VSD API. """ return self._enterprise_id @enterprise_id.setter def enterprise_id(self, value): """ Set enterprise_id value. Notes: The enterprise associated with this Gateway. This is a read only attribute This attribute is named `enterpriseID` in VSD API. """ self._enterprise_id = value @property def entity_scope(self): """ Get entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API. """ return self._entity_scope @entity_scope.setter def entity_scope(self, value): """ Set entity_scope value. 
Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API. """ self._entity_scope = value @property def location_id(self): """ Get location_id value. Notes: Association to an object which contains location information about this gateway instance. This attribute is named `locationID` in VSD API. """ return self._location_id @location_id.setter def location_id(self, value): """ Set location_id value. Notes: Association to an object which contains location information about this gateway instance. This attribute is named `locationID` in VSD API. """ self._location_id = value @property def bootstrap_id(self): """ Get bootstrap_id value. Notes: The bootstrap details associated with this Gateway. NOTE: This is a read only property, it can only be set during creation of a gateway. This attribute is named `bootstrapID` in VSD API. """ return self._bootstrap_id @bootstrap_id.setter def bootstrap_id(self, value): """ Set bootstrap_id value. Notes: The bootstrap details associated with this Gateway. NOTE: This is a read only property, it can only be set during creation of a gateway. This attribute is named `bootstrapID` in VSD API. """ self._bootstrap_id = value @property def bootstrap_status(self): """ Get bootstrap_status value. Notes: The bootstrap status of this Gateway. NOTE: This is a read only property. This attribute is named `bootstrapStatus` in VSD API. """ return self._bootstrap_status @bootstrap_status.setter def bootstrap_status(self, value): """ Set bootstrap_status value. Notes: The bootstrap status of this Gateway. NOTE: This is a read only property. This attribute is named `bootstrapStatus` in VSD API. """ self._bootstrap_status = value @property def creation_date(self): """ Get creation_date value. Notes: Time stamp when this object was created. This attribute is named `creationDate` in VSD API. 
""" return self._creation_date @creation_date.setter def creation_date(self, value): """ Set creation_date value. Notes: Time stamp when this object was created. This attribute is named `creationDate` in VSD API. """ self._creation_date = value @property def product_name(self): """ Get product_name value. Notes: Product Name as reported during bootstrapping. This attribute is named `productName` in VSD API. """ return self._product_name @product_name.setter def product_name(self, value): """ Set product_name value. Notes: Product Name as reported during bootstrapping. This attribute is named `productName` in VSD API. """ self._product_name = value @property def use_gateway_vlanvnid(self): """ Get use_gateway_vlanvnid value. Notes: When set, VLAN-VNID mapping must be unique for all the vports of the gateway This attribute is named `useGatewayVLANVNID` in VSD API. """ return self._use_gateway_vlanvnid @use_gateway_vlanvnid.setter def use_gateway_vlanvnid(self, value): """ Set use_gateway_vlanvnid value. Notes: When set, VLAN-VNID mapping must be unique for all the vports of the gateway This attribute is named `useGatewayVLANVNID` in VSD API. """ self._use_gateway_vlanvnid = value @property def associated_gnmi_profile_id(self): """ Get associated_gnmi_profile_id value. Notes: UUID of the GNMI Profile associated to this gateway. This attribute is named `associatedGNMIProfileID` in VSD API. """ return self._associated_gnmi_profile_id @associated_gnmi_profile_id.setter def associated_gnmi_profile_id(self, value): """ Set associated_gnmi_profile_id value. Notes: UUID of the GNMI Profile associated to this gateway. This attribute is named `associatedGNMIProfileID` in VSD API. """ self._associated_gnmi_profile_id = value @property def associated_gateway_security_id(self): """ Get associated_gateway_security_id value. Notes: Read only ID of the associated gateway security object. This attribute is named `associatedGatewaySecurityID` in VSD API. 
""" return self._associated_gateway_security_id @associated_gateway_security_id.setter def associated_gateway_security_id(self, value): """ Set associated_gateway_security_id value. Notes: Read only ID of the associated gateway security object. This attribute is named `associatedGatewaySecurityID` in VSD API. """ self._associated_gateway_security_id = value @property def associated_gateway_security_profile_id(self): """ Get associated_gateway_security_profile_id value. Notes: Readonly Id of the associated gateway security profile object This attribute is named `associatedGatewaySecurityProfileID` in VSD API. """ return self._associated_gateway_security_profile_id @associated_gateway_security_profile_id.setter def associated_gateway_security_profile_id(self, value): """ Set associated_gateway_security_profile_id value. Notes: Readonly Id of the associated gateway security profile object This attribute is named `associatedGatewaySecurityProfileID` in VSD API. """ self._associated_gateway_security_profile_id = value @property def associated_nsg_info_id(self): """ Get associated_nsg_info_id value. Notes: Read only ID of the associated gateway information object This attribute is named `associatedNSGInfoID` in VSD API. """ return self._associated_nsg_info_id @associated_nsg_info_id.setter def associated_nsg_info_id(self, value): """ Set associated_nsg_info_id value. Notes: Read only ID of the associated gateway information object This attribute is named `associatedNSGInfoID` in VSD API. """ self._associated_nsg_info_id = value @property def associated_netconf_profile_id(self): """ Get associated_netconf_profile_id value. Notes: UUID of the Netconf Profile associated to this gateway. This attribute is named `associatedNetconfProfileID` in VSD API. """ return self._associated_netconf_profile_id @associated_netconf_profile_id.setter def associated_netconf_profile_id(self, value): """ Set associated_netconf_profile_id value. 
Notes: UUID of the Netconf Profile associated to this gateway. This attribute is named `associatedNetconfProfileID` in VSD API. """ self._associated_netconf_profile_id = value @property def vtep(self): """ Get vtep value. Notes: Represent the system ID or the Virtual IP of a service used by a Gateway (VSG for now) to establish a tunnel with a remote VSG or hypervisor. The format of this field is consistent with an IP address. """ return self._vtep @vtep.setter def vtep(self, value): """ Set vtep value. Notes: Represent the system ID or the Virtual IP of a service used by a Gateway (VSG for now) to establish a tunnel with a remote VSG or hypervisor. The format of this field is consistent with an IP address. """ self._vtep = value @property def auto_disc_gateway_id(self): """ Get auto_disc_gateway_id value. Notes: The Auto Discovered Gateway associated with this Gateway Instance This attribute is named `autoDiscGatewayID` in VSD API. """ return self._auto_disc_gateway_id @auto_disc_gateway_id.setter def auto_disc_gateway_id(self, value): """ Set auto_disc_gateway_id value. Notes: The Auto Discovered Gateway associated with this Gateway Instance This attribute is named `autoDiscGatewayID` in VSD API. """ self._auto_disc_gateway_id = value @property def owner(self): """ Get owner value. Notes: Identifies the user that has created this object. """ return self._owner @owner.setter def owner(self, value): """ Set owner value. Notes: Identifies the user that has created this object. """ self._owner = value @property def external_id(self): """ Get external_id value. Notes: External object ID. Used for integration with third party systems This attribute is named `externalID` in VSD API. """ return self._external_id @external_id.setter def external_id(self, value): """ Set external_id value. Notes: External object ID. Used for integration with third party systems This attribute is named `externalID` in VSD API. 
""" self._external_id = value @property def system_id(self): """ Get system_id value. Notes: Identifier of the Gateway, cannot be modified after creation This attribute is named `systemID` in VSD API. """ return self._system_id @system_id.setter def system_id(self, value): """ Set system_id value. Notes: Identifier of the Gateway, cannot be modified after creation This attribute is named `systemID` in VSD API. """ self._system_id = value ## Custom methods def is_template(self): """ Verify that the object is a template Returns: (bool): True if the object is a template """ return False def is_from_template(self): """ Verify if the object has been instantiated from a template Note: The object has to be fetched. Otherwise, it does not have information from its parent Returns: (bool): True if the object is a template """ return self.template_id
31.116935
378
0.61371
28a37318fb527662ca68fb5cc180c419a7475ec3
1,971
py
Python
aliyun-python-sdk-dataworks-public/aliyunsdkdataworks_public/request/v20200518/CreateQualityFollowerRequest.py
jia-jerry/aliyun-openapi-python-sdk
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
[ "Apache-2.0" ]
null
null
null
aliyun-python-sdk-dataworks-public/aliyunsdkdataworks_public/request/v20200518/CreateQualityFollowerRequest.py
jia-jerry/aliyun-openapi-python-sdk
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
[ "Apache-2.0" ]
null
null
null
aliyun-python-sdk-dataworks-public/aliyunsdkdataworks_public/request/v20200518/CreateQualityFollowerRequest.py
jia-jerry/aliyun-openapi-python-sdk
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
[ "Apache-2.0" ]
null
null
null
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data


class CreateQualityFollowerRequest(RpcRequest):
    """RPC request wrapper for the DataWorks ``CreateQualityFollower`` API.

    Carries four body parameters (AlarmMode, ProjectName, Follower,
    EntityId), each exposed through a get/set accessor pair.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'dataworks-public', '2020-05-18',
                            'CreateQualityFollower', 'dide')
        self.set_method('POST')
        # Endpoint resolution data is only attached when the core library
        # version defines these attributes on the request object.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_AlarmMode(self):
        """Return the ``AlarmMode`` body parameter, or None if unset."""
        params = self.get_body_params()
        return params.get('AlarmMode')

    def set_AlarmMode(self, AlarmMode):
        """Store ``AlarmMode`` in the request body."""
        self.add_body_params('AlarmMode', AlarmMode)

    def get_ProjectName(self):
        """Return the ``ProjectName`` body parameter, or None if unset."""
        params = self.get_body_params()
        return params.get('ProjectName')

    def set_ProjectName(self, ProjectName):
        """Store ``ProjectName`` in the request body."""
        self.add_body_params('ProjectName', ProjectName)

    def get_Follower(self):
        """Return the ``Follower`` body parameter, or None if unset."""
        params = self.get_body_params()
        return params.get('Follower')

    def set_Follower(self, Follower):
        """Store ``Follower`` in the request body."""
        self.add_body_params('Follower', Follower)

    def get_EntityId(self):
        """Return the ``EntityId`` body parameter, or None if unset."""
        params = self.get_body_params()
        return params.get('EntityId')

    def set_EntityId(self, EntityId):
        """Store ``EntityId`` in the request body."""
        self.add_body_params('EntityId', EntityId)
35.196429
94
0.764079
af216334466d77866a11b64441a9793963fb1cf6
191
py
Python
Python/Day4_Geometric_Distribution_I.py
nikhilbhatewara/HackerRank
87a59429cc672a34ea5841235dc9a706195d18a4
[ "MIT" ]
null
null
null
Python/Day4_Geometric_Distribution_I.py
nikhilbhatewara/HackerRank
87a59429cc672a34ea5841235dc9a706195d18a4
[ "MIT" ]
null
null
null
Python/Day4_Geometric_Distribution_I.py
nikhilbhatewara/HackerRank
87a59429cc672a34ea5841235dc9a706195d18a4
[ "MIT" ]
null
null
null
#!/usr/bin/python
# -*- coding: utf-8 -*-

# p(defective) = 1/3; we want the probability that the first defect is
# found during the 5th inspection, i.e. n = 5 in the geometric PMF.


def g(n, p):
    """Geometric PMF: P(first success occurs on trial n) with success prob p."""
    failures_before = (1 - p) ** (n - 1)
    return failures_before * p


n = 5
p = 1 / 3
output = g(n, p)
print(round(output, 3))
12.733333
32
0.549738
3f839882840af11d8cc2750c3165904c3a58b850
2,750
py
Python
examples/ad_manager/v201802/reconciliation_line_item_report_service/update_reconciliation_line_item_reports.py
khanhnhk/googleads-python-lib
1e882141b8eb663b55dd582ce0f4fbf3cd2f672d
[ "Apache-2.0" ]
1
2021-12-30T15:21:42.000Z
2021-12-30T15:21:42.000Z
examples/ad_manager/v201802/reconciliation_line_item_report_service/update_reconciliation_line_item_reports.py
benlistyg/googleads-python-lib
1e882141b8eb663b55dd582ce0f4fbf3cd2f672d
[ "Apache-2.0" ]
null
null
null
examples/ad_manager/v201802/reconciliation_line_item_report_service/update_reconciliation_line_item_reports.py
benlistyg/googleads-python-lib
1e882141b8eb663b55dd582ce0f4fbf3cd2f672d
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates a reconciliation line item report.

To get reconciliation line item reports for a reconciliation report, run
get_reconciliation_line_item_reports_for_reconciliation_report.py
"""

# Import appropriate modules from the client library.
from googleads import ad_manager

# Set the ID of the reconciliation line item report to update.
RECONCILIATION_LINE_ITEM_REPORT_ID = 'INSERT_ID_HERE'


def main(client):
    """Fetch the targeted line item report and switch it to a manual volume."""
    # Service that owns reconciliation line item reports.
    report_service = client.GetService(
        'ReconciliationLineItemReportService', version='v201802')

    # Build a statement selecting the single report to update.
    query = (ad_manager.StatementBuilder()
             .Where('id = :lineItemReportId')
             .OrderBy('id', ascending=True)
             .Limit(1)
             .WithBindVariable('lineItemReportId',
                               RECONCILIATION_LINE_ITEM_REPORT_ID))

    response = (report_service
                .getReconciliationLineItemReportsByStatement(
                    query.ToStatement()))
    report = response['results'][0]

    # Use a manual volume for billing: split the difference between the
    # Ad Manager volume and the third party volume.
    report['manualVolume'] = (report['dfpVolume'] +
                              report['thirdPartyVolume']) / 2
    report['reconciliationSource'] = 'MANUAL'

    updated_reports = (report_service
                       .updateReconciliationLineItemReports([report]))

    for updated in updated_reports:
        print('Reconciliation line item report with ID %d for line item ID %d was '
              'updated, with manual volume %d'
              % (updated['id'], updated['lineItemId'], updated['manualVolume']))


if __name__ == '__main__':
    # Initialize client object.
    ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
    main(ad_manager_client)
40.441176
80
0.710182
e1a8c5b344c3347d758d36dd516214a39f2adbbc
1,228
py
Python
setup.py
kemalcanbora/linkedin-api
c3ea7677314972a79f6344028c0371613ee5907e
[ "MIT" ]
1
2020-10-03T11:23:44.000Z
2020-10-03T11:23:44.000Z
setup.py
kemalcanbora/linkedin-api
c3ea7677314972a79f6344028c0371613ee5907e
[ "MIT" ]
null
null
null
setup.py
kemalcanbora/linkedin-api
c3ea7677314972a79f6344028c0371613ee5907e
[ "MIT" ]
null
null
null
import setuptools
import ast
import re
from pathlib import Path

CURRENT_DIR = Path(__file__).parent


def get_long_description() -> str:
    """Read README.md and return its contents for the PyPI long description."""
    readme_md = CURRENT_DIR / "README.md"
    with open(readme_md, encoding="utf8") as ld_file:
        return ld_file.read()


def get_version() -> str:
    """Extract ``__version__`` from linkedin_api/__init__.py.

    Falls back to the string "unknown" if the assignment is not found.
    """
    black_py = CURRENT_DIR / "linkedin_api/__init__.py"
    _version_re = re.compile(r"__version__\s+=\s+(?P<version>.*)")
    with open(black_py, "r", encoding="utf8") as f:
        match = _version_re.search(f.read())
        version = match.group("version") if match is not None else '"unknown"'
    return str(ast.literal_eval(version))


setuptools.setup(
    name="linkedin_api",
    # BUG FIX: version was hard-coded to "1.0.1" while get_version() sat
    # unused; read the single source of truth from the package instead.
    version=get_version(),
    author="Quirk",
    author_email="tomquirkacc@gmail.com",
    description="Python wrapper for the Linkedin API",
    # BUG FIX: the literal string "long_description" was passed instead of
    # calling get_long_description(), so PyPI would show the wrong text.
    long_description=get_long_description(),
    long_description_content_type="text/markdown",
    url="https://github.com/kemalcanbora/linkedin-api",
    license="MIT",
    packages=setuptools.find_packages(),
    install_requires=["requests"],
    classifiers=(
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ),
)
29.95122
78
0.674267
8e2b83f900468bcb76049f65beb3534079e043be
644
py
Python
server.py
dvtate/single-file-programs
1c54833ebee5fa20d04b1d36c59fb553de985812
[ "MIT" ]
2
2017-09-04T05:11:53.000Z
2020-01-18T22:31:25.000Z
server.py
dvtate/single-file-programs
1c54833ebee5fa20d04b1d36c59fb553de985812
[ "MIT" ]
null
null
null
server.py
dvtate/single-file-programs
1c54833ebee5fa20d04b1d36c59fb553de985812
[ "MIT" ]
null
null
null
import socket
import sys

host = "127.0.0.1"

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host, 10000)  # localhost + port 10000

print("The server is starting :D")

# start working: bind, retrying on a user-supplied port if 10000 is taken
try:
    sock.bind(server_address)
except OSError:  # BUG FIX: bare `except:` also swallowed KeyboardInterrupt
    print("ERROR: port in use.")
    # BUG FIX: raw_input() is Python 2 only and raises NameError on Python 3.
    new_port = input("New port number: ")
    sock.bind((host, int(new_port)))

sock.listen(1)
print("Listening...")

# Echo loop: accept one client at a time and echo back whatever it sends.
while True:
    connection, address = sock.accept()
    try:
        while True:
            data = connection.recv(1024)
            # BUG FIX: recv() returns b"" forever once the peer disconnects;
            # without this break the loop spins and prints empty data.
            if not data:
                break
            print("received :D %s" % (data,))  # typo fix: "recieved"
            connection.sendall(data)
            # BUG FIX: recv() yields bytes on Python 3, so the original
            # comparison against the str "close" could never be true.
            if data == b"close":
                break
    finally:
        connection.close()
18.4
57
0.659938
20a4452959e375202a8c22c13cb92d32d73588ac
18,458
py
Python
src/lib/nets/volumetric/hrnet/seg_hrnet3d.py
charzharr/Hierarchical-Contrastive-Pretraining
3c7ad022a90fe7137ed3a22655d48ae1ba27510c
[ "MIT" ]
null
null
null
src/lib/nets/volumetric/hrnet/seg_hrnet3d.py
charzharr/Hierarchical-Contrastive-Pretraining
3c7ad022a90fe7137ed3a22655d48ae1ba27510c
[ "MIT" ]
null
null
null
src/lib/nets/volumetric/hrnet/seg_hrnet3d.py
charzharr/Hierarchical-Contrastive-Pretraining
3c7ad022a90fe7137ed3a22655d48ae1ba27510c
[ "MIT" ]
null
null
null
# ------------------------------------------------------------------------------ # Adapted by Charley Zhang (Dec 2021) from: # https://github.com/HRNet/HRNet-Semantic-Segmentation/blob/pytorch-v1.1/lib/models/seg_hrnet.py # ------------------------------------------------------------------------------ # Copyright (c) Microsoft # Licensed under the MIT License. # Written by Ke Sun (sunk@mail.ustc.edu.cn) # ------------------------------------------------------------------------------ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import functools import numpy as np import torch import torch.nn as nn import torch._utils import torch.nn.functional as F BatchNorm2d = nn.BatchNorm2d BN_MOMENTUM = 0.01 logger = logging.getLogger(__name__) def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, 
stride=stride, padding=1, bias=False) self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM) self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False) self.bn3 = BatchNorm2d(planes * self.expansion, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class HighResolutionModule(nn.Module): def __init__(self, num_branches, blocks, num_blocks, num_inchannels, num_channels, fuse_method, multi_scale_output=True): super(HighResolutionModule, self).__init__() self._check_branches( num_branches, blocks, num_blocks, num_inchannels, num_channels) self.num_inchannels = num_inchannels self.fuse_method = fuse_method self.num_branches = num_branches self.multi_scale_output = multi_scale_output self.branches = self._make_branches( num_branches, blocks, num_blocks, num_channels) self.fuse_layers = self._make_fuse_layers() self.relu = nn.ReLU(inplace=True) def _check_branches(self, num_branches, blocks, num_blocks, num_inchannels, num_channels): if num_branches != len(num_blocks): error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format( num_branches, len(num_blocks)) logger.error(error_msg) raise ValueError(error_msg) if num_branches != len(num_channels): error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format( num_branches, len(num_channels)) logger.error(error_msg) raise ValueError(error_msg) if num_branches != len(num_inchannels): error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format( num_branches, len(num_inchannels)) logger.error(error_msg) raise ValueError(error_msg) def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1): downsample = None if stride != 1 or \ 
self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.num_inchannels[branch_index], num_channels[branch_index] * block.expansion, kernel_size=1, stride=stride, bias=False), BatchNorm2d(num_channels[branch_index] * block.expansion, momentum=BN_MOMENTUM), ) layers = [] layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index], stride, downsample)) self.num_inchannels[branch_index] = \ num_channels[branch_index] * block.expansion for i in range(1, num_blocks[branch_index]): layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index])) return nn.Sequential(*layers) def _make_branches(self, num_branches, block, num_blocks, num_channels): branches = [] for i in range(num_branches): branches.append( self._make_one_branch(i, block, num_blocks, num_channels)) return nn.ModuleList(branches) def _make_fuse_layers(self): if self.num_branches == 1: return None num_branches = self.num_branches num_inchannels = self.num_inchannels fuse_layers = [] for i in range(num_branches if self.multi_scale_output else 1): fuse_layer = [] for j in range(num_branches): if j > i: fuse_layer.append(nn.Sequential( nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, 1, 0, bias=False), BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM))) elif j == i: fuse_layer.append(None) else: conv3x3s = [] for k in range(i-j): if k == i - j - 1: num_outchannels_conv3x3 = num_inchannels[i] conv3x3s.append(nn.Sequential( nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), BatchNorm2d(num_outchannels_conv3x3, momentum=BN_MOMENTUM))) else: num_outchannels_conv3x3 = num_inchannels[j] conv3x3s.append(nn.Sequential( nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), BatchNorm2d(num_outchannels_conv3x3, momentum=BN_MOMENTUM), nn.ReLU(inplace=True))) fuse_layer.append(nn.Sequential(*conv3x3s)) fuse_layers.append(nn.ModuleList(fuse_layer)) return 
nn.ModuleList(fuse_layers) def get_num_inchannels(self): return self.num_inchannels def forward(self, x): if self.num_branches == 1: return [self.branches[0](x[0])] for i in range(self.num_branches): x[i] = self.branches[i](x[i]) x_fuse = [] for i in range(len(self.fuse_layers)): y = x[0] if i == 0 else self.fuse_layers[i][0](x[0]) for j in range(1, self.num_branches): if i == j: y = y + x[j] elif j > i: width_output = x[i].shape[-1] height_output = x[i].shape[-2] y = y + F.interpolate( self.fuse_layers[i][j](x[j]), size=[height_output, width_output], mode='bilinear') else: y = y + self.fuse_layers[i][j](x[j]) x_fuse.append(self.relu(y)) return x_fuse blocks_dict = { 'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck } class HighResolutionNet(nn.Module): def __init__(self, config, **kwargs): extra = config.MODEL.EXTRA super(HighResolutionNet, self).__init__() # stem net self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False) self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM) self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False) self.bn2 = BatchNorm2d(64, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.stage1_cfg = extra['STAGE1'] num_channels = self.stage1_cfg['NUM_CHANNELS'][0] block = blocks_dict[self.stage1_cfg['BLOCK']] num_blocks = self.stage1_cfg['NUM_BLOCKS'][0] self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) stage1_out_channel = block.expansion*num_channels self.stage2_cfg = extra['STAGE2'] num_channels = self.stage2_cfg['NUM_CHANNELS'] block = blocks_dict[self.stage2_cfg['BLOCK']] num_channels = [ num_channels[i] * block.expansion for i in range(len(num_channels))] self.transition1 = self._make_transition_layer( [stage1_out_channel], num_channels) self.stage2, pre_stage_channels = self._make_stage( self.stage2_cfg, num_channels) self.stage3_cfg = extra['STAGE3'] num_channels = self.stage3_cfg['NUM_CHANNELS'] block = blocks_dict[self.stage3_cfg['BLOCK']] num_channels = [ 
num_channels[i] * block.expansion for i in range(len(num_channels))] self.transition2 = self._make_transition_layer( pre_stage_channels, num_channels) self.stage3, pre_stage_channels = self._make_stage( self.stage3_cfg, num_channels) self.stage4_cfg = extra['STAGE4'] num_channels = self.stage4_cfg['NUM_CHANNELS'] block = blocks_dict[self.stage4_cfg['BLOCK']] num_channels = [ num_channels[i] * block.expansion for i in range(len(num_channels))] self.transition3 = self._make_transition_layer( pre_stage_channels, num_channels) self.stage4, pre_stage_channels = self._make_stage( self.stage4_cfg, num_channels, multi_scale_output=True) last_inp_channels = np.int(np.sum(pre_stage_channels)) self.last_layer = nn.Sequential( nn.Conv2d( in_channels=last_inp_channels, out_channels=last_inp_channels, kernel_size=1, stride=1, padding=0), BatchNorm2d(last_inp_channels, momentum=BN_MOMENTUM), nn.ReLU(inplace=True), nn.Conv2d( in_channels=last_inp_channels, out_channels=config.DATASET.NUM_CLASSES, kernel_size=extra.FINAL_CONV_KERNEL, stride=1, padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0) ) def _make_transition_layer( self, num_channels_pre_layer, num_channels_cur_layer): num_branches_cur = len(num_channels_cur_layer) num_branches_pre = len(num_channels_pre_layer) transition_layers = [] for i in range(num_branches_cur): if i < num_branches_pre: if num_channels_cur_layer[i] != num_channels_pre_layer[i]: transition_layers.append(nn.Sequential( nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), BatchNorm2d( num_channels_cur_layer[i], momentum=BN_MOMENTUM), nn.ReLU(inplace=True))) else: transition_layers.append(None) else: conv3x3s = [] for j in range(i+1-num_branches_pre): inchannels = num_channels_pre_layer[-1] outchannels = num_channels_cur_layer[i] \ if j == i-num_branches_pre else inchannels conv3x3s.append(nn.Sequential( nn.Conv2d( inchannels, outchannels, 3, 2, 1, bias=False), BatchNorm2d(outchannels, momentum=BN_MOMENTUM), 
nn.ReLU(inplace=True))) transition_layers.append(nn.Sequential(*conv3x3s)) return nn.ModuleList(transition_layers) def _make_layer(self, block, inplanes, planes, blocks, stride=1): downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM), ) layers = [] layers.append(block(inplanes, planes, stride, downsample)) inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(inplanes, planes)) return nn.Sequential(*layers) def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True): num_modules = layer_config['NUM_MODULES'] num_branches = layer_config['NUM_BRANCHES'] num_blocks = layer_config['NUM_BLOCKS'] num_channels = layer_config['NUM_CHANNELS'] block = blocks_dict[layer_config['BLOCK']] fuse_method = layer_config['FUSE_METHOD'] modules = [] for i in range(num_modules): # multi_scale_output is only used last module if not multi_scale_output and i == num_modules - 1: reset_multi_scale_output = False else: reset_multi_scale_output = True modules.append( HighResolutionModule(num_branches, block, num_blocks, num_inchannels, num_channels, fuse_method, reset_multi_scale_output) ) num_inchannels = modules[-1].get_num_inchannels() return nn.Sequential(*modules), num_inchannels def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.conv2(x) x = self.bn2(x) x = self.relu(x) x = self.layer1(x) x_list = [] for i in range(self.stage2_cfg['NUM_BRANCHES']): if self.transition1[i] is not None: x_list.append(self.transition1[i](x)) else: x_list.append(x) y_list = self.stage2(x_list) x_list = [] for i in range(self.stage3_cfg['NUM_BRANCHES']): if self.transition2[i] is not None: x_list.append(self.transition2[i](y_list[-1])) else: x_list.append(y_list[i]) y_list = self.stage3(x_list) x_list = [] for i in 
range(self.stage4_cfg['NUM_BRANCHES']): if self.transition3[i] is not None: x_list.append(self.transition3[i](y_list[-1])) else: x_list.append(y_list[i]) x = self.stage4(x_list) # Upsampling x0_h, x0_w = x[0].size(2), x[0].size(3) x1 = F.upsample(x[1], size=(x0_h, x0_w), mode='bilinear') x2 = F.upsample(x[2], size=(x0_h, x0_w), mode='bilinear') x3 = F.upsample(x[3], size=(x0_h, x0_w), mode='bilinear') x = torch.cat([x[0], x1, x2, x3], 1) x = self.last_layer(x) return x def init_weights(self, pretrained='',): logger.info('=> init weights from normal distribution') for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight, std=0.001) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) if os.path.isfile(pretrained): pretrained_dict = torch.load(pretrained) logger.info('=> loading pretrained model {}'.format(pretrained)) model_dict = self.state_dict() pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict.keys()} #for k, _ in pretrained_dict.items(): # logger.info( # '=> loading {} pretrained model {}'.format(k, pretrained)) model_dict.update(pretrained_dict) self.load_state_dict(model_dict) def get_seg_model(cfg, **kwargs): model = HighResolutionNet(cfg, **kwargs) model.init_weights(cfg.MODEL.PRETRAINED) return model
38.215321
96
0.545021
d52168378699f96e11ce0c868dbbbe3c04e75d60
135
py
Python
codility_python_tests/05_max_gaps_binary.py
sreedhar-venkatesan/codility-python-tests
b9536eb1bced90ba9a5a192d3cc424be4367b406
[ "MIT" ]
null
null
null
codility_python_tests/05_max_gaps_binary.py
sreedhar-venkatesan/codility-python-tests
b9536eb1bced90ba9a5a192d3cc424be4367b406
[ "MIT" ]
null
null
null
codility_python_tests/05_max_gaps_binary.py
sreedhar-venkatesan/codility-python-tests
b9536eb1bced90ba9a5a192d3cc424be4367b406
[ "MIT" ]
null
null
null
def solution(N):
    """Return the length of the longest run of zeros enclosed by ones
    in the binary representation of N (the "binary gap")."""
    # strip('0') drops trailing zeros (format() never emits leading ones),
    # so splitting on '1' leaves only the enclosed zero-runs.
    zero_runs = format(N, 'b').strip('0').split('1')
    return max(len(run) for run in zero_runs)


if __name__ == "__main__":
    X = solution(20)
    print(X)
19.285714
56
0.585185
43efe74f8c73549c9ddcdabc2ea331f04dae63ff
1,954
py
Python
client.py
AricHasting/senior-software
0424cd9aa94533ef8ba58a2f70e279761028f96e
[ "MIT" ]
null
null
null
client.py
AricHasting/senior-software
0424cd9aa94533ef8ba58a2f70e279761028f96e
[ "MIT" ]
7
2018-09-02T23:42:43.000Z
2018-11-08T22:14:28.000Z
client.py
AricHasting/senior-software
0424cd9aa94533ef8ba58a2f70e279761028f96e
[ "MIT" ]
4
2018-08-30T01:12:11.000Z
2018-09-11T17:44:57.000Z
#!/usr/bin/env python3
# Credit: https://medium.com/swlh/lets-write-a-chat-app-in-python-f6783a9ac170
"""Script for Tkinter GUI chat client."""
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
from collections import deque
import re
import select

# This will be the list of received messages
buffer = deque()
connected = True
client_socket = None


def attempt_to_receive():
    """Background loop: block on select() and append each decoded incoming
    message to `buffer` until `connected` is cleared."""
    global client_socket
    global connected
    while connected:
        try:
            ready = select.select([client_socket], [], [])
            if ready[0]:
                msg = client_socket.recv(1024).decode("utf8")
                buffer.append(msg)
        except OSError:
            # Possibly client has left the chat.
            # BUG FIX: `raise("OSError")` raised a str, which is itself a
            # TypeError in Python 3; re-raise the original exception instead.
            raise
    return


def has_message():
    """Return True if at least one received message is waiting in the buffer."""
    return bool(buffer)


def receive():
    """Pop and return the oldest buffered message, or '' if the buffer
    is empty."""
    if buffer:
        return buffer.popleft()
    return ''


def send(msg):
    """Broadcast msg across the chatroom; "{quit}" also disconnects."""
    global connected
    if not connected:
        # BUG FIX: `raise("...")` raised a plain str (TypeError in Python 3);
        # raise a real exception type so callers can catch it.
        raise RuntimeError(
            "Can't send message because this client is not connected!")
    if msg == "{quit}":
        connected = False
    client_socket.send(bytes(msg, "utf8"))


def connect(HOST, PORT):
    """This must be the first client method that is called."""
    ADDR = (HOST, PORT)
    global client_socket
    client_socket = socket(AF_INET, SOCK_STREAM)
    client_socket.connect(ADDR)
    receive_thread = Thread(target=attempt_to_receive)
    receive_thread.daemon = True
    receive_thread.start()
28.735294
78
0.663255
cd97183d361f561198349a724fb7a039e7f811d2
2,049
py
Python
tests/test_database/test_subroutines.py
ajstewart/tkp
2aab1d021d10e3d1f3c4c8a836aea96ac6ae413f
[ "BSD-2-Clause" ]
9
2015-04-30T22:10:14.000Z
2020-06-09T01:24:20.000Z
tests/test_database/test_subroutines.py
ajstewart/tkp
2aab1d021d10e3d1f3c4c8a836aea96ac6ae413f
[ "BSD-2-Clause" ]
218
2015-01-08T11:10:57.000Z
2021-11-25T05:52:42.000Z
tests/test_database/test_subroutines.py
ajstewart/tkp
2aab1d021d10e3d1f3c4c8a836aea96ac6ae413f
[ "BSD-2-Clause" ]
14
2015-03-11T11:21:58.000Z
2020-06-16T09:15:57.000Z
import math import unittest import tkp.db from tkp.testutil.decorators import requires_database from tkp.testutil.db_queries import convert_to_cartesian as db_cartesian from tkp.utility.coordinates import eq_to_cart as py_cartesian """Test miscellaneous minor database functions""" @requires_database() class TestCartesianConversions(unittest.TestCase): """This is a very simple function, but it's worth a quick check for numerical consistency, since we are utilising two different functions to give the same thing. (And if nothing else it enforces a check on future code alterations ). For known results, we use pairs of tuples; structure is: ( (ra, decl), (x,y,z) ) """ def setUp(self): self.db = tkp.db.Database() def test_known_results(self): pole_results = ((0.0, 90.0), (0, 0, 1.0)) meridian_eq = ((0.0, 0.0), (1.0, 0, 0)) meridian_eq_wrap = ((360.0, 0.0), (1.0, 0, 0)) antimeridian_eq = ((180.0, 0.0), (-1.0, 0, 0)) ninety_eq = ((90, 0.0), (0.0, 1.0, 0)) fortyfive_eq = ((45, 0.0), (1.0 / math.sqrt(2), 1.0 / math.sqrt(2), 0)) fortyfive_fortyfive = ((45, 45.0), (math.sin(math.radians(45)) / math.sqrt(2), math.sin(math.radians(45)) / math.sqrt(2), math.cos(math.radians(45)))) def check_known_result(kr): py_result = py_cartesian(*kr[0]) db_result = db_cartesian(self.db.connection, *kr[0]) for i in range(3): self.assertAlmostEqual(kr[1][i], py_result[i]) self.assertAlmostEqual(kr[1][i], db_result[i]) for kr in (pole_results, meridian_eq, meridian_eq_wrap, antimeridian_eq, ninety_eq, fortyfive_eq): check_known_result(kr)
34.15
74
0.545144
d06ed512aa4d2bcbf49f89cf296b71172c46fc5f
7,297
py
Python
script/grafana.py
NovatecConsulting/grafana-ldap-sync-script
71e6b9f5c16de14bf35177b9f036986c2205753e
[ "Apache-2.0" ]
9
2020-07-17T06:36:23.000Z
2022-03-27T19:35:50.000Z
script/grafana.py
NovatecConsulting/grafana-ldap-sync-script
71e6b9f5c16de14bf35177b9f036986c2205753e
[ "Apache-2.0" ]
2
2021-08-19T13:25:32.000Z
2022-02-03T16:06:55.000Z
script/grafana.py
NovatecConsulting/grafana-ldap-sync-script
71e6b9f5c16de14bf35177b9f036986c2205753e
[ "Apache-2.0" ]
6
2021-01-05T18:46:36.000Z
2022-03-28T11:35:46.000Z
from grafana_api.grafana_api import GrafanaClientError, GrafanaBadInputError from grafana_api.grafana_face import GrafanaFace from .config import * from .helpers import * import logging grafana_api = "" configuration = "" logger = logging.getLogger() logger_mut = logging.getLogger("mutate") def setup_grafana(config_dict): global grafana_api, configuration configuration = config_dict grafana_api = GrafanaFace( auth=configuration.GRAFANA_AUTH, host=configuration.GRAFANA_URL ) def delete_team_by_name(name): """ Deletes a team with a given name. :param name: The name if the Team to be deleted. :return: Returns True if the team to be deleted existed, returns False if it did not. """ team_data = grafana_api.teams.get_team_by_name(name) if len(team_data) > 0: for data_set in team_data: logger_mut.info("Deleting team with name %s and id %s" % (name, data_set["id"])) if not configuration.DRY_RUN: grafana_api.teams.delete_team(data_set["id"]) return True return False def create_team(name, mail): """ Creates a team with the given name and mail. :param name: The name of the team. :param mail: The mail of the team. :return: The API response. """ logger_mut.info("Creating team with name %s" % name) if not configuration.DRY_RUN: return grafana_api.teams.add_team({ "name": name, "mail": mail }) def create_user_with_random_pw(user): """ Creates a user from a dictionary resembling a user. Generates a random alphanumerical String as password. :param user: The dictionary off of which the user should be created. """ user_dict = dict(user) user_dict["password"] = get_random_alphanumerical() user_dict["OrgId"] = 1 logger_mut.info("Creating user with login %s, name %s and mail %s" % (user_dict["login"], user_dict["name"], user_dict["email"])) if not configuration.DRY_RUN: grafana_api.admin.create_user(user_dict) def delete_user_by_login(login): """ Deletes the user with the given login. :param login: The login of the user to be deleted. :return: The response of the api. 
""" if login == configuration.GRAFANA_AUTH[0]: logger.info("The user '%s' is used by this script for accessing Grafana thus will not be deleted." % login) else: logger_mut.info("Deleting user with name %s" % login) if not configuration.DRY_RUN: return grafana_api.admin.delete_user(grafana_api.users.find_user(login)["id"]) return False def create_folder(folder_name, folder_uuid): """ Creates a folder with a given name and uuid. Returns the api-response if the folder was create successfully. If an error occurs, false is returned. :param folder_name: The name of the folder to be created. :param folder_uuid: The uuid of the folder to be created. :return: The api-response if the folder was create successfully. If an error occurs, false is returned. """ try: logger_mut.info("Creating folder with name %s and id %s" % (folder_name, folder_uuid)) if not configuration.DRY_RUN: return grafana_api.folder.create_folder(folder_name, folder_uuid) except GrafanaClientError: return False def add_user_to_team(login, team): """ Adds the user with the given login to the team with the given name. :param login: The login of the user to be added to the team. :param team: The team the user should be added to. """ try: logger_mut.info("Adding user %s to team %s" % (login, team)) if not configuration.DRY_RUN: grafana_api.teams.add_team_member(get_id_of_team(team), get_id_by_login(login)) except GrafanaBadInputError: return False def get_members_of_team(team): """ Returns an array containing all members of the team carrying the given name. Each user is represented by a dictionary consisting of "name", "email" and "login" :param team: The name of the team the members should be returned of. :return: An array containing all users as described above. 
""" logger.info("Fetching members of team %s" % team) teams = grafana_api.teams.get_team_by_name(team) if not teams: return [] result = [] users = grafana_api.teams.get_team_members(teams[0]["id"]) if users is not None: for user in users: result.append({"login": user["login"], "name": user["name"], "email": user["email"]}) return result def remove_member_from_team(grafana_team, user_login): logger_mut.info("Removing user %s from team %s" % (user_login, grafana_team)) if not configuration.DRY_RUN: grafana_api.teams.remove_team_member(get_id_of_team(grafana_team), get_id_by_login(user_login)) def login_taken(login): """ Checks if a given grafana-login is already taken. Returns True if the login is taken. :param login: the grafana-login which should be checked. :return: True if the grafana-login is already taken, False if the login is available. """ try: grafana_api.users.find_user(login) return True except GrafanaClientError: return False def get_id_by_login(login): """ Returns the id of a grafana-login. :param login: the grafana-login the id should be returned from. :return: The id of the given login. """ return grafana_api.users.find_user(login)["id"] def exists_folder(uid): """ Checks if a folder with the given uid exists in grafana. :param uid: The uid of the folder that should be checked. :return: Returns True if the folder exists, otherwise False is returned. """ try: grafana_api.folder.get_folder(uid) return True except GrafanaClientError: return False def get_id_of_team(team): """ Returns the id of the grafana team with the given name. :param team: The name of the grafana team. :return: The id of the grafana team with the given name. Returns False if the Team does not exist. 
""" teams = grafana_api.teams.get_team_by_name(team) if len(teams) < 1: return False return teams[0]["id"] def update_folder_permissions(folder_id, permissions): """ Sets the given permissions for the folder found under the given id """ logger_mut.info("Setting permission of folder %s to %s" % (folder_id, permissions)) if not configuration.DRY_RUN: grafana_api.folder.update_folder_permissions(folder_id, {"items": permissions}) def get_all_teams(): """ Returns all teams present in the connected grafana instance. """ return grafana_api.teams.search_teams() def get_all_users(): """ Returns all users present in the connected grafana instance. """ logger.info("Fetching all grafana users") user_logins = [] users = grafana_api.users.search_users() if users is not None: for user in users: user_logins.append({"login": user["login"], "name": user["name"], "email": user["email"]}) return user_logins
33.626728
133
0.670138
eb895e030d257898202d3fbafed4088ca2da7504
2,228
py
Python
tensorlayer/third_party/roi_pooling/test_roi_layer.py
OliverZijia/tensorlayer2
01113b53e84a3bbb298b9c35ebd53254e487350f
[ "Apache-2.0" ]
null
null
null
tensorlayer/third_party/roi_pooling/test_roi_layer.py
OliverZijia/tensorlayer2
01113b53e84a3bbb298b9c35ebd53254e487350f
[ "Apache-2.0" ]
null
null
null
tensorlayer/third_party/roi_pooling/test_roi_layer.py
OliverZijia/tensorlayer2
01113b53e84a3bbb298b9c35ebd53254e487350f
[ "Apache-2.0" ]
null
null
null
from tensorlayer.layers import * from tensorlayer.third_party.roi_pooling.roi_pooling.roi_pooling_ops import \ roi_pooling # from roi_pooling.roi_pooling_ops import roi_pooling # input feature map going into the RoI pooling input_value = [[[[1], [2], [4], [4]], [[3], [4], [1], [2]], [[6], [2], [1], [7.0]], [[1], [3], [2], [8]]]] input_value = np.asarray(input_value, dtype='float32') # Regions of interest as lists of: # feature map index, upper left, bottom right coordinates rois_value = [[0, 0, 0, 1, 1], [0, 1, 1, 2, 2], [0, 2, 2, 3, 3], [0, 0, 0, 2, 2], [0, 0, 0, 3, 3]] rois_value = np.asarray(rois_value, dtype='int32') # the pool_height and width are parameters of the ROI layer pool_height, pool_width = (2, 2) n_rois = len(rois_value) y_shape = [n_rois, 1, pool_height, pool_width] print('Input: ', input_value, ', shape: ', input_value.shape) print('ROIs: ', rois_value, ', shape: ', rois_value.shape) # precise semantics is now only defined by the kernel, need tests input = tf.compat.v1.placeholder(tf.float32) rois = tf.compat.v1.placeholder(tf.int32) # y = roi_pooling(input, rois, pool_height=2, pool_width=2) n = InputLayer(input, name='in') n = ROIPoolingLayer(n, rois=rois, pool_height=2, pool_width=2, name='roi') y = n.outputs mean = tf.reduce_mean(input_tensor=y) grads = tf.gradients(mean, input) print(type(grads)) print(len(grads)) print(grads) print(input_value.shape) with tf.compat.v1.Session('') as sess: input_const = tf.constant(input_value, tf.float32) rois_const = tf.constant(rois_value, tf.int32) y = roi_pooling(input_const, rois_const, pool_height=2, pool_width=2) mean = tf.reduce_mean(input_tensor=y) numerical_grad_error_1 = tf.compat.v1.test.compute_gradient_error([input_const], [input_value.shape], y, y_shape) numerical_grad_error_2 = tf.compat.v1.test.compute_gradient_error([input_const], [input_value.shape], mean, []) print(numerical_grad_error_1, numerical_grad_error_2) with tf.compat.v1.Session('') as sess: y_output = sess.run(y, 
feed_dict={input: input_value, rois: rois_value}) print('y: ', y_output) grads_output = sess.run(grads, feed_dict={input: input_value, rois: rois_value}) print('grads: ', grads_output)
40.509091
117
0.708707
e828cbfd535905e6a0b545013d4d15a73f7a9da2
18,827
py
Python
tools/SDKTool/libs/WrappedDeviceAPI/demo/demo.py
BernhardRiemann/GameAISDK
da24c600e1cdc890739ee274032a17fb9ce75c5c
[ "Apache-2.0" ]
3
2021-03-15T13:53:37.000Z
2021-11-17T10:34:29.000Z
tools/SDKTool/libs/WrappedDeviceAPI/demo/demo.py
VenmoTools/GameAISDK
208320760440400d369aa8ab2f2439494195e6bd
[ "Apache-2.0" ]
null
null
null
tools/SDKTool/libs/WrappedDeviceAPI/demo/demo.py
VenmoTools/GameAISDK
208320760440400d369aa8ab2f2439494195e6bd
[ "Apache-2.0" ]
1
2021-02-19T12:04:05.000Z
2021-02-19T12:04:05.000Z
# -*- coding: utf-8 -*- """ This source code file is licensed under the GNU General Public License Version 3. For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package. Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. """ import time import cv2 import sys import os import queue __dir__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, __dir__) from WrappedDeviceAPI import * def sample(): deviceAPI = IDeviceAPI('Android') ''' describe:初始化 param[0],str类型:手机序列号,默认为None,当接入一个设备时可不指定序列号,当接入多个设备时需要指定 param[1],bool类型:手机为横屏还是竖屏,True为竖屏,False为横屏 param[2],int类型:长边的长度 param[3],str类型:指定日志目录,默认为/tmp/LogDeviceAPI param[4],枚举类型:指定日志级别,取值为[LOG_DEBUG, LOG_INFO, LOG_WARNING, LOG_ERROR, LOG_CRITICAL],默认为LOG_DEBUG param[5],bool类型:是否show出图片 param[5],字典 :一些组件需要的参数,可以自己定义,例如端口号等等 return,bool类型,成功返回True,失败返回False ''' if not deviceAPI.Initialize('908fedc0', False, 720, 1280, '/tmp/LogDeviceAPI', LOG_DEBUG): return False ''' describe:获取当前图像帧 return:Mat类型的图像 ''' frame = deviceAPI.GetFrame() if frame is None: return False ''' ========================================================================================================== ============================================TouchCMD================================================== ========================================================================================================== describe:让手机执行动作 aType参数表示动作类型[TOUCH_CLICK, TOUCH_DOWN, TOUCH_UP, TOUCH_SWIPE, TOUCH_MOVE] sx为x坐标,当aType为[TOUCH_CLICK, TOUCH_DOWN]时表示按压点的x坐标,当aType为[TOUCH_SWIPE, TOUCH_MOVE]时表示起始点的x坐标 sy为y坐标,当aType为[TOUCH_CLICK, TOUCH_DOWN]时表示按压点的y坐标,当aType为[TOUCH_SWIPE, TOUCH_MOVE]时表示起始点的y坐标 ex为x坐标,当aType为[TOUCH_SWIPE, TOUCH_MOVE]时表示结束点的x坐标 ex为y坐标,当aType为[TOUCH_SWIPE, TOUCH_MOVE]时表示结束点的y坐标 DaType为执行该操作的方式,有minitouch方式和ADB命令方式,分别表示为[DACT_TOUCH, DACT_ADB],默认为DACT_TOUCH contact为触点,默认为0 durationMS为执行一次动作持续的时间,在aType为[TOUCH_CLICK, 
TOUCH_SWIPE]时使用,当aType为TOUCH_CLICK时默认为-1,当aType为TOUCH_SWIPE时默认为50 needUp仅在aType为TOUCH_SWIPE时使用,表示滑动后是否需要抬起,默认为True return:True or False ''' # deviceAPI.TouchCMD(aType=[TOUCH_CLICK, TOUCH_DOWN, TOUCH_UP, TOUCH_SWIPE, TOUCH_MOVE], # sx=int, # sy=int, # ex=int, # ey=int, # contact=0, # durationMS=50, # needUp=True, # wait_time=0) ''' describe:执行点击操作 sx为横坐标,相对于初始化时传入的坐标系 sy为纵坐标,相对于初始化时传入的坐标系 contact为触点,默认为0 durantionMS为动作持续时间,默认为-1 wait_time为执行动作后,手机端等待时间,单位为毫秒,默认为0 return True or False ''' if not deviceAPI.TouchCMD(aType=TOUCH_CLICK, sx=300, sy=300, contact=0, durantionMS=-1, wait_time=0): return False ''' describe:执行按压操作 sx为横坐标,相对于初始化时传入的坐标系 sy为纵坐标,相对于初始化时传入的坐标系 contact为触点,默认为0 wait_time为执行动作后,手机端等待时间,单位为毫秒,默认为0 return True or False ''' if not deviceAPI.TouchCMD(aType=TOUCH_DOWN, sx=300, sy=300, contact=0, wait_time=0): return False ''' describe:执行抬起操作 wait_time为执行动作后,手机端等待时间,单位为秒,默认为0 return True or False ''' if not deviceAPI.TouchCMD(aType=TOUCH_UP, contact=1, wait_time=0): return False ''' describe:执行滑动 sx, sy为起始点的坐标 ex, ey为终止点的坐标 DaType表示执行动作的实现方式,有minitouch和ADB两种[DACT_TOUCH, DACT_ADB],默认为DACT_TOUCH contact为触点,默认为0 durantionMS为动作持续时间,默认为50 needUp表示滑动后是否抬起,默认为True wait_time为执行动作后,手机端等待时间,单位为毫秒,默认为0 return True or False ''' if not deviceAPI.TouchCMD(aType=TOUCH_SWIPE, sx=500, sy=500, ex=600, ey=600, contact=0, durationMS=500, needUp=False, wait_time=0): return False ''' describe:执行滑动操作,与swipe不同的是他只有终止点,通过多个move可以组合成一个swipe sx为横坐标,相对于初始化时传入的坐标系 sy为纵坐标,相对于初始化时传入的坐标系 contact为触点,默认为0 wait_time为执行动作后,手机端等待时间,单位为毫秒,默认为0 return True or False ''' if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=300, sy=300, contact=0, wait_time=0): return False ''' describe:执行滑动操作,与move不同的是它进行了补点操作 sx为横坐标,相对于初始化时传入的坐标系 sy为纵坐标,相对于初始化时传入的坐标系 contact为触点,默认为0 wait_time为执行动作后,手机端等待时间,单位为毫秒,默认为0 return True or False ''' if not deviceAPI.TouchCMD(aType=TOUCH_SWIPEMOVE, sx=300, sy=300, durationMS=50, contact=0, wait_time=0): return False ''' describe:reset wait_time为执行动作后,手机端等待时间,单位为毫秒,默认为0 return 
True or False ''' if not deviceAPI.TouchCMD(aType=TOUCH_RESET, wait_time=0): return False ''' ========================================================================================================== ============================================DeviceCMD================================================= ========================================================================================================== describe:执行设备相关的操作 aType:操作类型[DEVICE_INSTALL, DEVICE_START, DEVICE_EXIT, DEVICE_CURAPP, DEVICE_CLEARAPP, DEVICE_KEY, DEVICE_TEXT, DEVICE_SLEEP, DEVICE_WAKE, DEVICE_WMSIZE, DEVICE_BINDRO, DEVICE_SCREENSHOT, DEVICE_SCREENORI] APKPath:安装包路径 PKGName:包名 ActivityName:包的activity key: ''' # deviceAPI.DeviceCMD(aType=[DEVICE_INSTALL, DEVICE_START, DEVICE_EXIT, DEVICE_CURAPP, DEVICE_CLEARAPP, DEVICE_KEY, # DEVICE_TEXT, DEVICE_SLEEP, DEVICE_WAKE, DEVICE_WMSIZE, DEVICE_BINDRO, DEVICE_SCREENSHOT, # DEVICE_SCREENORI], # APKPath=str, # PKGName=str, # ActivityName=str, # key=str, # text=str, # rotation=str, # targetPath=str) ''' aType为DEVICE_INSTALL时表示安装app APKPath为所需参数,表示apk包在PC端的存放路径 return True or False ''' if not deviceAPI.DeviceCMD(aType=DEVICE_INSTALL, APKPath='/home/ting/kidting/game_ai_sdk/data/qqspeed/game.apk'): return False ''' aType为DEVICE_START时表示启动app APKPath为所需参数,表示apk包在PC端的存放路径 ActivityName为apk包启动的activity return True or False ''' if not deviceAPI.DeviceCMD(aType=DEVICE_START, PKGName='com.tencent.tmgp.speedmobile', ActivityName='com.tencent.tmgp.speedmobile.speedmobile'): return False ''' aType为DEVICE_CURAPP时表示获取当前app return 字典,currentAPP = {'package': str(), 'activity': str()} ''' currentAPP = deviceAPI.DeviceCMD(aType=DEVICE_CURAPP) ''' aType为DEVICE_PARAM时表示获取当前app运行时,手机的性能参数 PKGName为所需参数,表示APP包名 return deviceParam为字典,分别存有CPU, 内存, 电量, 温度这四个参数 deviceParam = { 'cpu': float, 'mem': float, 'temperature': float, 'battery': int } ''' deviceParam = deviceAPI.DeviceCMD(aType=DEVICE_PARAM, PKGName='com.tencent.tmgp.speedmobile') ''' aType为DEVICE_CLEARAPP时表示清空app数据 
PKGName为所需参数,表示APP包名 return True or False ''' if not deviceAPI.DeviceCMD(aType=DEVICE_CLEARAPP, PKGName='com.tencent.tmgp.speedmobile'): return False ''' aType为DEVICE_EXIT时表示退出app PKGName为所需参数,表示APP包名 return True or False ''' if not deviceAPI.DeviceCMD(aType=DEVICE_EXIT, PKGName='com.tencent.tmgp.speedmobile'): return False ''' aType为DEVICE_KEY时表示输入手机键盘的按键 key为所需参数,str类型,表示手机具体按键 return True or False ''' if not deviceAPI.DeviceCMD(aType=DEVICE_KEY, key='cmd'): return False ''' aType为DEVICE_TEXT时表示输入字符串 text为所需参数,str类型,表示具体输入的字符串 return True or False ''' if not deviceAPI.DeviceCMD(aType=DEVICE_TEXT, text='abc'): return False ''' aType为DEVICE_SLEEP时表示设备锁屏 return True or False ''' if not deviceAPI.DeviceCMD(aType=DEVICE_SLEEP): return False ''' aType为DEVICE_WAKE时表示设备解锁启动 return True or False ''' if not deviceAPI.DeviceCMD(aType=DEVICE_WAKE): return False ''' aType为DEVICE_WMSIZE时表示获取设备的分辨率 return height, width ''' height, width = deviceAPI.DeviceCMD(aType=DEVICE_WMSIZE) if height == -1 or width == -1: return False # ''' # aType为DEVICE_BINDRO时表示设置设备锁定朝向 # return height, width # ''' # height, width = deviceAPI.DeviceCMD(aType=DEVICE_BINDRO) # if height == -1 or width == -1: # return False ''' aType为DEVICE_SCREENSHOT时表示快照,截屏 targetPath表示在PC端存放的路径 return True or False ''' if not deviceAPI.DeviceCMD(aType=DEVICE_SCREENSHOT, targetPath='./test.png'): return False ''' aType为DEVICE_SCREENORI时表示返回设备当前时横屏还是竖屏 return UI_SCREEN_ORI_PORTRAIT or UI_SCREEN_ORI_LANDSCAPE ''' res = deviceAPI.DeviceCMD(aType=DEVICE_SCREENORI) if res == UI_SCREEN_ORI_PORTRAIT: print('竖屏') elif res == UI_SCREEN_ORI_LANDSCAPE: print('横屏') else: return False ''' describe:获取最大触点数 return int ''' maxContact = deviceAPI.DeviceCMD(aType=DEVICE_MAXCONTACT) if maxContact < 0: return False ''' describe:用ADB命令执行点击操作 return int ''' if not deviceAPI.DeviceCMD(aType=DEVICE_CLICK, px=300, py=300): return False ''' describe:用ADB命令执行滑动操作(需要先执行点击后,才能看到滑动效果,将会瞬间滑动到指定的坐标上) return int ''' if not 
deviceAPI.DeviceCMD(aType=DEVICE_SWIPE, sx=300, sy=300, ex=500, ey=500, durationMS=50): return False ''' describe:等待所有指令发送至手机端,在程序退出时使用 ''' deviceAPI.Finish() ''' ========================================================================================================== ========================================================================================================== ========================================================================================================== ''' def demo1(): # deviceAPI1 = IDeviceAPI('Android', 'PlatformWeTest') # deviceAPI2 = IDeviceAPI('Android', 'PlatformWeTest') deviceAPI1 = IDeviceAPI('Android') deviceAPI2 = IDeviceAPI('Android') deviceAPI1.Initialize(deviceSerial='4da2dea3', height=200, width=1280, logDir='./log', minitouchPort=1122, minicapPort=1133) deviceAPI2.Initialize(deviceSerial='9889db384258523633', height=200, width=1280, logDir='./log', minitouchPort=1144, minicapPort=1155) # maxContact = deviceAPI.DeviceCMD(aType=DEVICE_MAXCONTACT) # begin = time.time() # for i in range(10): # if not deviceAPI1.TouchCMD(aType=TOUCH_CLICK, sx=300, sy=300, durationMS=1000, wait_time=1000): # print('click failed') # end = time.time() # print(end - begin) # # if not deviceAPI.TouchCMD(aType=TOUCH_DOWN, sx=100, sy=100, wait_time=1000): # print('click failed') # # if not deviceAPI.TouchCMD(aType=TOUCH_UP): # print('up failed') # if not deviceAPI.TouchCMD(aType=TOUCH_CLICK, sx=500, sy=500, contact=0, durantionMS=50, wait_time=1000): # return False # if not deviceAPI1.DeviceCMD(aType=DEVICE_SWIPE, sx=640, sy=100, ex=640, ey=300, durationMS=1000): # print('click failed') # time.sleep(100000) # return None if not deviceAPI1.TouchCMD(aType=TOUCH_DOWN, sx=640, sy=100, wait_time=1000): print('click failed') if not deviceAPI2.TouchCMD(aType=TOUCH_DOWN, sx=200, sy=200, wait_time=50): print('click failed') if not deviceAPI1.TouchCMD(aType=TOUCH_SWIPEMOVE, sx=640, sy=300, durationMS=1000, contact=0, wait_time=1000): return False if not 
deviceAPI2.TouchCMD(aType=TOUCH_SWIPEMOVE, sx=100, sy=100, durationMS=1000, contact=0, wait_time=1000): return False if not deviceAPI1.TouchCMD(aType=TOUCH_SWIPEMOVE, sx=100, sy=100, durationMS=1000, contact=0, wait_time=1000): return False if not deviceAPI2.TouchCMD(aType=TOUCH_SWIPEMOVE, sx=200, sy=200, durationMS=1000, contact=0, wait_time=1000): return False # print(maxContact) # if not deviceAPI.TouchCMD(aType=TOUCH_SWIPE, sx=200, sy=200, ex=400, ey=400, wait_time=1000, durationMS=500): # print('swipe failed') # return False # if not deviceAPI.TouchCMD(aType=TOUCH_DOWN, sx=300, sy=300, wait_time=1000): # print('click failed') # return False # if not deviceAPI.TouchCMD(aType=TOUCH_DOWN, sx=500, sy=500, contact=1, wait_time=1000): # print('click failed') # return False # for i in range(10): # if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=500, sy=500, wait_time=1000): # print('click failed') # return False # if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=400, sy=400, contact=1, wait_time=1000): # print('click failed') # return False # if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=400, sy=400, wait_time=1000): # print('click failed') # return False # if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=500, sy=500, contact=1, wait_time=1000): # print('click failed') # return False # # time.sleep(1) # # if not deviceAPI.TouchCMD(aType=TOUCH_UP, contact=1, wait_time=1000): # print('click failed') # return False # # if not deviceAPI.TouchCMD(aType=TOUCH_RESET): # print('reset failed') # return False time.sleep(5) for i in range(100000): frame1 = deviceAPI1.GetFrame() frame2 = deviceAPI2.GetFrame() if frame1 is not None: cv2.imshow('test1', frame1) cv2.waitKey(1) if frame2 is not None: cv2.imshow('test2', frame2) cv2.waitKey(1) # #time.sleep(1) def demo(): # deviceAPI1 = IDeviceAPI('Android', 'PlatformWeTest') deviceAPI1 = IDeviceAPI('Android') flag, strerror = deviceAPI1.Initialize(isPortrait=False, long_edge=1280, logDir='./log', level=LOG_INFO, showRawScreen=False) 
print(flag) print(strerror) # maxContact = deviceAPI.DeviceCMD(aType=DEVICE_MAXCONTACT) # begin = time.time() # for i in range(10): # if not deviceAPI1.TouchCMD(aType=TOUCH_CLICK, sx=300, sy=300, durationMS=1000, wait_time=1000): # print('click failed') # end = time.time() # print(end - begin) # # if not deviceAPI.TouchCMD(aType=TOUCH_DOWN, sx=100, sy=100, wait_time=1000): # print('click failed') # # if not deviceAPI.TouchCMD(aType=TOUCH_UP): # # print('up failed') # pkgName = deviceAPI1.DeviceCMD(aType=DEVICE_CURAPP) # parameter= deviceAPI1.DeviceCMD(aType=DEVICE_PARAM, PKGName=pkgName['package']) # print(parameter) # exit(0) if not deviceAPI1.TouchCMD(aType=TOUCH_CLICK, sx=1130, sy=442, contact=0, durationMS=5000, wait_time=1000): return False if not deviceAPI1.DeviceCMD(aType=DEVICE_SWIPE, sx=640, sy=100, ex=640, ey=300, durationMS=1000): print('click failed') # time.sleep(100000) # return None # if not deviceAPI1.TouchCMD(aType=TOUCH_DOWN, sx=100, sy=100, wait_time=5000): # print('click failed') # if not deviceAPI1.TouchCMD(aType=TOUCH_UP): # print('up failed') # begin = time.time() if not deviceAPI1.TouchCMD(aType=TOUCH_CLICK, sx=1270, sy=300, durationMS=5000, wait_time=1000): print('click failed') # end = time.time() # print("action:{}".format(end - begin)) begin = time.time() if not deviceAPI1.TouchCMD(aType=TOUCH_SWIPEMOVE, sx=100, sy=300, durationMS=1000, contact=0, wait_time=1000): return False if not deviceAPI1.TouchCMD(aType=TOUCH_SWIPEMOVE, sx=100, sy=100, durationMS=1000, contact=0, wait_time=1000): return False end = time.time() # print("action:{}".format(end - begin)) # print(maxContact) # if not deviceAPI.TouchCMD(aType=TOUCH_SWIPE, sx=200, sy=200, ex=400, ey=400, wait_time=1000, durationMS=500): # print('swipe failed') # return False # if not deviceAPI.TouchCMD(aType=TOUCH_DOWN, sx=300, sy=300, wait_time=1000): # print('click failed') # return False # if not deviceAPI.TouchCMD(aType=TOUCH_DOWN, sx=500, sy=500, contact=1, wait_time=1000): # 
print('click failed') # return False # for i in range(10): # if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=500, sy=500, wait_time=1000): # print('click failed') # return False # if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=400, sy=400, contact=1, wait_time=1000): # print('click failed') # return False # if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=400, sy=400, wait_time=1000): # print('click failed') # return False # if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=500, sy=500, contact=1, wait_time=1000): # print('click failed') # return False # # time.sleep(1) # # if not deviceAPI.TouchCMD(aType=TOUCH_UP, contact=1, wait_time=1000): # print('click failed') # return False # # if not deviceAPI.TouchCMD(aType=TOUCH_RESET): # print('reset failed') # return False # print('action down') time.sleep(5) count = 0 abegin = time.time() while True: begin = time.time() frame1, error = deviceAPI1.GetFrame() end = time.time() # print('getframe: {0}', format(count)) # print('getframe: {0}', format(end - begin)) if frame1 is not None: # cv2.imwrite('test.png', frame1) count += 1 # if count == 500: # break cv2.imshow('test1', frame1) cv2.waitKey(1) aend = time.time() # print((aend - abegin)/501) if __name__ == '__main__': # sample() demo()
35.792776
138
0.581293
5524fc0dc97256ccf31fb1155e3a925dafa5cac5
3,014
py
Python
DeepLearningExamples/CUDA-Optimized/FastSpeech/fastspeech/utils/tensorboard.py
puririshi98/benchmark
79f554f1e1cf36f62994c78e0e6e5b360f554022
[ "BSD-3-Clause" ]
null
null
null
DeepLearningExamples/CUDA-Optimized/FastSpeech/fastspeech/utils/tensorboard.py
puririshi98/benchmark
79f554f1e1cf36f62994c78e0e6e5b360f554022
[ "BSD-3-Clause" ]
null
null
null
DeepLearningExamples/CUDA-Optimized/FastSpeech/fastspeech/utils/tensorboard.py
puririshi98/benchmark
79f554f1e1cf36f62994c78e0e6e5b360f554022
[ "BSD-3-Clause" ]
null
null
null
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import matplotlib.pyplot as plt import numpy as np import cv2 import data as global_data plt.switch_backend('Agg') def image_plot(x, name='image'): fig, ax = plt.subplots() ax.imshow(x, cmap='magma', aspect='auto') fig.canvas.draw() buf = np.array(fig.canvas.renderer._renderer) plt.clf() plt.close('all') cv2.imshow(name, buf) cv2.waitKey(0) def plot_to_buf(x, align=True): fig, ax = plt.subplots() ax.plot(x) if align: ax.set_ylim([-1, 1]) fig.canvas.draw() im = np.array(fig.canvas.renderer._renderer) plt.clf() plt.close('all') return np.rollaxis(im[..., :3], 2) def imshow_to_buf(x, scale01=False): def softmax(x): """Compute softmax values for each sets of scores in x.""" return np.exp(x) / np.sum(np.exp(x), axis=0) if scale01: x = (x - x.min()) / (x.max() - x.min()) if x.max() > 1.: x = softmax(x) if len(x.shape) == 3: x = x[0] fig, ax = plt.subplots() ax.imshow(x, cmap='magma', aspect='auto') fig.canvas.draw() im = np.array(fig.canvas.renderer._renderer) plt.clf() plt.close('all') return np.rollaxis(im[..., :3], 2) def origin_to_chrs(target): results = [] for t in target: idx = t - 1 if t - 1 >= 0 else 0 if idx < len(global_data.idx2chr): results.append(global_data.idx2chr[idx]) else: break return ''.join(results)
35.458824
81
0.677837
00762ba4db8cca9f903390c5b28d9a7a73df1107
157
py
Python
scripts_for_analysis/clade_specific_ogs.py
jls943/sponge_evol_dynamics
54a1dbdca096f0a30e01d55ad840764d5ba2a350
[ "CC0-1.0" ]
null
null
null
scripts_for_analysis/clade_specific_ogs.py
jls943/sponge_evol_dynamics
54a1dbdca096f0a30e01d55ad840764d5ba2a350
[ "CC0-1.0" ]
null
null
null
scripts_for_analysis/clade_specific_ogs.py
jls943/sponge_evol_dynamics
54a1dbdca096f0a30e01d55ad840764d5ba2a350
[ "CC0-1.0" ]
null
null
null
#! /usr/bin/env python3 #function to identify all of the orthogroups that contain only those species from a specific clade, #provided as a list of species.
31.4
99
0.77707
127d66bcdd30edba166b156134f3f76c93503971
1,693
py
Python
perfkitbenchmarker/linux_packages/iperf.py
robfrut135/PerfKitBenchmarker
ccaf81b47ed5e3f27065e8f8d9fc42d071bfc22c
[ "Apache-2.0" ]
2
2019-04-09T11:15:13.000Z
2019-09-10T11:28:49.000Z
perfkitbenchmarker/linux_packages/iperf.py
robfrut135/PerfKitBenchmarker
ccaf81b47ed5e3f27065e8f8d9fc42d071bfc22c
[ "Apache-2.0" ]
1
2021-03-26T00:41:05.000Z
2021-03-26T00:41:05.000Z
perfkitbenchmarker/linux_packages/iperf.py
robfrut135/PerfKitBenchmarker
ccaf81b47ed5e3f27065e8f8d9fc42d071bfc22c
[ "Apache-2.0" ]
1
2021-12-07T13:29:48.000Z
2021-12-07T13:29:48.000Z
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module containing iperf installation and cleanup functions.""" import re from perfkitbenchmarker import errors IPERF_EL6_RPM = ('http://pkgs.repoforge.org/iperf/' 'iperf-2.0.4-1.el6.rf.x86_64.rpm') IPERF_EL7_RPM = ('http://pkgs.repoforge.org/iperf/' 'iperf-2.0.4-1.el7.rf.x86_64.rpm') def _Install(vm): """Installs the iperf package on the VM.""" vm.InstallPackages('iperf') def YumInstall(vm): """Installs the iperf package on the VM.""" try: vm.InstallEpelRepo() _Install(vm) # RHEL 7 does not have an iperf package in the standard/EPEL repositories except errors.VirtualMachine.RemoteCommandError as e: stdout, _ = vm.RemoteCommand('cat /etc/redhat-release') major_version = int(re.search('release ([0-9])', stdout).group(1)) if major_version == 6: iperf_rpm = IPERF_EL6_RPM elif major_version == 7: iperf_rpm = IPERF_EL7_RPM else: raise e vm.RemoteCommand('sudo rpm -ivh %s' % iperf_rpm) def AptInstall(vm): """Installs the iperf package on the VM.""" _Install(vm)
31.351852
75
0.707029
e8b8bc83de272edd5da21c39f360336d9ff383bc
3,531
py
Python
tests/components/sharkiq/test_config_flow.py
pcaston/core
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
[ "Apache-2.0" ]
1
2021-07-08T20:09:55.000Z
2021-07-08T20:09:55.000Z
tests/components/sharkiq/test_config_flow.py
pcaston/core
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
[ "Apache-2.0" ]
47
2021-02-21T23:43:07.000Z
2022-03-31T06:07:10.000Z
tests/components/sharkiq/test_config_flow.py
OpenPeerPower/core
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
[ "Apache-2.0" ]
null
null
null
"""Test the Shark IQ config flow.""" from unittest.mock import patch import aiohttp import pytest from sharkiqpy import AylaApi, SharkIqAuthError from openpeerpower import config_entries, setup from openpeerpower.components.sharkiq.const import DOMAIN from openpeerpower.core import OpenPeerPower from .const import CONFIG, TEST_PASSWORD, TEST_USERNAME, UNIQUE_ID from tests.common import MockConfigEntry async def test_form(opp): """Test we get the form.""" await setup.async_setup_component(opp, "persistent_notification", {}) result = await opp.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["errors"] == {} with patch("sharkiqpy.AylaApi.async_sign_in", return_value=True), patch( "openpeerpower.components.sharkiq.async_setup_entry", return_value=True, ) as mock_setup_entry: result2 = await opp.config_entries.flow.async_configure( result["flow_id"], CONFIG, ) assert result2["type"] == "create_entry" assert result2["title"] == f"{TEST_USERNAME:s}" assert result2["data"] == { "username": TEST_USERNAME, "password": TEST_PASSWORD, } await opp.async_block_till_done() mock_setup_entry.assert_called_once() @pytest.mark.parametrize( "exc,base_error", [ (SharkIqAuthError, "invalid_auth"), (aiohttp.ClientError, "cannot_connect"), (TypeError, "unknown"), ], ) async def test_form_error(opp: OpenPeerPower, exc: Exception, base_error: str): """Test form errors.""" result = await opp.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch.object(AylaApi, "async_sign_in", side_effect=exc): result2 = await opp.config_entries.flow.async_configure( result["flow_id"], CONFIG, ) assert result2["type"] == "form" assert result2["errors"].get("base") == base_error async def test_reauth_success(opp: OpenPeerPower): """Test reauth flow.""" with patch("sharkiqpy.AylaApi.async_sign_in", return_value=True): mock_config = MockConfigEntry(domain=DOMAIN, 
unique_id=UNIQUE_ID, data=CONFIG) mock_config.add_to_opp(opp) result = await opp.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_REAUTH, "unique_id": UNIQUE_ID}, data=CONFIG, ) assert result["type"] == "abort" assert result["reason"] == "reauth_successful" @pytest.mark.parametrize( "side_effect,result_type,msg_field,msg", [ (SharkIqAuthError, "form", "errors", "invalid_auth"), (aiohttp.ClientError, "abort", "reason", "cannot_connect"), (TypeError, "abort", "reason", "unknown"), ], ) async def test_reauth( opp: OpenPeerPower, side_effect: Exception, result_type: str, msg_field: str, msg: str, ): """Test reauth failures.""" with patch("sharkiqpy.AylaApi.async_sign_in", side_effect=side_effect): result = await opp.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_REAUTH, "unique_id": UNIQUE_ID}, data=CONFIG, ) msg_value = result[msg_field] if msg_field == "errors": msg_value = msg_value.get("base") assert result["type"] == result_type assert msg_value == msg
30.973684
86
0.658737
3d7bf904e5854b6e1813556d06c94afeccbcb731
2,283
py
Python
amime/modules/anime/TV-SHOW/SEASON/Musim/2022 ANIME/KTGR MUSIM/GUGUR/01 FALL.py
Myudi422/ccgnime_req
a0f7596ba101204539b4120dffa08912b6560efe
[ "MIT" ]
null
null
null
amime/modules/anime/TV-SHOW/SEASON/Musim/2022 ANIME/KTGR MUSIM/GUGUR/01 FALL.py
Myudi422/ccgnime_req
a0f7596ba101204539b4120dffa08912b6560efe
[ "MIT" ]
null
null
null
amime/modules/anime/TV-SHOW/SEASON/Musim/2022 ANIME/KTGR MUSIM/GUGUR/01 FALL.py
Myudi422/ccgnime_req
a0f7596ba101204539b4120dffa08912b6560efe
[ "MIT" ]
null
null
null
import httpx from anilist.types import Anime from pyrogram import filters from pyrogram.types import CallbackQuery from pyromod.helpers import ikb from pyromod.nav import Pagination from amime.amime import Amime @Amime.on_callback_query(filters.regex(r"^fall_2022 anime (?P<page>\d+)")) async def anime_suggestions(bot: Amime, callback: CallbackQuery): page = int(callback.matches[0]["page"]) message = callback.message lang = callback._lang keyboard = [] async with httpx.AsyncClient(http2=True) as client: response = await client.post( url="https://graphql.anilist.co", json=dict( query=""" query($page: Int, $perPage: Int) { Page(page: $page, perPage: $perPage) { media(type: ANIME, format: TV, sort: TRENDING_DESC, seasonYear: 2022, season: FALL) { id title { romaji english native } siteUrl } } } """, variables=dict( perPage=100, ), ), headers={ "Content-Type": "application/json", "Accept": "application/json", }, ) data = response.json() await client.aclose() if data["data"]: items = data["data"]["Page"]["media"] suggestions = [ Anime(id=item["id"], title=item["title"], url=item["siteUrl"]) for item in items ] layout = Pagination( suggestions, item_data=lambda i, pg: f"menu {i.id}", item_title=lambda i, pg: i.title.romaji, page_data=lambda pg: f"fall_2022 anime {pg}", ) lines = layout.create(page, lines=8) if len(lines) > 0: keyboard += lines keyboard.append([(lang.back_button, "2022_se")]) await message.edit_text( lang.suggestions_text, reply_markup=ikb(keyboard), )
31.273973
109
0.476128
748af053d3e3556ccef43d34a66cb27502407b8f
28,521
py
Python
model_search/phoenix.py
dywsjtu/model_search
116c4f9016d8b89cf06d057dda020dae3371f211
[ "Apache-2.0" ]
3,315
2021-01-20T15:21:37.000Z
2022-03-30T18:21:29.000Z
model_search/phoenix.py
dywsjtu/model_search
116c4f9016d8b89cf06d057dda020dae3371f211
[ "Apache-2.0" ]
57
2021-01-19T20:51:03.000Z
2022-03-24T11:04:07.000Z
model_search/phoenix.py
dywsjtu/model_search
116c4f9016d8b89cf06d057dda020dae3371f211
[ "Apache-2.0" ]
380
2021-02-20T01:31:35.000Z
2022-03-31T16:48:58.000Z
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """A Phoenix estimator builder.""" from absl import logging import kerastuner from model_search import controller from model_search import ensembler from model_search import hparam as hp from model_search import loss_fns from model_search import metric_fns from model_search import task_manager from model_search.architecture import architecture_utils from model_search.ensembler import EnsembleLogits from model_search.generators import base_tower_generator from model_search.generators import trial_utils from model_search.meta import distillation from model_search.meta import transfer_learning from model_search.metadata import ml_metadata_db from model_search.proto import phoenix_spec_pb2 from model_search.proto import transfer_learning_spec_pb2 import numpy as np import tensorflow.compat.v2 as tf REPLAY_CONFIG_FILENAME = "replay_config.pbtxt" _TL_HOOKS = { transfer_learning_spec_pb2.TransferLearningSpec .UNIFORM_AVERAGE_TRANSFER_LEARNING: transfer_learning.UniformAverageTransferLearningHook, transfer_learning_spec_pb2.TransferLearningSpec .LOSS_WEIGHTED_AVERAGE_TRANSFER_LEARNING: transfer_learning.LossWeightedAverageTransferLearningHook, } def _merge_hparams(original_hparams, overrides): """Merges to hp.HParams objects.""" # make a copy hparams = hp.HParams(**original_hparams.values()) existing_ones = {k: v for k, v in overrides.values().items() if k in hparams} new_ones = {k: v for k, v 
in overrides.values().items() if k not in hparams} hparams.override_from_dict(existing_ones) for k, v in new_ones.items(): hparams.add_hparam(k, v) return hparams def _default_predictions_fn(logits, mode=tf.estimator.ModeKeys.TRAIN, temperature=1.0): """Converts logits to predictions dict. Assumes classification.""" new_logits = logits if mode == tf.estimator.ModeKeys.PREDICT and temperature != 1.0: assert temperature > 0 temp_const = tf.constant(1 / temperature, name="softmax_temperature_const") logging.info("Applying temperature to logits") new_logits = tf.multiply(logits, temp_const, name="softmax_temperature_mul") predictions = tf.math.argmax(input=new_logits, axis=-1) probabilities = tf.nn.softmax(new_logits) log_probabilities = tf.nn.log_softmax(new_logits) predictions_dict = { "predictions": predictions, "probabilities": probabilities, "log_probabilities": log_probabilities, "logits": logits } return predictions_dict class Estimator(tf.estimator.Estimator): """Estimator wrapper to add reporting to metadata storage after evaluation.""" def __init__(self, model_fn, model_dir=None, config=None, params=None, warm_start_from=None, metadata=None): tf.estimator.Estimator._assert_members_are_not_overridden = staticmethod( # pylint: disable=protected-access lambda _: None) super(Estimator, self).__init__( model_fn=model_fn, config=config, params=params, warm_start_from=warm_start_from) self._metadata = metadata self._model_dir = config.model_dir def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None, name=None): eval_results = super(Estimator, self).evaluate( input_fn=input_fn, steps=steps, hooks=hooks, checkpoint_path=checkpoint_path, name=name) if self._metadata is not None: native_results = {k: v.item() for k, v in eval_results.items()} logging.info("Saving the following evaluation dictionary.") logging.info(native_results) self._metadata.report(native_results, self._model_dir) return eval_results class Phoenix(object): """Phoenix: A smart 
search AutoML algorithm.""" def __init__(self, phoenix_spec, input_layer_fn, study_owner, study_name, head=None, logits_dimension=None, label_vocabulary=None, loss_fn=None, metric_fn=None, predictions_fn=None, metadata=None): """Constructs a Phoenix instance. Args: phoenix_spec: A `PhoenixSpec` proto with the spec for the run. input_layer_fn: A function that converts feature Tensors to input layer. See learning.autolx.model_search.data.Provider.get_input_layer_fn for details. study_owner: A string holding the ldap of the study owner. We use tuner platforms to conduct the various architectures training. This field specifies the study owner. study_name: A string holding the study name. head: A head to use with Phoenix for creating the loss and eval metrics. If no head is given, Phoenix falls back to using the loss_fn and metric_fn. N.B.: Phoenix creates its own EstimatorSpec so everything besides the loss and eval metrics returned by head will be ignored. logits_dimension: An int holding the dimension of the output. Must be provided if head is None. Will be ignored if head is not None. label_vocabulary: List or tuple with labels vocabulary. Needed only if the labels are of type string. This list is used by the loss function if loss_fn is not provided. It is also used in the metric function to create the accuracy metric ops. Use only with multiclass classification problems. loss_fn: A function to compute the loss. Ignored if `head` is not None. Must accept as inputs a `labels` Tensor, a `logits` Tensor, and optionally a `weights` Tensor. `weights` must either be rank 0 or have the same rank as labels. If None, Phoenix defaults to using softmax cross-entropy. metric_fn: Metrics for Tensorboard. Ignored if `head` is not None. metric_fn takes `label` and `predictions` as input, and outputs a dictionary of (tensor, update_op) tuples. 
`label` is a Tensor (in the single task case) or a dict of Tensors (in the case of multi-task, where the key of the dicts correspond to the task names). `predictions` is a dict of Tensors. In the single task case, it consists of `predictions`, `probabilities`, and `log_probabilities`. In the multi-task case, it consists of the same keys as that of the single task case, but also those corresponding to each task (e.g., predictions/task_name_1). See `metric_fns` for more detail. If `metric_fn` is None, it will include a metric for the number of parameters, accuracy (if logit_dimensions >= 2), and AUC metrics (if logit_dimensions == 2). predictions_fn: A function to convert eval logits to the `predictions` dictionary passed to metric_fn. If `None`, defaults to computing 'predictions', 'probabilities', and 'log_probabilities'. metadata: An object that implements metadata api in learning.adanets.phoenix.metadata.Metadata """ # Check Phoenix preconditions and fail early if any of them are broken. if phoenix_spec.multi_task_spec: # TODO(b/172564129): Add support for head and custom loss_fns in # multi-task. assert not head, "head is not supported for multi-task." if head: msg = "Do not specify {} when using head as head already contains it." assert not logits_dimension, msg.format("logits_dimension") assert not label_vocabulary, msg.format("label_vocabulary") assert not loss_fn, msg.format("loss_fn") assert not metric_fn, msg.format("metric_fn") # Check ensemble search / distillation preconditions. 
ensemble_spec = phoenix_spec.ensemble_spec distillation_spec = phoenix_spec.distillation_spec if trial_utils.has_distillation( distillation_spec) and trial_utils.has_ensemble_search( ensemble_spec ) and not trial_utils.is_intermixed_ensemble_search(ensemble_spec): ensemble_search_spec = ( ensemble_spec.nonadaptive_search if trial_utils.is_nonadaptive_ensemble_search(ensemble_spec) else ensemble_spec.adaptive_search) if (distillation_spec.minimal_pool_size == ensemble_search_spec.minimal_pool_size): logging.warning("minimal_pool_size is the same for ensemble spec and " "distillation spec, so distillation will be ignored.") self._phoenix_spec = phoenix_spec self._input_layer_fn = input_layer_fn self._ensembler = ensembler.Ensembler(phoenix_spec) self._distiller = distillation.Distiller(phoenix_spec.distillation_spec) self._study_owner = study_owner self._study_name = study_name self._head = head self._logits_dimension = ( self._head.logits_dimension if head else logits_dimension) self._label_vocabulary = label_vocabulary if self._label_vocabulary: assert self._logits_dimension == len(self._label_vocabulary) self._loss_fn = loss_fn or loss_fns.make_multi_class_loss_fn( label_vocabulary=label_vocabulary) self._user_specified_metric_fn = metric_fn self._predictions_fn = (predictions_fn or _default_predictions_fn) if metadata is None: self._metadata = ml_metadata_db.MLMetaData(phoenix_spec, study_name, study_owner) else: self._metadata = metadata self._task_manager = task_manager.TaskManager(phoenix_spec) self._controller = controller.InProcessController( phoenix_spec=phoenix_spec, metadata=self._metadata) @property def metadata(self): return self._metadata def _get_loss_fn(self, features, mode, my_id, teacher_logits_spec=None): """Gets the applicable loss_fn to use. If head is not None, wraps the head's loss function to match the interface Phoenix expects (see loss_fns.py), unless distillation is being used. Args: features: The features pass to model_fn. 
mode: The mode passed to model_fn. my_id: My trial id (integer). teacher_logits_spec: Logits of the teacher network to use when distilling. Returns: The loss_fn to use. """ if (mode == tf.estimator.ModeKeys.TRAIN and teacher_logits_spec is not None): return distillation.get_distillation_loss_fn( teacher_logits=teacher_logits_spec.logits, distillation_spec=self._phoenix_spec.distillation_spec, my_id=my_id, original_loss_fn=self._loss_fn) if not self._head: return self._loss_fn def head_loss_fn(labels, logits, weights=1.0): """Create a loss fn from the Head object.""" del weights # Head already has weights built in. training_loss = None # There is two types of head, and their api is different. if getattr(self._head, "loss", None) is not None: training_loss = self._head.loss( labels=labels, logits=logits, features=features, mode=mode) elif getattr(self._head, "create_loss", None) is not None: training_loss = self._head.create_loss( labels=labels, logits=logits, features=features, mode=mode).training_loss else: logging.fatal("unable to find loss function in Head object.") return training_loss return head_loss_fn def _make_model_fn(self, run_config, train_steps, use_tpu=False): """Returns a model_fn for the estimator.""" def model_fn(features, labels, mode, params): """Model function that wraps the model specified.""" self._metric_fn = self._user_specified_metric_fn self._default_metric_fn_list = [] if self._logits_dimension >= 2: self._default_metric_fn_list.append( metric_fns.make_accuracy_metric_fn(self._label_vocabulary)) if self._logits_dimension == 2: self._default_metric_fn_list += [ metric_fns.make_auc_roc_metric_fn(self._label_vocabulary), metric_fns.make_auc_pr_metric_fn(self._label_vocabulary) ] my_id = architecture_utils.DirectoryHandler.get_trial_id( run_config.model_dir, self._phoenix_spec) # Create a copy of hparams hparams = params if my_id <= len(self._phoenix_spec.user_suggestions): hparams = _merge_hparams( params, hp.HParams.from_proto( 
self._phoenix_spec.user_suggestions[my_id - 1].hparams)) # When predicting for RNN, we might not need the length. is_training = mode == tf.estimator.ModeKeys.TRAIN lengths_feature_name = self._phoenix_spec.lengths_feature_name if mode == tf.estimator.ModeKeys.PREDICT: if isinstance(features, dict) and lengths_feature_name not in features: lengths_feature_name = "" shared_input_tensor = None shared_lengths = None # Create the input. if self._phoenix_spec.is_input_shared: shared_input_tensor, shared_lengths = self._input_layer_fn( features=features, is_training=is_training, scope_name="Phoenix/SharedInput", lengths_feature_name=lengths_feature_name) # Get all information we have so far. trials = [] # TODO(b/172564129): Only the chief needs the trials. Test to see if # workers need them if not self._phoenix_spec.HasField("replay"): trials = self._metadata.get_completed_trials() else: hparams = _merge_hparams( hparams, hp.HParams.from_proto(self._phoenix_spec.replay.towers[my_id - 1].hparams)) hparams.set_hparam( "initial_architecture", self._phoenix_spec.replay.towers[my_id - 1].architecture[:]) # Update our database - clean up and sync ops. if run_config.is_chief: self._metadata.before_generating_trial_model(my_id, run_config.model_dir) # Determine whether to do ensemble search or distillation on this trial. 
trial_mode = trial_utils.get_trial_mode( self._phoenix_spec.ensemble_spec, self._phoenix_spec.distillation_spec, my_id) generators = self._controller.get_generators(my_id, trials) logit_specs = {} architectures = {} for name, generator in generators.items(): logging.info(generators) logit_spec, architecture = generator.instance.generate( features=features, input_layer_fn=self._input_layer_fn, trial_mode=trial_mode, shared_input_tensor=shared_input_tensor, shared_lengths=shared_lengths, logits_dimension=self._logits_dimension, hparams=hparams, run_config=run_config, is_training=is_training, trials=generator.relevant_trials) logit_specs[name] = logit_spec architectures[name] = architecture training_hooks = [] # TODO(b/172564129): Figure out how to handle transfer learning for multi # task. Install transfer learning hook. if (is_training and self._phoenix_spec.transfer_learning_spec.transfer_learning_type in _TL_HOOKS): tower_name = base_tower_generator.SEARCH_GENERATOR vars_to_warm_start = architecture_utils.get_tower_variables(tower_name) if vars_to_warm_start: hook_fn = _TL_HOOKS[ self._phoenix_spec.transfer_learning_spec.transfer_learning_type] tl_spec = self._phoenix_spec.transfer_learning_spec tl_hook = hook_fn( vars_to_warm_start=vars_to_warm_start, current_trial_id=my_id, completed_trials=trials, discount_factor=tl_spec.previous_trials_discount_factor, max_completed_trials=tl_spec.max_completed_trials, model_dir=run_config.model_dir) training_hooks.append(tl_hook) learning_rate_spec_keys = [ "learning_rate", "l2_regularization", "gradient_max_norm", "exponential_decay_steps", "exponential_decay_rate" ] learning_rate_spec = { key: value for key, value in hparams.values().items() if key in learning_rate_spec_keys } # Create logits of the ensemble and training ops, checking whether to # calculate using the Ensembler or the Distiller. 
teacher_logits = None tower_name = None priors_logits_specs = [] if base_tower_generator.PRIOR_GENERATOR in logit_specs.keys(): priors_logits_specs = logit_specs[base_tower_generator.PRIOR_GENERATOR] if base_tower_generator.REPLAY_GENERATOR in logit_specs.keys(): priors_logits_specs = logit_specs[base_tower_generator.REPLAY_GENERATOR] if trial_mode == trial_utils.TrialMode.ENSEMBLE_SEARCH: ensemble_logits = self._ensembler.bundle_logits( priors_logits_specs=priors_logits_specs, search_logits_specs=logit_specs.get( base_tower_generator.SEARCH_GENERATOR, []), logits_dimension=self._logits_dimension) elif trial_mode == trial_utils.TrialMode.DISTILLATION: # TODO(b/146067345): Initialize some random architecture if search # logits specs is empty. ensemble_logits = self._distiller.bundle_logits( priors_logits_specs=priors_logits_specs, search_logits_specs=logit_specs.get( base_tower_generator.SEARCH_GENERATOR, [])) teacher_logits = ensemble_logits.teacher_logits_spec tower_name = base_tower_generator.SEARCH_GENERATOR else: ensemble_logits = EnsembleLogits( train_logits_specs=logit_specs.get( base_tower_generator.SEARCH_GENERATOR, []), eval_logits_spec=logit_specs.get( base_tower_generator.SEARCH_GENERATOR, [])[0]) # Create the metric_fn if it wasn't specified. 
if not self._metric_fn: metric_fn = metric_fns.create_num_parameters_metric_fn(tower_name) self._default_metric_fn_list.append(metric_fn) self._metric_fn = metric_fns.combine_metric_fns( self._default_metric_fn_list) model_spec = self._task_manager.create_model_spec( features=features, params=hparams, learning_rate_spec=learning_rate_spec, use_tpu=use_tpu, train_logits_specs=ensemble_logits.train_logits_specs, eval_logits_spec=ensemble_logits.eval_logits_spec, labels=labels, mode=mode, lengths=shared_lengths, loss_fn=self._get_loss_fn(features, mode, my_id, teacher_logits), model_directory=run_config.model_dir, predictions_fn=self._predictions_fn) if run_config.is_chief: self._metadata.after_generating_trial_model(my_id) search_architecture = architectures.get( base_tower_generator.SEARCH_GENERATOR, [["no_search"]]) trial_utils.write_replay_spec( model_dir=run_config.model_dir, filename=REPLAY_CONFIG_FILENAME, original_spec=self._phoenix_spec, search_architecture=search_architecture[0], hparams=hparams) # No need to add train op for the eval graph. 
train_op = None if is_training: train_op = self._increment_global_step( model_spec.train_op, train_steps, base_tower_generator.SEARCH_GENERATOR) if isinstance(labels, dict): label_weights = [ label_spec.weight_feature_name for label_spec in self._phoenix_spec.multi_task_spec if not label_spec.weight_is_a_feature ] actual_labels = { name: label for name, label in labels.items() if name not in label_weights } else: actual_labels = labels if use_tpu: eval_metrics = None weights = None if self._phoenix_spec.weight_feature_name: weights = features[self._phoenix_spec.weight_feature_name] if mode != tf.estimator.ModeKeys.PREDICT and not self._head: eval_metrics = (self._metric_fn, [actual_labels, model_spec.predictions, weights]) return tf.compat.v1.estimator.tpu.TPUEstimatorSpec( mode=mode, loss=model_spec.loss, predictions=model_spec.predictions, train_op=train_op, eval_metrics=eval_metrics, training_hooks=training_hooks) eval_metric_ops = None if mode != tf.estimator.ModeKeys.PREDICT and not self._head: weights = None if self._phoenix_spec.weight_feature_name: weights = features[self._phoenix_spec.weight_feature_name] eval_metric_ops = self._metric_fn(actual_labels, model_spec.predictions, weights) if self._head: return self._head.create_estimator_spec( features, mode, ensemble_logits.eval_logits_spec.logits, labels, train_op_fn=lambda _: train_op) return tf.estimator.EstimatorSpec( mode=mode, loss=model_spec.loss, predictions=model_spec.predictions, train_op=train_op, training_hooks=training_hooks + model_spec.train_hooks, eval_metric_ops=eval_metric_ops) return model_fn # TODO(b/172564129): Move increment_global_step to TaskManager. # TODO(b/172564129): Figure out how to set train steps for multi-task. def _increment_global_step(self, train_op, train_steps, tower_name): """Increments the global step based on the tower size. N.B. if the tower size does not divide evenly into the train_steps, it will train for longer than required. 
Args: train_op: The train_op to execute before incrementing the global_step. train_steps: The total number of steps to train for. tower_name: The name of the tower which is currently training. Returns: An tf.Op which increments the global step by the required amount. """ if self._phoenix_spec.use_synchronous_optimizer: return train_op increment_amount = 1 tower_size = architecture_utils.get_architecture_size(tower_name) if (self._phoenix_spec.use_parameter_scaled_training and tower_size): train_step_per_block = max( int(train_steps // self._phoenix_spec.maximum_depth), 1) tower_train_steps = tower_size * train_step_per_block increment_amount = max(int(train_steps // tower_train_steps), 1) with tf.control_dependencies([train_op]): global_step = tf.compat.v1.train.get_or_create_global_step() return tf.compat.v1.assign_add(global_step, increment_amount) def get_estimator(self, run_config, hparams, train_steps): """Returns a Phoenix `Estimator` for train and evaluation. Args: run_config: `RunConfig` object to configure the runtime settings. hparams: `HParams` instance defining custom hyperparameters. train_steps: The total number of training steps. Returns: Returns an `Estimator`. Raises: ValueError: in case flatten is used as a search block or is missing from the initial architecture. """ if not all("FLATTEN" not in block for block in hparams.new_block_type): raise ValueError("Flatten cannot be a search block type") return Estimator( model_fn=self._make_model_fn( run_config=run_config, train_steps=train_steps, use_tpu=False), config=run_config, params=hparams, metadata=self._metadata) def get_tpu_estimator(self, run_config, hparams, train_steps, train_batch_size, eval_on_tpu, embedding_config_spec=None, eval_batch_size=None): """Returns a Phoenix `Estimator` for train and evaluation. Args: run_config: `RunConfig` object to configure the runtime settings. hparams: `HParams` instance defining custom hyperparameters. train_steps: The total number of training steps. 
train_batch_size: batch size for train. eval_on_tpu: whether to use tpu for evaluation. embedding_config_spec: (Optional) Embedding config spec instance. eval_batch_size: (Optional) if not set, we use train batch size. Returns: Returns an `TPUEstimator`. Raises: ValueError: in case flatten is used as a search block or is missing from the initial architecture. """ if not all("FLATTEN" not in block for block in hparams.new_block_type): raise ValueError("Flatten cannot be a search block type") return tf.compat.v1.estimator.tpu.TPUEstimator( model_fn=self._make_model_fn( run_config=run_config, train_steps=train_steps, use_tpu=True), config=run_config, use_tpu=True, params=hparams, train_batch_size=train_batch_size, eval_batch_size=(eval_batch_size or train_batch_size), embedding_config_spec=embedding_config_spec, eval_on_tpu=eval_on_tpu) @staticmethod def get_keras_hyperparameters_space(phoenix_spec, train_steps): """Gets the Phoenix search space as keras Hyperparameters object.""" hp_space = kerastuner.engine.hyperparameters.HyperParameters() hp_space.merge( architecture_utils.get_blocks_search_space(phoenix_spec.blocks_to_use)) hp_space.Float("learning_rate", 1e-6, 0.01, sampling="log") hp_space.Choice("new_block_type", phoenix_spec.blocks_to_use) # Try different optimizers. hp_space.Choice("optimizer", ["momentum", "sgd", "adagrad", "adam", "rmsprop"]) # Search for the best tower of depth phoenix_spec.minimum_depth # Used for initial structure (before evolution + going deeper). for i in range(phoenix_spec.minimum_depth): hp_space.Choice("initial_architecture_{}".format(i), phoenix_spec.blocks_to_use) learning_spec = phoenix_spec.learning_spec # Exponential decay. 
if learning_spec.apply_exponential_decay: hp_space.Float("exponential_decay_rate", learning_spec.min_learning_rate_decay_rate, learning_spec.max_learning_rate_decay_rate) decay_steps = [ train_steps // i for i in range(learning_spec.min_decay_times, learning_spec.max_decay_times) ] seen = set() unique_decay_steps = [ x for x in decay_steps if not (x in seen or seen.add(x)) ] hp_space.Choice("exponential_decay_steps", unique_decay_steps) # Gradient clipping if learning_spec.apply_gradient_clipping: hp_space.Int("gradient_max_norm", learning_spec.min_gradient_norm_when_clipping, learning_spec.max_gradient_norm_when_clipping) # L2 regularization if learning_spec.apply_l2_regularization: hp_space.Float("l2_regularization", learning_spec.min_l2_regularization, learning_spec.max_l2_regularization) # Apply dropout between blocks. Here -1 wouldn't apply any dropouts. if phoenix_spec.apply_dropouts_between_blocks: assert learning_spec.min_dropout < learning_spec.max_dropout step = (learning_spec.max_dropout - learning_spec.min_dropout) / 10 dropout = np.arange(learning_spec.min_dropout, learning_spec.max_dropout, step) hp_space.Choice("dropout_rate", [-1.0] + dropout.tolist()) return hp_space
40.919656
113
0.684548
442508f6fa39a1d461c998f964692ea8d6cbcd28
10,631
py
Python
mne/tests/test_report.py
mvdoc/mne-python
bac50dd08361b10d0a65c614ea2de06308750411
[ "BSD-3-Clause" ]
null
null
null
mne/tests/test_report.py
mvdoc/mne-python
bac50dd08361b10d0a65c614ea2de06308750411
[ "BSD-3-Clause" ]
2
2019-08-14T06:21:15.000Z
2020-10-29T19:54:56.000Z
mne/tests/test_report.py
mvdoc/mne-python
bac50dd08361b10d0a65c614ea2de06308750411
[ "BSD-3-Clause" ]
1
2020-03-05T16:14:37.000Z
2020-03-05T16:14:37.000Z
# Authors: Mainak Jas <mainak@neuro.hut.fi> # Teon Brooks <teon.brooks@gmail.com> # # License: BSD (3-clause) import glob import os import os.path as op import shutil import sys import warnings from nose.tools import assert_true, assert_equal, assert_raises from nose.plugins.skip import SkipTest from mne import Epochs, read_events, pick_types, read_evokeds from mne.io import read_raw_fif from mne.datasets import testing from mne.report import Report from mne.utils import (_TempDir, requires_mayavi, requires_nibabel, requires_PIL, run_tests_if_main, slow_test) from mne.viz import plot_trans import matplotlib matplotlib.use('Agg') # for testing don't use X server data_dir = testing.data_path(download=False) subjects_dir = op.join(data_dir, 'subjects') report_dir = op.join(data_dir, 'MEG', 'sample') raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif') event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif') cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif') fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif') trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif') inv_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif') mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz') base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')) evoked_fname = op.join(base_dir, 'test-ave.fif') # Set our plotters to test mode warnings.simplefilter('always') # enable b/c these tests throw warnings @slow_test @testing.requires_testing_data @requires_PIL def test_render_report(): """Test rendering -*.fif files for mne report.""" tempdir = _TempDir() raw_fname_new = op.join(tempdir, 'temp_raw.fif') event_fname_new = op.join(tempdir, 'temp_raw-eve.fif') cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif') fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif') inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif') for a, b in [[raw_fname, raw_fname_new], [event_fname, 
event_fname_new], [cov_fname, cov_fname_new], [fwd_fname, fwd_fname_new], [inv_fname, inv_fname_new]]: shutil.copyfile(a, b) # create and add -epo.fif and -ave.fif files epochs_fname = op.join(tempdir, 'temp-epo.fif') evoked_fname = op.join(tempdir, 'temp-ave.fif') raw = read_raw_fif(raw_fname_new) picks = pick_types(raw.info, meg='mag', eeg=False) # faster with one type epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks) epochs.save(epochs_fname) epochs.average().save(evoked_fname) report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir) if sys.version.startswith('3.5'): # XXX Some strange MPL/3.5 error... raise SkipTest('Python 3.5 and mpl have unresolved issues') with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') report.parse_folder(data_path=tempdir, on_error='raise') assert_true(len(w) >= 1) assert_true(repr(report)) # Check correct paths and filenames fnames = glob.glob(op.join(tempdir, '*.fif')) for fname in fnames: assert_true(op.basename(fname) in [op.basename(x) for x in report.fnames]) assert_true(''.join(report.html).find(op.basename(fname)) != -1) assert_equal(len(report.fnames), len(fnames)) assert_equal(len(report.html), len(report.fnames)) assert_equal(len(report.fnames), len(report)) # Check saving functionality report.data_path = tempdir report.save(fname=op.join(tempdir, 'report.html'), open_browser=False) assert_true(op.isfile(op.join(tempdir, 'report.html'))) assert_equal(len(report.html), len(fnames)) assert_equal(len(report.html), len(report.fnames)) # Check saving same report to new filename report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False) assert_true(op.isfile(op.join(tempdir, 'report2.html'))) # Check overwriting file report.save(fname=op.join(tempdir, 'report.html'), open_browser=False, overwrite=True) assert_true(op.isfile(op.join(tempdir, 'report.html'))) # Check pattern matching with multiple patterns pattern = ['*raw.fif', '*eve.fif'] with 
warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') report.parse_folder(data_path=tempdir, pattern=pattern) assert_true(len(w) >= 1) assert_true(repr(report)) fnames = glob.glob(op.join(tempdir, '*.raw')) + \ glob.glob(op.join(tempdir, '*.raw')) for fname in fnames: assert_true(op.basename(fname) in [op.basename(x) for x in report.fnames]) assert_true(''.join(report.html).find(op.basename(fname)) != -1) @testing.requires_testing_data @requires_mayavi @requires_PIL def test_render_add_sections(): """Test adding figures/images to section.""" from PIL import Image tempdir = _TempDir() import matplotlib.pyplot as plt report = Report(subjects_dir=subjects_dir) # Check add_figs_to_section functionality fig = plt.plot([1, 2], [1, 2])[0].figure report.add_figs_to_section(figs=fig, # test non-list input captions=['evoked response'], scale=1.2, image_format='svg') assert_raises(ValueError, report.add_figs_to_section, figs=[fig, fig], captions='H') assert_raises(ValueError, report.add_figs_to_section, figs=fig, captions=['foo'], scale=0, image_format='svg') assert_raises(ValueError, report.add_figs_to_section, figs=fig, captions=['foo'], scale=1e-10, image_format='svg') # need to recreate because calls above change size fig = plt.plot([1, 2], [1, 2])[0].figure # Check add_images_to_section with png and then gif img_fname = op.join(tempdir, 'testimage.png') fig.savefig(img_fname) report.add_images_to_section(fnames=[img_fname], captions=['evoked response']) im = Image.open(img_fname) op.join(tempdir, 'testimage.gif') im.save(img_fname) # matplotlib does not support gif report.add_images_to_section(fnames=[img_fname], captions=['evoked response']) assert_raises(ValueError, report.add_images_to_section, fnames=[img_fname, img_fname], captions='H') assert_raises(ValueError, report.add_images_to_section, fnames=['foobar.xxx'], captions='H') evoked = read_evokeds(evoked_fname, condition='Left Auditory', baseline=(-0.2, 0.0)) fig = plot_trans(evoked.info, 
trans_fname, subject='sample', subjects_dir=subjects_dir) report.add_figs_to_section(figs=fig, # test non-list input captions='random image', scale=1.2) assert_true(repr(report)) @slow_test @testing.requires_testing_data @requires_mayavi @requires_nibabel() def test_render_mri(): """Test rendering MRI for mne report.""" tempdir = _TempDir() trans_fname_new = op.join(tempdir, 'temp-trans.fif') for a, b in [[trans_fname, trans_fname_new]]: shutil.copyfile(a, b) report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir) with warnings.catch_warnings(record=True): warnings.simplefilter('always') report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*', n_jobs=2) report.save(op.join(tempdir, 'report.html'), open_browser=False) assert_true(repr(report)) @testing.requires_testing_data @requires_nibabel() def test_render_mri_without_bem(): """Test rendering MRI without BEM for mne report.""" tempdir = _TempDir() os.mkdir(op.join(tempdir, 'sample')) os.mkdir(op.join(tempdir, 'sample', 'mri')) shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz')) report = Report(info_fname=raw_fname, subject='sample', subjects_dir=tempdir) with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') report.parse_folder(tempdir) assert_true(len(w) >= 1) report.save(op.join(tempdir, 'report.html'), open_browser=False) @testing.requires_testing_data @requires_nibabel() def test_add_htmls_to_section(): """Test adding html str to mne report.""" report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir) html = '<b>MNE-Python is AWESOME</b>' caption, section = 'html', 'html_section' report.add_htmls_to_section(html, caption, section) idx = report._sectionlabels.index('report_' + section) html_compare = report.html[idx] assert_true(html in html_compare) assert_true(repr(report)) def test_add_slider_to_section(): """Test adding a slider with a series of images to mne report.""" tempdir = _TempDir() from 
matplotlib import pyplot as plt report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir) section = 'slider_section' figs = list() figs.append(plt.figure()) plt.plot([1, 2, 3]) plt.close('all') figs.append(plt.figure()) plt.plot([3, 2, 1]) plt.close('all') report.add_slider_to_section(figs, section=section) report.save(op.join(tempdir, 'report.html'), open_browser=False) assert_raises(NotImplementedError, report.add_slider_to_section, [figs, figs]) assert_raises(ValueError, report.add_slider_to_section, figs, ['wug']) assert_raises(TypeError, report.add_slider_to_section, figs, 'wug') def test_validate_input(): """Test Report input validation.""" report = Report() items = ['a', 'b', 'c'] captions = ['Letter A', 'Letter B', 'Letter C'] section = 'ABCs' comments = ['First letter of the alphabet.', 'Second letter of the alphabet', 'Third letter of the alphabet'] assert_raises(ValueError, report._validate_input, items, captions[:-1], section, comments=None) assert_raises(ValueError, report._validate_input, items, captions, section, comments=comments[:-1]) values = report._validate_input(items, captions, section, comments=None) items_new, captions_new, comments_new = values assert_equal(len(comments_new), len(items)) run_tests_if_main()
38.941392
79
0.670586
a7002343686c714728edfcdfea87f66985b0d715
10,214
py
Python
sphinx/transforms/post_transforms/images.py
ravwojdyla/sphinx
a7e10c1d36007650b5e3948021d4aa90d78127d4
[ "BSD-2-Clause" ]
1
2021-11-16T19:06:56.000Z
2021-11-16T19:06:56.000Z
sphinx/transforms/post_transforms/images.py
ravwojdyla/sphinx
a7e10c1d36007650b5e3948021d4aa90d78127d4
[ "BSD-2-Clause" ]
null
null
null
sphinx/transforms/post_transforms/images.py
ravwojdyla/sphinx
a7e10c1d36007650b5e3948021d4aa90d78127d4
[ "BSD-2-Clause" ]
1
2021-11-16T19:06:53.000Z
2021-11-16T19:06:53.000Z
""" sphinx.transforms.post_transforms.images ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Docutils transforms used by Sphinx. :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ import os import re from math import ceil from typing import Any, Dict, List, Optional, Tuple from docutils import nodes from sphinx.application import Sphinx from sphinx.locale import __ from sphinx.transforms import SphinxTransform from sphinx.util import epoch_to_rfc1123, logging, requests, rfc1123_to_epoch, sha1 from sphinx.util.images import get_image_extension, guess_mimetype, parse_data_uri from sphinx.util.osutil import ensuredir logger = logging.getLogger(__name__) MAX_FILENAME_LEN = 32 CRITICAL_PATH_CHAR_RE = re.compile('[:;<>|*" ]') class BaseImageConverter(SphinxTransform): def apply(self, **kwargs: Any) -> None: for node in self.document.traverse(nodes.image): if self.match(node): self.handle(node) def match(self, node: nodes.image) -> bool: return True def handle(self, node: nodes.image) -> None: pass @property def imagedir(self) -> str: return os.path.join(self.app.doctreedir, 'images') class ImageDownloader(BaseImageConverter): default_priority = 100 def match(self, node: nodes.image) -> bool: if self.app.builder.supported_image_types == []: return False elif self.app.builder.supported_remote_images: return False else: return '://' in node['uri'] def handle(self, node: nodes.image) -> None: try: basename = os.path.basename(node['uri']) if '?' 
in basename: basename = basename.split('?')[0] if basename == '' or len(basename) > MAX_FILENAME_LEN: filename, ext = os.path.splitext(node['uri']) basename = sha1(filename.encode()).hexdigest() + ext basename = re.sub(CRITICAL_PATH_CHAR_RE, "_", basename) dirname = node['uri'].replace('://', '/').translate({ord("?"): "/", ord("&"): "/"}) if len(dirname) > MAX_FILENAME_LEN: dirname = sha1(dirname.encode()).hexdigest() ensuredir(os.path.join(self.imagedir, dirname)) path = os.path.join(self.imagedir, dirname, basename) headers = {} if os.path.exists(path): timestamp = ceil(os.stat(path).st_mtime) # type: float headers['If-Modified-Since'] = epoch_to_rfc1123(timestamp) r = requests.get(node['uri'], headers=headers) if r.status_code >= 400: logger.warning(__('Could not fetch remote image: %s [%d]') % (node['uri'], r.status_code)) else: self.app.env.original_image_uri[path] = node['uri'] if r.status_code == 200: with open(path, 'wb') as f: f.write(r.content) last_modified = r.headers.get('last-modified') if last_modified: timestamp = rfc1123_to_epoch(last_modified) os.utime(path, (timestamp, timestamp)) mimetype = guess_mimetype(path, default='*') if mimetype != '*' and os.path.splitext(basename)[1] == '': # append a suffix if URI does not contain suffix ext = get_image_extension(mimetype) newpath = os.path.join(self.imagedir, dirname, basename + ext) os.replace(path, newpath) self.app.env.original_image_uri.pop(path) self.app.env.original_image_uri[newpath] = node['uri'] path = newpath node['candidates'].pop('?') node['candidates'][mimetype] = path node['uri'] = path self.app.env.images.add_file(self.env.docname, path) except Exception as exc: logger.warning(__('Could not fetch remote image: %s [%s]') % (node['uri'], exc)) class DataURIExtractor(BaseImageConverter): default_priority = 150 def match(self, node: nodes.image) -> bool: if self.app.builder.supported_remote_images == []: return False elif self.app.builder.supported_data_uri_images is True: return False 
else: return node['uri'].startswith('data:') def handle(self, node: nodes.image) -> None: image = parse_data_uri(node['uri']) ext = get_image_extension(image.mimetype) if ext is None: logger.warning(__('Unknown image format: %s...'), node['uri'][:32], location=node) return ensuredir(os.path.join(self.imagedir, 'embeded')) digest = sha1(image.data).hexdigest() path = os.path.join(self.imagedir, 'embeded', digest + ext) self.app.env.original_image_uri[path] = node['uri'] with open(path, 'wb') as f: f.write(image.data) node['candidates'].pop('?') node['candidates'][image.mimetype] = path node['uri'] = path self.app.env.images.add_file(self.env.docname, path) def get_filename_for(filename: str, mimetype: str) -> str: basename = os.path.basename(filename) basename = re.sub(CRITICAL_PATH_CHAR_RE, "_", basename) return os.path.splitext(basename)[0] + get_image_extension(mimetype) class ImageConverter(BaseImageConverter): """A base class for image converters. An image converter is kind of Docutils transform module. It is used to convert image files which does not supported by builder to appropriate format for that builder. For example, :py:class:`LaTeX builder <.LaTeXBuilder>` supports PDF, PNG and JPEG as image formats. However it does not support SVG images. For such case, to use image converters allows to embed these unsupported images into the document. One of image converters; :ref:`sphinx.ext.imgconverter <sphinx.ext.imgconverter>` can convert a SVG image to PNG format using Imagemagick internally. There are three steps to make your custom image converter: 1. Make a subclass of ``ImageConverter`` class 2. Override ``conversion_rules``, ``is_available()`` and ``convert()`` 3. Register your image converter to Sphinx using :py:meth:`.Sphinx.add_post_transform` """ default_priority = 200 #: The converter is available or not. Will be filled at the first call of #: the build. The result is shared in the same process. #: #: .. 
todo:: This should be refactored not to store the state without class #: variable. available = None # type: Optional[bool] #: A conversion rules the image converter supports. #: It is represented as a list of pair of source image format (mimetype) and #: destination one:: #: #: conversion_rules = [ #: ('image/svg+xml', 'image/png'), #: ('image/gif', 'image/png'), #: ('application/pdf', 'image/png'), #: ] conversion_rules = [] # type: List[Tuple[str, str]] def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) def match(self, node: nodes.image) -> bool: if not self.app.builder.supported_image_types: return False elif self.available is None: # store the value to the class variable to share it during the build self.__class__.available = self.is_available() if not self.available: return False elif set(node['candidates']) & set(self.app.builder.supported_image_types): # builder supports the image; no need to convert return False else: rule = self.get_conversion_rule(node) if rule: return True else: return False def get_conversion_rule(self, node: nodes.image) -> Tuple[str, str]: for candidate in self.guess_mimetypes(node): for supported in self.app.builder.supported_image_types: rule = (candidate, supported) if rule in self.conversion_rules: return rule return None def is_available(self) -> bool: """Return the image converter is available or not.""" raise NotImplementedError() def guess_mimetypes(self, node: nodes.image) -> List[str]: if '?' 
in node['candidates']: return [] elif '*' in node['candidates']: return [guess_mimetype(node['uri'])] else: return node['candidates'].keys() def handle(self, node: nodes.image) -> None: _from, _to = self.get_conversion_rule(node) if _from in node['candidates']: srcpath = node['candidates'][_from] else: srcpath = node['candidates']['*'] filename = get_filename_for(srcpath, _to) ensuredir(self.imagedir) destpath = os.path.join(self.imagedir, filename) abs_srcpath = os.path.join(self.app.srcdir, srcpath) if self.convert(abs_srcpath, destpath): if '*' in node['candidates']: node['candidates']['*'] = destpath else: node['candidates'][_to] = destpath node['uri'] = destpath self.env.original_image_uri[destpath] = srcpath self.env.images.add_file(self.env.docname, destpath) def convert(self, _from: str, _to: str) -> bool: """Convert a image file to expected format. *_from* is a path for source image file, and *_to* is a path for destination file. """ raise NotImplementedError() def setup(app: Sphinx) -> Dict[str, Any]: app.add_post_transform(ImageDownloader) app.add_post_transform(DataURIExtractor) return { 'version': 'builtin', 'parallel_read_safe': True, 'parallel_write_safe': True, }
36.741007
92
0.593303
480e4170c2453e9e85839eeb95dd19fa18f0bc22
10,802
py
Python
utils/gta3sc/config.py
AndroidModLoader/gta3sc
07504a7334eb67cfac14e1f788331d1ba2b9343a
[ "MIT" ]
54
2016-06-22T22:26:58.000Z
2022-02-23T09:25:59.000Z
utils/gta3sc/config.py
GTAResources/gta3sc
a4f3f16574c4e0461ff3c14f8a2839cf3040d952
[ "MIT" ]
112
2016-06-21T22:52:17.000Z
2022-02-08T14:15:13.000Z
utils/gta3sc/config.py
thelink2012/gta3sc
07504a7334eb67cfac14e1f788331d1ba2b9343a
[ "MIT" ]
9
2016-06-24T22:27:55.000Z
2021-01-11T16:37:36.000Z
# -*- Python -*- from lxml import etree import os import re __all__ = ["Alternator", "Enum", "Command", "Argument", "Config", "read_config"] class Alternator: def __init__(self): self.name = "" self.alters = [] def __iter__(self): return iter(self.alters) @staticmethod def from_node(node): init = Alternator() init.name = node.get("Name") init.alters = [a.get("Name") for a in node.iter("Alternative")] return init def to_node(self): node = etree.Element("Alternator", Name=self.name) for a in self.alters: etree.SubElement(node, "Alternative", Name=a) return node class Enum: def __init__(self): self.name = "" self.is_global = False # global is a python keyword self.constants = {} @staticmethod def from_node(node): init = Enum() init.name = node.get("Name") init.is_global = _str2bool(node.get("Global", "false")) init.constants = {} last_value = -1 for a in node.iter("Constant"): maybe_value = a.get("Value") last_value = int(maybe_value, 0) if maybe_value is not None else last_value + 1 init.constants[a.get("Name")] = last_value return init def to_node(self): last_value = -1 node = etree.Element("Enum", Name=self.name) if self.is_global: node.set("Global", _bool2str(self.is_global)) for k,v in sorted(self.constants.items(), key=lambda x: x[1]): if v == last_value + 1: etree.SubElement(node, "Constant", Name=k) else: etree.SubElement(node, "Constant", Name=k, Value=str(v)) last_value = v return node class Command: def __init__(self): self.name = "" self.id = None self.hash = None self.supported = False self.internal = False self.extension = False self.args = [] def __eq__(self, other): return self.name == other.name and\ self.id == other.id and\ self.supported == other.supported and\ self.args == other.args def same_behaviour(self, other): if self.id == other.id and len(self.args) == len(other.args): return all(a.same_behaviour(b) for a,b in zip(self.args, other.args)) return False def has_optional(self): return len(self.args) > 0 and self.args[-1].optional == True def 
get_arg(self, i): if i < len(self.args): return self.args[i] elif self.has_optional(): return self.args[-1] else: return None @staticmethod def from_node(node): init = Command() cmdid = node.get("ID", None) cmdhash = node.get("Hash", None) init.name = node.get("Name") init.id = int(cmdid, 0) if cmdid is not None else None init.hash = int(cmdhash, 0) if cmdhash is not None else None init.supported = _str2bool(node.get("Supported", "true")) init.internal = _str2bool(node.get("Internal", "false")) init.extension = _str2bool(node.get("Extension", "false")) init.args = [] node_args = node.find("Args") if node_args is not None: for a in node_args.iter("Arg"): init.args.append(Argument.from_node(a)) return init def to_node(self): node = etree.Element("Command") if self.id is not None: node.set("ID", hex(self.id)) node.set("Name", self.name) if self.hash is not None: node.set("Hash", "0x%.8x" % self.hash) if self.supported == False: node.set("Supported", _bool2str(self.supported)) if self.internal == True: node.set("Internal", _bool2str(self.internal)) if self.extension == True: node.set("Extension", _bool2str(self.extension)) if len(self.args) > 0: node_args = etree.SubElement(node, "Args") for a in self.args: node_args.append(a.to_node()) return node class Argument: def __init__(self): self.type = "ANY" self.desc = "" self.out = False self.ref = False self.optional = False self.allow_const = False self.allow_gvar = False self.allow_lvar = False self.entity = None self.enums = [] self.allow_text_label = False # valid only for PARAM types self.allow_pointer = False self.preserve_case = False def __eq__(self, other): return self.__dict__ == other.__dict__ def same_behaviour(self, other): return self.type == other.type and\ self.out == other.out and\ self.ref == other.ref and\ self.optional == other.optional def has_enum(self, name): return any(x == name for x in self.enums) @staticmethod def from_node(node): init = Argument() init.type = node.get("Type") init.desc = 
node.get("Desc", "") init.out = _str2bool(node.get("Out", "false")) init.ref = _str2bool(node.get("Ref", "false")) init.optional = _str2bool(node.get("Optional", "false")) init.allow_const = _str2bool(node.get("AllowConst", "false" if init.out else "true")) init.allow_gvar = _str2bool(node.get("AllowGlobalVar", "true" if init.type != "LABEL" else "false")) init.allow_lvar = _str2bool(node.get("AllowLocalVar", "true" if init.type != "LABEL" else "false")) init.allow_text_label = _str2bool(node.get("AllowTextLabel", "false")) init.allow_pointer = _str2bool(node.get("AllowPointer", "false")) init.preserve_case = _str2bool(node.get("PreserveCase", "false")) init.entity = node.get("Entity", None) init.enums = node.get("Enum", None) init.enums = [init.enums] if init.enums else [] return init def to_node(self): default_allow_var = True if self.type != "LABEL" else False node = etree.Element("Arg", Type=self.type) if self.desc.strip() != "": node.set("Desc", self.desc) if self.out != False: node.set("Out", _bool2str(self.out)) if self.ref != False: node.set("Ref", _bool2str(self.ref)) if self.optional != False: node.set("Optional", _bool2str(self.optional)) if self.allow_const == False and self.out == False: node.set("AllowConst", _bool2str(self.allow_const)) if self.allow_gvar != default_allow_var: node.set("AllowGlobalVar", _bool2str(self.allow_gvar)) if self.allow_lvar != default_allow_var: node.set("AllowLocalVar", _bool2str(self.allow_lvar)) if self.allow_text_label == True: node.set("AllowTextLabel", _bool2str(self.allow_text_label)) if self.allow_pointer == True: node.set("AllowPointer", _bool2str(self.allow_pointer)) if self.preserve_case == True: node.set("PreserveCase", _bool2str(self.preserve_case)) if self.entity != None: node.set("Entity", self.entity) if len(self.enums) != 0: assert len(self.enums) == 1 node.set("Enum", self.enums[0]) return node class Config: def __init__(self): self.commands = [] self.enums = [] self.alternators = [] def 
get_alternator(self, name): return next((x for x in self.alternators if x.name == name), None) def read_config(self, file): tree = etree.parse(file) for item in tree.getroot(): if item.tag == "Alternators": for subitem in item: if subitem.tag == "Alternator": self.alternators.append(Alternator.from_node(subitem)) elif item.tag == "Commands": for subitem in item: if subitem.tag == "Command": self.commands.append(Command.from_node(subitem)) elif item.tag == "Constants": for subitem in item: if subitem.tag == "Enum": self.enums.append(Enum.from_node(subitem)) def save_config(self, file, pretty_print=True): root = etree.Element("GTA3Script") if len(self.enums) > 0: base = etree.SubElement(root, "Constants") for c in self.enums: base.append(c.to_node()) if len(self.alternators) > 0: base = etree.SubElement(root, "Alternators") for c in self.alternators: base.append(c.to_node()) if len(self.commands) > 0: base = etree.SubElement(root, "Commands") for c in self.commands: base.append(c.to_node()) tree = etree.ElementTree(root) tree.write(file, encoding="utf-8", pretty_print=pretty_print, xml_declaration=True) def read_config(filename): c = Config() if os.path.isdir(filename): for subfile in os.listdir(filename): if subfile.endswith(".xml"): c.read_config(os.path.join(filename, subfile)) else: c.read_config(filename) return c def read_commandline(configpath): result = [] with open(os.path.join(configpath, "commandline.txt")) as f: for command in re.split(r"[\r\n\t ]", f.read()): split = command.split('=') left = split[0] right = split[1] if len(split) > 1 else True if left.startswith("-fno-") or left.startswith("-mno-"): assert right == True left = left[:2] + left[5:] right = False result.append((left, right)) return result def _str2bool(x): if x == "true": return True if x == "false": return False print(x) assert False def _bool2str(x): if x == True: return "true" if x == False: return "false" print(type(x)) assert False def one_at_a_time(key): result = 0 for i in range(0, 
len(key)): result += ord(key[i]); result &= 0xFFFFFFFF result += (result << 10) & 0xFFFFFFFF; result &= 0xFFFFFFFF result ^= (result >> 6) & 0xFFFFFFFF; result &= 0xFFFFFFFF result += (result << 3) & 0xFFFFFFFF; result &= 0xFFFFFFFF result ^= (result >> 11) & 0xFFFFFFFF; result &= 0xFFFFFFFF result += (result << 15) & 0xFFFFFFFF; result &= 0xFFFFFFFF return result if __name__ == "__main__": import sys cfg = Config() cfg.read_config(sys.argv[1]) cfg.save_config(sys.argv[1])
34.957929
108
0.563692
d8f3ce54d97ffeb779f533a7e4fa6898abd123d7
19,305
py
Python
test/functional/feature_backwards_compatibility.py
bitcoinil/bitcoinil
c546f96205806e1c969419cb83c107961cf143cc
[ "MIT" ]
1
2021-04-01T17:28:54.000Z
2021-04-01T17:28:54.000Z
test/functional/feature_backwards_compatibility.py
bitcoinil/bitcoinil
c546f96205806e1c969419cb83c107961cf143cc
[ "MIT" ]
1
2021-10-22T02:52:33.000Z
2021-10-22T02:52:33.000Z
test/functional/feature_backwards_compatibility.py
bitcoinil/bitcoinil
c546f96205806e1c969419cb83c107961cf143cc
[ "MIT" ]
1
2021-08-19T01:50:12.000Z
2021-08-19T01:50:12.000Z
#!/usr/bin/env python3 # Copyright (c) 2018-2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Backwards compatibility functional test Test various backwards compatibility scenarios. Download the previous node binaries: test/get_previous_releases.py -b v0.19.1 v0.18.1 v0.17.2 v0.16.3 v0.15.2 v0.15.2 is not required by this test, but it is used in wallet_upgradewallet.py. Due to a hardfork in regtest, it can't be used to sync nodes. Due to RPC changes introduced in various versions the below tests won't work for older versions without some patches or workarounds. Use only the latest patch version of each release, unless a test specifically needs an older patch version. """ import os import shutil from test_framework.test_framework import BitcoinTestFramework from test_framework.descriptors import descsum_create from test_framework.util import ( assert_equal, assert_raises_rpc_error, ) class BackwardsCompatibilityTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 6 # Add new version after each release: self.extra_args = [ ["-addresstype=bech32"], # Pre-release: use to mine blocks ["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # Pre-release: use to receive coins, swap wallets, etc ["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # v0.19.1 ["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # v0.18.1 ["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # v0.17.2 ["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-wallet=wallet.dat"], # v0.16.3 ] self.wallet_names = [self.default_wallet_name] def skip_test_if_missing_module(self): self.skip_if_no_wallet() self.skip_if_no_previous_releases() def setup_nodes(self): self.add_nodes(self.num_nodes, extra_args=self.extra_args, versions=[ None, None, 190100, 180100, 170200, 160300, ]) self.start_nodes() 
self.import_deterministic_coinbase_privkeys() def run_test(self): self.nodes[0].generatetoaddress(101, self.nodes[0].getnewaddress()) self.sync_blocks() # Sanity check the test framework: res = self.nodes[self.num_nodes - 1].getblockchaininfo() assert_equal(res['blocks'], 101) node_master = self.nodes[self.num_nodes - 5] node_v19 = self.nodes[self.num_nodes - 4] node_v18 = self.nodes[self.num_nodes - 3] node_v17 = self.nodes[self.num_nodes - 2] node_v16 = self.nodes[self.num_nodes - 1] self.log.info("Test wallet backwards compatibility...") # Create a number of wallets and open them in older versions: # w1: regular wallet, created on master: update this test when default # wallets can no longer be opened by older versions. node_master.createwallet(wallet_name="w1") wallet = node_master.get_wallet_rpc("w1") info = wallet.getwalletinfo() assert info['private_keys_enabled'] assert info['keypoolsize'] > 0 # Create a confirmed transaction, receiving coins address = wallet.getnewaddress() self.nodes[0].sendtoaddress(address, 10) self.sync_mempools() self.nodes[0].generate(1) self.sync_blocks() # Create a conflicting transaction using RBF return_address = self.nodes[0].getnewaddress() tx1_id = self.nodes[1].sendtoaddress(return_address, 1) tx2_id = self.nodes[1].bumpfee(tx1_id)["txid"] # Confirm the transaction self.sync_mempools() self.nodes[0].generate(1) self.sync_blocks() # Create another conflicting transaction using RBF tx3_id = self.nodes[1].sendtoaddress(return_address, 1) tx4_id = self.nodes[1].bumpfee(tx3_id)["txid"] # Abandon transaction, but don't confirm self.nodes[1].abandontransaction(tx3_id) # w1_v19: regular wallet, created with v0.19 node_v19.rpc.createwallet(wallet_name="w1_v19") wallet = node_v19.get_wallet_rpc("w1_v19") info = wallet.getwalletinfo() assert info['private_keys_enabled'] assert info['keypoolsize'] > 0 # Use addmultisigaddress (see #18075) address_18075 = wallet.rpc.addmultisigaddress(1, 
["0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52", "037211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073"], "", "legacy")["address"] assert wallet.getaddressinfo(address_18075)["solvable"] # w1_v18: regular wallet, created with v0.18 node_v18.rpc.createwallet(wallet_name="w1_v18") wallet = node_v18.get_wallet_rpc("w1_v18") info = wallet.getwalletinfo() assert info['private_keys_enabled'] assert info['keypoolsize'] > 0 # w2: wallet with private keys disabled, created on master: update this # test when default wallets private keys disabled can no longer be # opened by older versions. node_master.createwallet(wallet_name="w2", disable_private_keys=True) wallet = node_master.get_wallet_rpc("w2") info = wallet.getwalletinfo() assert info['private_keys_enabled'] == False assert info['keypoolsize'] == 0 # w2_v19: wallet with private keys disabled, created with v0.19 node_v19.rpc.createwallet(wallet_name="w2_v19", disable_private_keys=True) wallet = node_v19.get_wallet_rpc("w2_v19") info = wallet.getwalletinfo() assert info['private_keys_enabled'] == False assert info['keypoolsize'] == 0 # w2_v18: wallet with private keys disabled, created with v0.18 node_v18.rpc.createwallet(wallet_name="w2_v18", disable_private_keys=True) wallet = node_v18.get_wallet_rpc("w2_v18") info = wallet.getwalletinfo() assert info['private_keys_enabled'] == False assert info['keypoolsize'] == 0 # w3: blank wallet, created on master: update this # test when default blank wallets can no longer be opened by older versions. 
node_master.createwallet(wallet_name="w3", blank=True) wallet = node_master.get_wallet_rpc("w3") info = wallet.getwalletinfo() assert info['private_keys_enabled'] assert info['keypoolsize'] == 0 # w3_v19: blank wallet, created with v0.19 node_v19.rpc.createwallet(wallet_name="w3_v19", blank=True) wallet = node_v19.get_wallet_rpc("w3_v19") info = wallet.getwalletinfo() assert info['private_keys_enabled'] assert info['keypoolsize'] == 0 # w3_v18: blank wallet, created with v0.18 node_v18.rpc.createwallet(wallet_name="w3_v18", blank=True) wallet = node_v18.get_wallet_rpc("w3_v18") info = wallet.getwalletinfo() assert info['private_keys_enabled'] assert info['keypoolsize'] == 0 # Copy the wallets to older nodes: node_master_wallets_dir = os.path.join(node_master.datadir, "regtest/wallets") node_v19_wallets_dir = os.path.join(node_v19.datadir, "regtest/wallets") node_v18_wallets_dir = os.path.join(node_v18.datadir, "regtest/wallets") node_v17_wallets_dir = os.path.join(node_v17.datadir, "regtest/wallets") node_v16_wallets_dir = os.path.join(node_v16.datadir, "regtest") node_master.unloadwallet("w1") node_master.unloadwallet("w2") node_v19.unloadwallet("w1_v19") node_v19.unloadwallet("w2_v19") node_v18.unloadwallet("w1_v18") node_v18.unloadwallet("w2_v18") # Copy wallets to v0.16 for wallet in os.listdir(node_master_wallets_dir): shutil.copytree( os.path.join(node_master_wallets_dir, wallet), os.path.join(node_v16_wallets_dir, wallet) ) # Copy wallets to v0.17 for wallet in os.listdir(node_master_wallets_dir): shutil.copytree( os.path.join(node_master_wallets_dir, wallet), os.path.join(node_v17_wallets_dir, wallet) ) for wallet in os.listdir(node_v18_wallets_dir): shutil.copytree( os.path.join(node_v18_wallets_dir, wallet), os.path.join(node_v17_wallets_dir, wallet) ) # Copy wallets to v0.18 for wallet in os.listdir(node_master_wallets_dir): shutil.copytree( os.path.join(node_master_wallets_dir, wallet), os.path.join(node_v18_wallets_dir, wallet) ) # Copy wallets to 
v0.19 for wallet in os.listdir(node_master_wallets_dir): shutil.copytree( os.path.join(node_master_wallets_dir, wallet), os.path.join(node_v19_wallets_dir, wallet) ) if not self.options.descriptors: # Descriptor wallets break compatibility, only run this test for legacy wallet # Open the wallets in v0.19 node_v19.loadwallet("w1") wallet = node_v19.get_wallet_rpc("w1") info = wallet.getwalletinfo() assert info['private_keys_enabled'] assert info['keypoolsize'] > 0 txs = wallet.listtransactions() assert_equal(len(txs), 5) assert_equal(txs[1]["txid"], tx1_id) assert_equal(txs[2]["walletconflicts"], [tx1_id]) assert_equal(txs[1]["replaced_by_txid"], tx2_id) assert not(txs[1]["abandoned"]) assert_equal(txs[1]["confirmations"], -1) assert_equal(txs[2]["blockindex"], 1) assert txs[3]["abandoned"] assert_equal(txs[4]["walletconflicts"], [tx3_id]) assert_equal(txs[3]["replaced_by_txid"], tx4_id) assert not(hasattr(txs[3], "blockindex")) node_v19.loadwallet("w2") wallet = node_v19.get_wallet_rpc("w2") info = wallet.getwalletinfo() assert info['private_keys_enabled'] == False assert info['keypoolsize'] == 0 node_v19.loadwallet("w3") wallet = node_v19.get_wallet_rpc("w3") info = wallet.getwalletinfo() assert info['private_keys_enabled'] assert info['keypoolsize'] == 0 # Open the wallets in v0.18 node_v18.loadwallet("w1") wallet = node_v18.get_wallet_rpc("w1") info = wallet.getwalletinfo() assert info['private_keys_enabled'] assert info['keypoolsize'] > 0 txs = wallet.listtransactions() assert_equal(len(txs), 5) assert_equal(txs[1]["txid"], tx1_id) assert_equal(txs[2]["walletconflicts"], [tx1_id]) assert_equal(txs[1]["replaced_by_txid"], tx2_id) assert not(txs[1]["abandoned"]) assert_equal(txs[1]["confirmations"], -1) assert_equal(txs[2]["blockindex"], 1) assert txs[3]["abandoned"] assert_equal(txs[4]["walletconflicts"], [tx3_id]) assert_equal(txs[3]["replaced_by_txid"], tx4_id) assert not(hasattr(txs[3], "blockindex")) node_v18.loadwallet("w2") wallet = 
node_v18.get_wallet_rpc("w2") info = wallet.getwalletinfo() assert info['private_keys_enabled'] == False assert info['keypoolsize'] == 0 node_v18.loadwallet("w3") wallet = node_v18.get_wallet_rpc("w3") info = wallet.getwalletinfo() assert info['private_keys_enabled'] assert info['keypoolsize'] == 0 node_v17.loadwallet("w1") wallet = node_v17.get_wallet_rpc("w1") info = wallet.getwalletinfo() assert info['private_keys_enabled'] assert info['keypoolsize'] > 0 node_v17.loadwallet("w2") wallet = node_v17.get_wallet_rpc("w2") info = wallet.getwalletinfo() assert info['private_keys_enabled'] == False assert info['keypoolsize'] == 0 else: # Descriptor wallets appear to be corrupted wallets to old software assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v19.loadwallet, "w1") assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v19.loadwallet, "w2") assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v19.loadwallet, "w3") assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v18.loadwallet, "w1") assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v18.loadwallet, "w2") assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v18.loadwallet, "w3") # Open the wallets in v0.17 node_v17.loadwallet("w1_v18") wallet = node_v17.get_wallet_rpc("w1_v18") info = wallet.getwalletinfo() assert info['private_keys_enabled'] assert info['keypoolsize'] > 0 node_v17.loadwallet("w2_v18") wallet = node_v17.get_wallet_rpc("w2_v18") info = wallet.getwalletinfo() assert info['private_keys_enabled'] == False assert info['keypoolsize'] == 0 # RPC loadwallet failure causes bitcoind to exit, in addition to the RPC # call failure, so the following test won't work: # assert_raises_rpc_error(-4, 
"Wallet loading failed.", node_v17.loadwallet, 'w3_v18') # Instead, we stop node and try to launch it with the wallet: self.stop_node(4) node_v17.assert_start_raises_init_error(["-wallet=w3_v18"], "Error: Error loading w3_v18: Wallet requires newer version of Bitcoin Core") if self.options.descriptors: # Descriptor wallets appear to be corrupted wallets to old software node_v17.assert_start_raises_init_error(["-wallet=w1"], "Error: wallet.dat corrupt, salvage failed") node_v17.assert_start_raises_init_error(["-wallet=w2"], "Error: wallet.dat corrupt, salvage failed") node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: wallet.dat corrupt, salvage failed") else: node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: Error loading w3: Wallet requires newer version of Bitcoin Core") self.start_node(4) if not self.options.descriptors: # Descriptor wallets break compatibility, only run this test for legacy wallets # Open most recent wallet in v0.16 (no loadwallet RPC) self.restart_node(5, extra_args=["-wallet=w2"]) wallet = node_v16.get_wallet_rpc("w2") info = wallet.getwalletinfo() assert info['keypoolsize'] == 1 # Create upgrade wallet in v0.16 self.restart_node(-1, extra_args=["-wallet=u1_v16"]) wallet = node_v16.get_wallet_rpc("u1_v16") v16_addr = wallet.getnewaddress('', "bech32") v16_info = wallet.validateaddress(v16_addr) v16_pubkey = v16_info['pubkey'] self.stop_node(-1) self.log.info("Test wallet upgrade path...") # u1: regular wallet, created with v0.17 node_v17.rpc.createwallet(wallet_name="u1_v17") wallet = node_v17.get_wallet_rpc("u1_v17") address = wallet.getnewaddress("bech32") v17_info = wallet.getaddressinfo(address) hdkeypath = v17_info["hdkeypath"] pubkey = v17_info["pubkey"] # Copy the 0.16 wallet to the last Bitcoin Core version and open it: shutil.copyfile( os.path.join(node_v16_wallets_dir, "wallets/u1_v16"), os.path.join(node_master_wallets_dir, "u1_v16") ) load_res = node_master.loadwallet("u1_v16") # Make sure this wallet 
opens without warnings. See https://github.com/bitcoinil/bitcoinil/pull/19054 assert_equal(load_res['warning'], '') wallet = node_master.get_wallet_rpc("u1_v16") info = wallet.getaddressinfo(v16_addr) descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + v16_pubkey + ")" assert_equal(info["desc"], descsum_create(descriptor)) # Now copy that same wallet back to 0.16 to make sure no automatic upgrade breaks it os.remove(os.path.join(node_v16_wallets_dir, "wallets/u1_v16")) shutil.copyfile( os.path.join(node_master_wallets_dir, "u1_v16"), os.path.join(node_v16_wallets_dir, "wallets/u1_v16") ) self.start_node(-1, extra_args=["-wallet=u1_v16"]) wallet = node_v16.get_wallet_rpc("u1_v16") info = wallet.validateaddress(v16_addr) assert_equal(info, v16_info) # Copy the 0.17 wallet to the last Bitcoin Core version and open it: node_v17.unloadwallet("u1_v17") shutil.copytree( os.path.join(node_v17_wallets_dir, "u1_v17"), os.path.join(node_master_wallets_dir, "u1_v17") ) node_master.loadwallet("u1_v17") wallet = node_master.get_wallet_rpc("u1_v17") info = wallet.getaddressinfo(address) descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + pubkey + ")" assert_equal(info["desc"], descsum_create(descriptor)) # Now copy that same wallet back to 0.17 to make sure no automatic upgrade breaks it node_master.unloadwallet("u1_v17") shutil.rmtree(os.path.join(node_v17_wallets_dir, "u1_v17")) shutil.copytree( os.path.join(node_master_wallets_dir, "u1_v17"), os.path.join(node_v17_wallets_dir, "u1_v17") ) node_v17.loadwallet("u1_v17") wallet = node_v17.get_wallet_rpc("u1_v17") info = wallet.getaddressinfo(address) assert_equal(info, v17_info) # Copy the 0.19 wallet to the last Bitcoin Core version and open it: shutil.copytree( os.path.join(node_v19_wallets_dir, "w1_v19"), os.path.join(node_master_wallets_dir, "w1_v19") ) node_master.loadwallet("w1_v19") wallet = node_master.get_wallet_rpc("w1_v19") assert 
wallet.getaddressinfo(address_18075)["solvable"] # Now copy that same wallet back to 0.19 to make sure no automatic upgrade breaks it node_master.unloadwallet("w1_v19") shutil.rmtree(os.path.join(node_v19_wallets_dir, "w1_v19")) shutil.copytree( os.path.join(node_master_wallets_dir, "w1_v19"), os.path.join(node_v19_wallets_dir, "w1_v19") ) node_v19.loadwallet("w1_v19") wallet = node_v19.get_wallet_rpc("w1_v19") assert wallet.getaddressinfo(address_18075)["solvable"] if __name__ == '__main__': BackwardsCompatibilityTest().main()
45.210773
223
0.643667
3f9be415b587f7c2011665800204f1538bc90355
2,810
py
Python
ydb/_utilities.py
yandex-cloud/ydb-python-sdk
0df2dce2d77fc41ad3020072740f51dd91630177
[ "Apache-2.0" ]
19
2019-07-01T08:25:29.000Z
2022-01-26T14:46:51.000Z
ydb/_utilities.py
yandex-cloud/ydb-python-sdk
0df2dce2d77fc41ad3020072740f51dd91630177
[ "Apache-2.0" ]
5
2019-07-02T13:36:42.000Z
2021-09-14T06:46:48.000Z
ydb/_utilities.py
yandex-cloud/ydb-python-sdk
0df2dce2d77fc41ad3020072740f51dd91630177
[ "Apache-2.0" ]
10
2019-06-07T10:36:19.000Z
2021-10-15T08:58:11.000Z
# -*- coding: utf-8 -*-
"""Small internal helpers shared across the YDB Python SDK.

Provides Future-wrapping utilities, a YQL query-hash helper, a tiny
LRU cache, and thin iterator adapters over gRPC streaming calls.
"""
import six
import codecs
from concurrent import futures
import functools
import hashlib
import collections

from . import ydb_version

try:
    from . import interceptor
except ImportError:
    # The async interceptor is optional; AsyncResponseIterator is unusable
    # without it, but the rest of the module still works.
    interceptor = None


def wrap_result_in_future(result):
    """Return an already-resolved Future holding *result*."""
    f = futures.Future()
    f.set_result(result)
    return f


def wrap_exception_in_future(exc):
    """Return an already-failed Future holding *exc*."""
    f = futures.Future()
    f.set_exception(exc)
    return f


def future():
    """Return a fresh, unresolved Future."""
    return futures.Future()


def x_ydb_sdk_build_info_header():
    """Return the (name, value) gRPC metadata pair carrying the SDK version."""
    return ("x-ydb-sdk-build-info", "ydb-python-sdk/" + ydb_version.VERSION)


# Decorator that ensures no exceptions are leaked from decorated async call
def wrap_async_call_exceptions(f):
    """Wrap *f* so that a synchronously raised exception is returned as a
    failed Future instead of propagating to the caller.

    :param f: A callable that normally returns a Future.
    :return: The wrapped callable.
    """
    @functools.wraps(f)
    def decorator(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            return wrap_exception_in_future(e)

    return decorator


def get_query_hash(yql_text):
    """Return a stable SHA-256 hex digest of the YQL text.

    Accepts both byte strings and text strings (Python 2/3 compatible):
    byte input is decoded as utf-8 first, then re-encoded for hashing.
    """
    try:
        return hashlib.sha256(six.text_type(yql_text, 'utf-8').encode('utf-8')).hexdigest()
    except TypeError:
        # yql_text is already a text string and cannot be decoded again.
        return hashlib.sha256(six.text_type(yql_text).encode('utf-8')).hexdigest()


class LRUCache(object):
    """Minimal least-recently-used cache built on OrderedDict.

    ``get`` refreshes an entry's recency; ``put`` evicts the oldest
    entries once ``capacity`` is exceeded.
    """

    def __init__(self, capacity=1000):
        # OrderedDict insertion order doubles as the recency order:
        # oldest entries sit at the front, newest at the back.
        self.items = collections.OrderedDict()
        self.capacity = capacity

    def put(self, key, value):
        """Insert or overwrite *key*, evicting LRU entries past capacity."""
        self.items[key] = value
        while len(self.items) > self.capacity:
            # popitem(last=False) removes the least recently used entry.
            self.items.popitem(last=False)

    def get(self, key, _default):
        """Return the cached value for *key*, or *_default* if absent."""
        if key not in self.items:
            return _default
        # Pop and re-insert to mark the entry as most recently used.
        value = self.items.pop(key)
        self.items[key] = value
        return value

    def erase(self, key):
        """Remove *key* from the cache.

        Raises KeyError if the key is absent (kept for backward
        compatibility with existing callers).
        """
        self.items.pop(key)


def from_bytes(val):
    """
    Translates value into valid utf8 string
    :param val: A value to translate
    :return: A valid utf8 string
    """
    try:
        return codecs.decode(val, 'utf8')
    except (UnicodeDecodeError, UnicodeEncodeError, TypeError):
        # UnicodeDecodeError: val is bytes but not valid utf-8 -- previously
        # uncaught, which contradicted the documented "return as-is" contract.
        # UnicodeEncodeError: Python 2 implicit ascii-encode of unicode input.
        # TypeError: val is not a bytes-like object at all.
        return val


class AsyncResponseIterator(object):
    """Iterator over an async gRPC stream, applying *wrapper* to each item.

    Requires the optional ``interceptor`` module to be importable.
    """

    def __init__(self, it, wrapper):
        self.it = it
        self.wrapper = wrapper

    def cancel(self):
        """Cancel the underlying streaming call; returns self for chaining."""
        self.it.cancel()
        return self

    def __iter__(self):
        return self

    def _next(self):
        return interceptor.operate_async_stream_call(self.it, self.wrapper)

    def next(self):
        # Python 2 iterator protocol.
        return self._next()

    def __next__(self):
        # Python 3 iterator protocol.
        return self._next()


class SyncResponseIterator(object):
    """Iterator over a synchronous gRPC stream, applying *wrapper* to each item."""

    def __init__(self, it, wrapper):
        self.it = it
        self.wrapper = wrapper

    def cancel(self):
        """Cancel the underlying streaming call; returns self for chaining."""
        self.it.cancel()
        return self

    def __iter__(self):
        return self

    def _next(self):
        return self.wrapper(next(self.it))

    def next(self):
        # Python 2 iterator protocol.
        return self._next()

    def __next__(self):
        # Python 3 iterator protocol.
        return self._next()
21.782946
91
0.638078
12cd5c0fd69d39007caa5279f4f5c4b92100a7f6
1,136
py
Python
edgelm/fairseq/pdb.py
guotao0628/DeepNet
1ae74d8b44d715bf67c7d64a8efafff4b7c7937a
[ "MIT" ]
1
2021-11-07T00:30:05.000Z
2021-11-07T00:30:05.000Z
edgelm/fairseq/pdb.py
guotao0628/DeepNet
1ae74d8b44d715bf67c7d64a8efafff4b7c7937a
[ "MIT" ]
null
null
null
edgelm/fairseq/pdb.py
guotao0628/DeepNet
1ae74d8b44d715bf67c7d64a8efafff4b7c7937a
[ "MIT" ]
null
null
null
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import multiprocessing
import os
import pdb
import sys

__all__ = ["set_trace"]


# Shared, lazily-opened handle to the real stdin, plus a cross-process lock
# so only one process drives the interactive prompt at a time.
_stdin = [None]
_stdin_lock = multiprocessing.Lock()
try:
    _stdin_fd = sys.stdin.fileno()
except Exception:
    # stdin may be replaced by a non-file object (e.g. under some runners).
    _stdin_fd = None


class MultiprocessingPdb(pdb.Pdb):
    """A Pdb wrapper that works in a multiprocessing environment.

    Usage: `from fairseq import pdb; pdb.set_trace()`
    """

    def __init__(self):
        # nosigint=True: child processes must not install SIGINT handlers.
        pdb.Pdb.__init__(self, nosigint=True)

    def _cmdloop(self):
        saved_stdin = sys.stdin
        with _stdin_lock:
            try:
                if _stdin_fd is not None:
                    # Reopen the original stdin fd once and share it, since
                    # worker processes inherit a detached sys.stdin.
                    if _stdin[0] is None:
                        _stdin[0] = os.fdopen(_stdin_fd)
                    sys.stdin = _stdin[0]
                self.cmdloop()
            finally:
                sys.stdin = saved_stdin


def set_trace():
    """Drop into the multiprocessing-safe debugger at the caller's frame."""
    debugger = MultiprocessingPdb()
    debugger.set_trace(sys._getframe().f_back)
23.666667
66
0.589789