hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0f91d65c4b102529c8a16845cd237a93da7ad2
| 925
|
py
|
Python
|
var/spack/repos/builtin/packages/r-mitml/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2018-11-27T03:39:44.000Z
|
2021-09-06T15:50:35.000Z
|
var/spack/repos/builtin/packages/r-mitml/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2019-01-11T20:11:52.000Z
|
2019-01-11T20:11:52.000Z
|
var/spack/repos/builtin/packages/r-mitml/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-10-14T14:20:17.000Z
|
2020-10-14T14:20:17.000Z
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMitml(RPackage):
    """Provides tools for multiple imputation of missing data in multilevel
    modeling. Includes a user-friendly interface to the packages 'pan' and
    'jomo', and several functions for visualization, data management and the
    analysis of multiply imputed data sets."""

    # CRAN landing page, source tarball, and archive listing for mitml.
    homepage = "https://cran.r-project.org/package=mitml"
    url = "https://cran.r-project.org/src/contrib/mitml_0.3-5.tar.gz"
    list_url = "https://cran.r-project.org/src/contrib/Archive/mitml"

    # NOTE(review): second positional argument is the tarball's MD5 checksum
    # (older Spack checksum style).
    version('0.3-5', '6f8659c33696915bf510241287b2a34d')

    # R package dependencies, required both to build and to run.
    depends_on('r-pan', type=('build', 'run'))
    depends_on('r-jomo', type=('build', 'run'))
    depends_on('r-haven', type=('build', 'run'))
| 40.217391
| 76
| 0.709189
|
4a0f9240bf41e271ca534b89930ae31aa0b29b06
| 332
|
py
|
Python
|
devilery/delivery/ext/auth/models.py
|
Fernandes01/curso-flask
|
f5275d7217dd3cccdfef1eb3f873d4537cb0690d
|
[
"Unlicense"
] | null | null | null |
devilery/delivery/ext/auth/models.py
|
Fernandes01/curso-flask
|
f5275d7217dd3cccdfef1eb3f873d4537cb0690d
|
[
"Unlicense"
] | null | null | null |
devilery/delivery/ext/auth/models.py
|
Fernandes01/curso-flask
|
f5275d7217dd3cccdfef1eb3f873d4537cb0690d
|
[
"Unlicense"
] | null | null | null |
from delivery.ext.db import db
class User(db.Model):
    """SQLAlchemy model for an application user account."""

    __tablename__ = "user"

    id = db.Column("id", db.Integer, primary_key=True)
    # Unique, so the email doubles as the natural login identifier.
    email = db.Column("email", db.Unicode, unique=True)
    # NOTE(review): cannot tell from this file whether callers hash the
    # password before assignment — confirm it is never stored in plain text.
    passwd = db.Column("passwd", db.Unicode)
    # Flag granting administrative privileges.
    admin = db.Column("admin", db.Boolean)

    def __repr__(self):
        return self.email
| 25.538462
| 55
| 0.653614
|
4a0f95b37a7918f03fb6fa03235b78e0432871d6
| 17,572
|
py
|
Python
|
workflow.py
|
ken11/ML-Workflow-with-SageMaker-and-StepFunctions
|
83bf1671d2d4b3a5bec5b075fd9870e8ebea1402
|
[
"MIT"
] | 2
|
2021-07-28T11:06:24.000Z
|
2021-09-22T02:07:32.000Z
|
workflow.py
|
ken11/ML-Workflow-with-SageMaker-and-StepFunctions
|
83bf1671d2d4b3a5bec5b075fd9870e8ebea1402
|
[
"MIT"
] | null | null | null |
workflow.py
|
ken11/ML-Workflow-with-SageMaker-and-StepFunctions
|
83bf1671d2d4b3a5bec5b075fd9870e8ebea1402
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta, timezone
import argparse
import boto3
import sagemaker
import stepfunctions
import yaml
import time
from sagemaker.processing import (ProcessingInput, ProcessingOutput,
ScriptProcessor)
from sagemaker.tensorflow.estimator import TensorFlow
from sagemaker.estimator import Estimator
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from stepfunctions import steps
from stepfunctions.inputs import ExecutionInput
from stepfunctions.steps import Chain, ProcessingStep
from stepfunctions.steps.states import Retry, Choice, Fail, Catch
from stepfunctions.steps.choice_rule import ChoiceRule
from stepfunctions.workflow import Workflow
class MLWorkflow:
    """Builds and executes an ML pipeline (preprocess -> train -> evaluate ->
    upload experiment results) on SageMaker, orchestrated as an AWS Step
    Functions state machine via the Step Functions Data Science SDK."""

    def __init__(self, conf):
        """Cache all settings from the parsed config dict *conf* and create
        the SageMaker Experiment/Trial used to track this run."""
        self.workflow_name = conf['stepfunctions']['workflow']['name']
        self.sagemaker_session = sagemaker.Session()
        self.region = self.sagemaker_session.boto_region_name
        self.sagemaker_execution_role = conf['aws']['role']['sagemaker_execution']
        self.workflow_execution_role = conf['aws']['role']['workflow_execution']
        self.account_id = boto3.client('sts').get_caller_identity().get('Account')
        self.bucket = conf['aws']['bucket']
        self.repository_uri = conf['aws']['ecr_repository_uri']
        self.data_update = conf['sagemaker']['data_update']
        self.preprocess_job_name_prefix = conf['sagemaker']['processing']['preprocess']['job_name_prefix']
        self.train_job_name_prefix = conf['sagemaker']['training']['job_name_prefix']
        self.evaluation_job_name_prefix = conf['sagemaker']['processing']['evaluation']['job_name_prefix']
        self.experiment_name = conf['sagemaker']['experiment']['name']
        self.experiment_bucket_name = conf['sagemaker']['experiment']['bucket_name']
        self.experiment_key = conf['sagemaker']['experiment']['key']
        # Job names and the trial name are timestamped in JST (UTC+9).
        JST = timezone(timedelta(hours=+9), 'JST')
        self.timestamp = datetime.now(JST).strftime("%Y-%m-%d-%H-%M-%S")
        self.hyperparameters = conf['sagemaker']['training']['hyperparameters']
        self.learning_rate = self.hyperparameters['learning_rate']
        self.epochs = self.hyperparameters['epochs']
        self.preprocessor_settings = conf['sagemaker']['processing']['preprocess']
        self.estimator_settings = conf['sagemaker']['training']
        self.evaluation_processor_settings = conf['sagemaker']['processing']['evaluation']
        self.lambda_settings = conf['lambda']
        self.input = conf['aws']['input_data_s3_uri']
        # Placeholders whose concrete values are supplied when the state
        # machine is executed (see create()).
        self.execution_input = ExecutionInput(
            schema={
                "DataUpdate": bool,
                "PreprocessingJobName": str,
                "PreprocessingInputData": str,
                "PreprocessingOutputDataTrain": str,
                "PreprocessingOutputDataTest": str,
                "TrainingJobName": str,
                "TrainingParameters": dict,
                "TrainingOutputModel": str,
                "ExperimentName": str,
                "EvaluationProcessingJobName": str,
                "EvaluationProcessingOutput": str,
            }
        )
        self.experiment = self._create_experiments(self.experiment_name)
        self.trial = self._create_trial(self.experiment_name)

    # Workflow creation
    def create(self):
        """Assemble the state machine, create/update it, and start one
        execution with concrete values for the ExecutionInput placeholders."""
        preprocess_job_name, train_job_name, evaluation_job_name = self._create_job_name()
        s3_bucket_base_uri = f"s3://{self.bucket}"
        output_data = f"{s3_bucket_base_uri}/data/processing/output-{self.timestamp}"
        model_data_s3_uri = f"{s3_bucket_base_uri}/{train_job_name}/output/model.tar.gz"
        output_model_evaluation_s3_uri = f"{s3_bucket_base_uri}/{train_job_name}/evaluation"

        # Creating each step
        data_source_step = self._data_source()
        preprocess_step = self._preprocess()
        train_step, train_code = self._train(f"{s3_bucket_base_uri}/{train_job_name}/output", data_source_step)
        evaluation_step = self._evaluation(train_step)
        experiment_upload_step = self._experiment_upload()

        # Determine whether to execute preprocess.
        # If there is a data update, preprocessing is executed.
        # (Judged by the contents of the `DataUpdate` key of ExecutionInput)
        choice_state = Choice("Determine whether to execute preprocess.")
        choice_state.add_choice(
            rule=ChoiceRule.BooleanEquals(variable="$.DataUpdate", value=True),
            next_step=preprocess_step
        )
        choice_state.add_choice(
            rule=ChoiceRule.BooleanEquals(variable="$.DataUpdate", value=False),
            next_step=data_source_step
        )

        # Create a step when it fails
        failed_state_sagemaker_processing_failure = Fail(
            "ML Workflow failed", cause="SageMakerProcessingJobFailed"
        )
        catch_state_processing = Catch(
            error_equals=["States.TaskFailed"],
            next_step=failed_state_sagemaker_processing_failure,
        )
        data_source_step.add_catch(catch_state_processing)
        preprocess_step.add_catch(catch_state_processing)
        train_step.add_catch(catch_state_processing)
        evaluation_step.add_catch(catch_state_processing)
        experiment_upload_step.add_catch(catch_state_processing)

        # execution
        workflow_graph = Chain([choice_state, preprocess_step, data_source_step, train_step, evaluation_step, experiment_upload_step])
        branching_workflow = Workflow(
            name=self.workflow_name,
            definition=workflow_graph,
            role=self.workflow_execution_role,
        )
        branching_workflow.create()
        branching_workflow.update(workflow_graph)
        # NOTE: The update will not be reflected immediately, so you have to wait for a while.
        time.sleep(5)
        branching_workflow.execute(
            inputs={
                "DataUpdate": self.data_update,
                "PreprocessingJobName": preprocess_job_name,
                "PreprocessingInputData": self.input,
                "PreprocessingOutputDataTrain": output_data + '/train_data',
                "PreprocessingOutputDataTest": output_data + '/test_data',
                "TrainingJobName": train_job_name,
                "TrainingParameters": {
                    "sagemaker_program": "train.py",
                    "sagemaker_submit_directory": train_code,
                    "learning_rate": self.learning_rate,
                    "epochs": self.epochs
                },
                "TrainingOutputModel": model_data_s3_uri,
                "ExperimentName": self.experiment.experiment_name,
                "EvaluationProcessingJobName": evaluation_job_name,
                "EvaluationProcessingOutput": output_model_evaluation_s3_uri
            }
        )

    # Select a data source according to whether the data has been updated.
    # If the data has not been updated, select the latest preprocessed data from the past Experiments data.
    def _data_source(self):
        """Lambda step that resolves which preprocessed data set to train on."""
        step = stepfunctions.steps.compute.LambdaStep(
            "data source",
            parameters={
                "FunctionName": self.lambda_settings['data_source']['function_name'],
                "Payload": {
                    "StateInput.$": "$",
                    "data_update": self.execution_input["DataUpdate"],
                    "experiment-name": self.experiment_name,
                    "bucket_name": self.bucket,
                    "job": "data_source"
                },
            },
        )
        step.add_retry(
            Retry(error_equals=["States.TaskFailed"], interval_seconds=15, max_attempts=2, backoff_rate=4.0)
        )
        return step

    # pre-process step creation
    def _preprocess(self):
        """ProcessingStep that runs preprocess.py in the custom ECR image."""
        # https://sagemaker.readthedocs.io/en/stable/api/training/processing.html?highlight=ScriptProcessor#sagemaker.processing.ScriptProcessor
        preprocessor = ScriptProcessor(
            command=['python3'],
            image_uri=self.repository_uri,
            role=self.sagemaker_execution_role,
            sagemaker_session=self.sagemaker_session,
            instance_count=self.preprocessor_settings['instance_count'],
            instance_type=self.preprocessor_settings['instance_type'],
            max_runtime_in_seconds=self.preprocessor_settings['max_runtime_in_seconds']
        )
        input_code = self._upload('preprocess.py', "data/preprocess/code")
        # Define inputs and outputs
        inputs = [
            ProcessingInput(
                source=self.execution_input["PreprocessingInputData"], destination="/opt/ml/processing/input", input_name="source_input"
            ),
            ProcessingInput(
                source=input_code,
                destination="/opt/ml/processing/input/code",
                input_name="preprocess_code",
            ),
        ]
        outputs = [
            ProcessingOutput(
                source="/opt/ml/processing/train",
                destination=self.execution_input["PreprocessingOutputDataTrain"],
                output_name="train_data",
            ),
            ProcessingOutput(
                source="/opt/ml/processing/test",
                destination=self.execution_input["PreprocessingOutputDataTest"],
                output_name="test_data",
            ),
        ]
        # https://aws-step-functions-data-science-sdk.readthedocs.io/en/v2.1.0/sagemaker.html?highlight=ProcessingStep#stepfunctions.steps.sagemaker.ProcessingStep
        return ProcessingStep(
            "Preprocessing step",
            processor=preprocessor,
            job_name=self.execution_input["PreprocessingJobName"],
            inputs=inputs,
            outputs=outputs,
            experiment_config={
                "ExperimentName": self.execution_input["ExperimentName"],
                'TrialName': self.trial.trial_name,
                'TrialComponentDisplayName': 'Preprocess'
            },
            container_entrypoint=[
                "python3", "/opt/ml/processing/input/code/preprocess.py"
            ],
        )

    # training step creation
    def _train(self, model_dir, step):
        """TrainingStep fed by *step*'s Lambda output; returns the step and
        the S3 URI of the uploaded training source bundle."""
        # NOTE: max_wait can be specified only when using spot instance.
        if self.estimator_settings['use_spot_instances']:
            max_wait = self.estimator_settings['max_wait']
        else:
            max_wait = None
        # NOTE: You can also use your own container image.
        if self.estimator_settings['image_uri']:
            estimator = Estimator(
                image_uri=self.estimator_settings['image_uri'],
                instance_count=self.estimator_settings['instance_count'],
                instance_type=self.estimator_settings['instance_type'],
                use_spot_instances=self.estimator_settings['use_spot_instances'],
                max_run=self.estimator_settings['max_run'],
                max_wait=max_wait,
                role=self.sagemaker_execution_role,
                output_path=f"s3://{self.bucket}",
                metric_definitions=[
                    {'Name': 'train:loss', 'Regex': '.*?loss: (.*?) -'},
                    {'Name': 'train:accuracy', 'Regex': '.*?accuracy: (0.\\d+).*?'},
                ],
            )
        else:
            # https://sagemaker.readthedocs.io/en/stable/frameworks/tensorflow/sagemaker.tensorflow.html#tensorflow-estimator
            estimator = TensorFlow(
                entry_point="train.py",
                instance_count=self.estimator_settings['instance_count'],
                instance_type=self.estimator_settings['instance_type'],
                use_spot_instances=self.estimator_settings['use_spot_instances'],
                max_run=self.estimator_settings['max_run'],
                max_wait=max_wait,
                role=self.sagemaker_execution_role,
                framework_version="2.4",
                py_version="py37",
                output_path=f"s3://{self.bucket}",
                metric_definitions=[
                    {'Name': 'train:loss', 'Regex': '.*?loss: (.*?) -'},
                    {'Name': 'train:accuracy', 'Regex': '.*?accuracy: (0.\\d+).*?'},
                ],
            )
        train_code = self._upload('source.tar.gz', 'data/train/code')
        # https://aws-step-functions-data-science-sdk.readthedocs.io/en/v2.1.0/sagemaker.html?highlight=ProcessingStep#stepfunctions.steps.sagemaker.TrainingStep
        return steps.TrainingStep(
            "Training Step",
            estimator=estimator,
            data={"train_data": sagemaker.TrainingInput(
                step.output()["Payload"]["train_data"], content_type="application/octet-stream"
            )},
            job_name=self.execution_input["TrainingJobName"],
            experiment_config={
                "ExperimentName": self.execution_input["ExperimentName"],
                'TrialName': self.trial.trial_name,
                'TrialComponentDisplayName': 'Train'
            },
            hyperparameters=self.execution_input["TrainingParameters"],
            wait_for_completion=True,
            result_path="$.TrainResult"
        ), train_code

    # evaluation step creation
    def _evaluation(self, step):
        """ProcessingStep that evaluates the trained model against test data."""
        evaluation_processor = ScriptProcessor(
            command=['python3'],
            image_uri=self.repository_uri,
            role=self.sagemaker_execution_role,
            sagemaker_session=self.sagemaker_session,
            instance_count=self.evaluation_processor_settings['instance_count'],
            instance_type=self.evaluation_processor_settings['instance_type'],
            max_runtime_in_seconds=self.evaluation_processor_settings['max_runtime_in_seconds']
        )
        evaluation_code = self._upload('evaluation.py', "data/evaluation/code")
        inputs = [
            ProcessingInput(
                source=step.output()["Payload"]["test_data"],
                destination="/opt/ml/processing/test",
                input_name="test_data",
            ),
            ProcessingInput(
                source=self.execution_input["TrainingOutputModel"],
                destination="/opt/ml/processing/model",
                input_name="model",
            ),
            ProcessingInput(
                source=evaluation_code,
                destination="/opt/ml/processing/input/code",
                input_name="evaluation_code",
            ),
        ]
        outputs = [
            ProcessingOutput(
                source="/opt/ml/processing/evaluation",
                destination=self.execution_input["EvaluationProcessingOutput"],
                output_name="evaluation",
            ),
        ]
        return ProcessingStep(
            "Evaluation step",
            processor=evaluation_processor,
            job_name=self.execution_input["EvaluationProcessingJobName"],
            inputs=inputs,
            outputs=outputs,
            experiment_config={
                "ExperimentName": self.execution_input["ExperimentName"],
                'TrialName': self.trial.trial_name,
                'TrialComponentDisplayName': 'Evaluation'
            },
            container_entrypoint=["python3", "/opt/ml/processing/input/code/evaluation.py"],
        )

    def _experiment_upload(self):
        """Lambda step that records this run's experiment results to S3."""
        step = stepfunctions.steps.compute.LambdaStep(
            "Upload Experiment",
            parameters={
                "FunctionName": self.lambda_settings['experiments']['function_name'],
                "Payload": {
                    "experiment-name": self.experiment_name,
                    "experiment_bucket_name": self.experiment_bucket_name,
                    "experiment_key": self.experiment_key,
                    "job": "experiment_upload"
                },
            },
        )
        step.add_retry(
            Retry(error_equals=["States.TaskFailed"], interval_seconds=15, max_attempts=2, backoff_rate=4.0)
        )
        return step

    def _create_job_name(self):
        """Return timestamped (preprocess, train, evaluation) job names."""
        preprocess_job_name = f"{self.preprocess_job_name_prefix}-{self.timestamp}"
        train_job_name = f"{self.train_job_name_prefix}-{self.timestamp}"
        evaluation_job_name = f"{self.evaluation_job_name_prefix}-{self.timestamp}"
        return preprocess_job_name, train_job_name, evaluation_job_name

    def _upload(self, file, prefix):
        """Upload *file* to s3://{bucket}/{prefix}/ and return its S3 URI."""
        return self.sagemaker_session.upload_data(
            file,
            bucket=self.bucket,
            key_prefix=prefix,
        )

    def _create_experiments(self, experiment_name):
        """Load the named Experiment, creating it if it does not exist yet."""
        try:
            experiment = Experiment.load(experiment_name=experiment_name)
        except Exception as ex:
            # Bug fix: only a missing experiment is recoverable here. Any
            # other failure (throttling, permissions, ...) previously fell
            # through and surfaced as an UnboundLocalError on `experiment`
            # instead of the real error — re-raise it.
            if "ResourceNotFound" not in str(ex):
                raise
            experiment = Experiment.create(
                experiment_name=experiment_name,
                description="example project experiments",
                sagemaker_boto_client=boto3.client('sagemaker'))
        return experiment

    def _create_trial(self, experiment_name):
        """Create a new Trial, named after the timestamp, in the experiment."""
        return Trial.create(
            trial_name=self.timestamp,
            # Bug fix: use the argument instead of silently ignoring it in
            # favor of self.experiment_name (callers pass the same value, so
            # behavior is unchanged).
            experiment_name=experiment_name,
            sagemaker_boto_client=boto3.client('sagemaker'),
        )
if __name__ == "__main__":
    # -c points at the YAML config file (defaults to config.yml).
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", type=str, default='config.yml')
    args, _ = parser.parse_known_args()

    with open(args.c, 'rb') as f:
        conf = yaml.safe_load(f)

    MLWorkflow(conf).create()
| 43.711443
| 163
| 0.619964
|
4a0f95e8594ee0e291e79ec9288e307bdef0fcf2
| 2,992
|
py
|
Python
|
plugins/filter/sudo_policy_for_unix_host_filters.py
|
llnagy76/ansible-privilege-manager
|
ca4fb68b58b1817a051c12ef06d759b2c4c1d63d
|
[
"Apache-2.0"
] | 1
|
2020-10-26T19:34:04.000Z
|
2020-10-26T19:34:04.000Z
|
plugins/filter/sudo_policy_for_unix_host_filters.py
|
llnagy76/ansible-privilege-manager
|
ca4fb68b58b1817a051c12ef06d759b2c4c1d63d
|
[
"Apache-2.0"
] | 1
|
2022-02-08T16:15:56.000Z
|
2022-02-13T19:52:43.000Z
|
plugins/filter/sudo_policy_for_unix_host_filters.py
|
llnagy76/ansible-privilege-manager
|
ca4fb68b58b1817a051c12ef06d759b2c4c1d63d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Copyright (c) 2021, One Identity LLC
# File: sudo_policy_for_unix_host_filters.py
# Desc: Ansible filters for sudo_policy_for_unix_host role
# Auth: Laszlo Nagy
# Note:
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Imports
# ------------------------------------------------------------------------------
# Future module imports for consistency across Python versions
from __future__ import absolute_import, division, print_function
# Want classes to be new type for consistency across Python versions
__metaclass__ = type
from ansible.errors import AnsibleFilterError
# ------------------------------------------------------------------------------
# Helper functions
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def get_latest_sudo_policies(sudo_policies_list):
    """
    Keep only the newest policy row per host.

    Each row is [hostname, policy_path, version, epoch_timestamp]; rows with
    a larger timestamp (column 3) win. First-seen host order is preserved.

    Example of sudo_policies_list:
    [
        ['qpm-rhel6-64a', '/etc/opt/quest/qpm4u/policy/sudoers', '7.1.99.7-55-g787b0a37a', '1634124307'],
        ['qpm-rhel6-64a', '/etc/opt/quest/qpm4u/policy/sudoers', '7.1.99.7-55-g787b0a37a', '1634124369']
    ]
    """
    newest_by_host = {}
    for policy in sudo_policies_list:
        host = policy[0]
        current = newest_by_host.get(host)
        # Replace only when strictly newer; ties keep the earlier row.
        if current is None or int(current[3]) < int(policy[3]):
            newest_by_host[host] = policy
    return list(newest_by_host.values())
# ------------------------------------------------------------------------------
# Classes
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
class FilterModule(object):
    """
    sudo_policy_for_unix_host role jinja2 filters
    """

    def filters(self):
        # Map the filter name usable from Jinja2 to its implementation.
        return {
            'latestsudopolicies': get_latest_sudo_policies,
        }
| 39.893333
| 105
| 0.492981
|
4a0f96ddce4a147355883206521618efe24538a0
| 1,323
|
py
|
Python
|
day3_orig/snmp_ex2.py
|
tgnelson/pynet-ons-feb19
|
c789e14812b90342f640f690e2717d36152487bc
|
[
"Apache-2.0"
] | null | null | null |
day3_orig/snmp_ex2.py
|
tgnelson/pynet-ons-feb19
|
c789e14812b90342f640f690e2717d36152487bc
|
[
"Apache-2.0"
] | null | null | null |
day3_orig/snmp_ex2.py
|
tgnelson/pynet-ons-feb19
|
c789e14812b90342f640f690e2717d36152487bc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
SNMP query all of the interfaces on the pynet-rtr1 using SNMPv3.
Get the interface description and the in_octets/out_octets
"""
from __future__ import print_function, unicode_literals
from getpass import getpass
from snmp_helper import snmp_get_oid_v3, snmp_extract
IFDESCR = '1.3.6.1.2.1.2.2.1.2.'
IFINOCTETS = '1.3.6.1.2.1.2.2.1.10.'
IFOUTOCTETS = '1.3.6.1.2.1.2.2.1.16.'
def main():
    """
    SNMP query all of the interfaces on the pynet-rtr1 using SNMPv3.
    Get the interface description and the in_octets/out_octets
    """
    my_key = getpass(prompt="Auth + Encryption Key: ")

    # SNMPv3 Connection Parameters
    ip_addr = '184.105.247.70'
    a_user = 'pysnmp'
    auth_key = my_key
    encrypt_key = my_key
    snmp_user = (a_user, auth_key, encrypt_key)
    pynet_rtr1 = (ip_addr, 161)

    print()
    print("{:>15} {:>15} {:>15}".format("IfDescr", "IfInOctets", "IfOutOctets"))
    # Walk interface indices 1..7, querying the three columns per interface.
    for if_index in range(1, 8):
        results = [
            snmp_extract(snmp_get_oid_v3(pynet_rtr1, snmp_user, oid=base_oid + str(if_index)))
            for base_oid in (IFDESCR, IFINOCTETS, IFOUTOCTETS)
        ]
        print("{:>15} {:>15} {:>15}".format(*results))
    print()
if __name__ == "__main__":
main()
| 30.767442
| 80
| 0.656841
|
4a0f9792641654235b049732b3e625e2dc2bed7f
| 3,243
|
py
|
Python
|
src/python/txtai/api/routers/embeddings.py
|
personx000/txtai
|
fa342a4f6a69fb1e2cea0e85e39915f055ee8503
|
[
"Apache-2.0"
] | 1
|
2020-09-18T14:11:34.000Z
|
2020-09-18T14:11:34.000Z
|
src/python/txtai/api/routers/embeddings.py
|
aria1991/txtai
|
fa342a4f6a69fb1e2cea0e85e39915f055ee8503
|
[
"Apache-2.0"
] | null | null | null |
src/python/txtai/api/routers/embeddings.py
|
aria1991/txtai
|
fa342a4f6a69fb1e2cea0e85e39915f055ee8503
|
[
"Apache-2.0"
] | null | null | null |
"""
Defines API paths for embeddings endpoints.
"""
from typing import List
from fastapi import APIRouter, Body, HTTPException, Request
from .. import application
from ...app import ReadOnlyError
router = APIRouter()
@router.get("/search")
def search(query: str, request: Request):
"""
Finds documents in the embeddings model most similar to the input query. Returns
a list of {id: value, score: value} sorted by highest score, where id is the
document id in the embeddings model.
Args:
query: query text
request: FastAPI request
Returns:
list of {id: value, score: value}
"""
return application.get().search(query, request)
@router.post("/batchsearch")
def batchsearch(queries: List[str] = Body(...), limit: int = Body(...)):
"""
Finds documents in the embeddings model most similar to the input queries. Returns
a list of {id: value, score: value} sorted by highest score per query, where id is
the document id in the embeddings model.
Args:
queries: queries text
limit: maximum results
Returns:
list of {id: value, score: value} per query
"""
return application.get().batchsearch(queries, limit)
@router.post("/add")
def add(documents: List[dict] = Body(...)):
"""
Adds a batch of documents for indexing.
Args:
documents: list of {id: value, text: value}
"""
try:
application.get().add(documents)
except ReadOnlyError as e:
raise HTTPException(status_code=403, detail=e.args[0]) from e
@router.get("/index")
def index():
"""
Builds an embeddings index for previously batched documents.
"""
try:
application.get().index()
except ReadOnlyError as e:
raise HTTPException(status_code=403, detail=e.args[0]) from e
@router.get("/upsert")
def upsert():
"""
Runs an embeddings upsert operation for previously batched documents.
"""
try:
application.get().upsert()
except ReadOnlyError as e:
raise HTTPException(status_code=403, detail=e.args[0]) from e
@router.post("/delete")
def delete(ids: List = Body(...)):
"""
Deletes from an embeddings index. Returns list of ids deleted.
Args:
ids: list of ids to delete
Returns:
ids deleted
"""
try:
return application.get().delete(ids)
except ReadOnlyError as e:
raise HTTPException(status_code=403, detail=e.args[0]) from e
@router.get("/count")
def count():
"""
Deletes from an embeddings index. Returns list of ids deleted.
Args:
ids: list of ids to delete
Returns:
ids deleted
"""
return application.get().count()
@router.get("/transform")
def transform(text: str):
"""
Transforms text into an embeddings array.
Args:
text: input text
Returns:
embeddings array
"""
return application.get().transform(text)
@router.post("/batchtransform")
def batchtransform(texts: List[str] = Body(...)):
"""
Transforms list of text into embeddings arrays.
Args:
texts: list of text
Returns:
embeddings arrays
"""
return application.get().batchtransform(texts)
| 21.476821
| 86
| 0.63799
|
4a0f98e008cde55e46efac4e0d06ae778f49b5c8
| 3,950
|
py
|
Python
|
sdk/python/lib/pulumi/x/automation/_server.py
|
ynden/pulumi
|
f374b8a953817365e829da7142386f06229e5302
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/lib/pulumi/x/automation/_server.py
|
ynden/pulumi
|
f374b8a953817365e829da7142386f06229e5302
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/lib/pulumi/x/automation/_server.py
|
ynden/pulumi
|
f374b8a953817365e829da7142386f06229e5302
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2021, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import grpc
import sys
import traceback
from contextlib import suppress
from .workspace import PulumiFn
from ... import log
from ...runtime.proto import language_pb2, plugin_pb2, LanguageRuntimeServicer
from ...runtime import run_in_stack, reset_options, set_all_config
from ...errors import RunError
_py_version_less_than_3_7 = sys.version_info[0] == 3 and sys.version_info[1] < 7
class LanguageServer(LanguageRuntimeServicer):
    """gRPC language-runtime servicer that executes an inline Python Pulumi
    program (a ``PulumiFn``) when the engine issues a Run request."""

    program: PulumiFn

    def __init__(self, program: PulumiFn) -> None:
        self.program = program  # type: ignore

    @staticmethod
    def on_pulumi_exit():
        # Reset globals
        reset_options()

    def GetRequiredPlugins(self, request, context):
        # Inline programs declare no required plugins — return an empty response.
        return language_pb2.GetRequiredPluginsResponse()

    def Run(self, request, context):
        """Run the user program once; errors are reported in the returned
        RunResponse rather than raised across the RPC boundary."""
        # Configure the runtime so that the user program hooks up to Pulumi as appropriate.
        engine_address = request.args[0] if request.args else ""
        reset_options(
            project=request.project,
            monitor_address=request.monitor_address,
            engine_address=engine_address,
            stack=request.stack,
            parallel=request.parallel,
            preview=request.dryRun
        )

        if request.config:
            set_all_config(request.config)

        # The strategy here is derived from sdk/python/cmd/pulumi-language-python-exec
        result = language_pb2.RunResponse()
        # A fresh loop per Run keeps program executions isolated from each other.
        loop = asyncio.new_event_loop()
        try:
            loop.run_until_complete(run_in_stack(self.program))
        except RunError as exn:
            # RunError carries a user-facing message; no traceback needed.
            msg = str(exn)
            log.error(msg)
            result.error = str(msg)
            return result
        except grpc.RpcError as exn:
            # If the monitor is unavailable, it is in the process of shutting down or has already
            # shut down. Don't emit an error if this is the case.
            if exn.code() == grpc.StatusCode.UNAVAILABLE:
                log.debug("Resource monitor has terminated, shutting down.")
            else:
                msg = f"RPC error: {exn.details()}"
                log.error(msg)
                result.error = msg
                return result
        except Exception as exn:
            # Any other failure: include the full traceback for debugging.
            msg = str(f"python inline source runtime error: {exn}\n{traceback.format_exc()}")
            log.error(msg)
            result.error = msg
            return result
        finally:
            # If there's an exception during `run_in_stack`, it may result in pending asyncio tasks remaining unresolved
            # at the time the loop is closed, which results in a `Task was destroyed but it is pending!` error being
            # logged to stdout. To avoid this, we collect all the unresolved tasks in the loop and cancel them before
            # closing the loop.
            # asyncio.Task.all_tasks was the pre-3.7 spelling of asyncio.all_tasks.
            pending = asyncio.Task.all_tasks(loop) if _py_version_less_than_3_7 else asyncio.all_tasks(loop)
            log.debug(f"Cancelling {len(pending)} tasks.")
            for task in pending:
                task.cancel()
                with suppress(asyncio.CancelledError):
                    loop.run_until_complete(task)
            loop.close()
            sys.stdout.flush()
            sys.stderr.flush()

        return result

    def GetPluginInfo(self, request, context):
        # No version metadata is reported for the inline language host.
        return plugin_pb2.PluginInfo()
| 37.980769
| 120
| 0.650633
|
4a0f9991e841855c8b9dfac856396d4eae89a828
| 825
|
py
|
Python
|
scripts/remove_bad_ttr_fits.py
|
davmre/sigvisa
|
91a1f163b8f3a258dfb78d88a07f2a11da41bd04
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/remove_bad_ttr_fits.py
|
davmre/sigvisa
|
91a1f163b8f3a258dfb78d88a07f2a11da41bd04
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/remove_bad_ttr_fits.py
|
davmre/sigvisa
|
91a1f163b8f3a258dfb78d88a07f2a11da41bd04
|
[
"BSD-3-Clause"
] | null | null | null |
# Removes phase fits whose travel-time residual is implausibly large.
# NOTE(review): uses Python 2 print statements — this script is Python 2 only.
from sigvisa import Sigvisa
from sigvisa.source.event import get_event
from sigvisa.models.ttime import tt_predict

import numpy as np

# Fitting run whose phase arrivals are being screened.
runid=14

s = Sigvisa()

# All fitted phase arrivals for this run, joined to their parent fits.
query = "select f.evid, f.sta, f.fitid, fp.fpid, fp.phase, fp.arrival_time from sigvisa_coda_fit f, sigvisa_coda_fit_phase fp where f.fitid=fp.fitid and f.runid=%d" % runid
fits = s.sql(query)

bad_fits = []
for (evid, sta, fitid, fpid, phase, arrival_time) in fits:
    ev = get_event(evid)
    # Predicted arrival = event origin time + predicted travel time.
    pred_at = ev.time + tt_predict(ev, sta, phase)
    ttr = np.abs(pred_at - arrival_time)
    # Residuals over 25 seconds are treated as bad fits and flagged for deletion.
    if ttr > 25.0:
        bad_fits.append(fpid)
        print "fit", fitid, "has phase", phase, "with tt residual", ttr

# Delete the flagged phase rows in a single transaction.
cursor = s.dbconn.cursor()
for fpid in bad_fits:
    cursor.execute("delete from sigvisa_coda_fit_phase where fpid=%d" % fpid)
cursor.close()
s.dbconn.commit()
| 31.730769
| 172
| 0.710303
|
4a0f99c86aacbda386f7cf475008f374d01467b1
| 1,075
|
py
|
Python
|
trac/upgrades/db35.py
|
DanVerh/trac_az
|
24ac877f9f43ad08372cb1d15a838d764d9e7df4
|
[
"BSD-3-Clause"
] | 324
|
2015-01-07T05:30:52.000Z
|
2022-03-22T07:20:56.000Z
|
trac/upgrades/db35.py
|
DanVerh/trac_az
|
24ac877f9f43ad08372cb1d15a838d764d9e7df4
|
[
"BSD-3-Clause"
] | 12
|
2017-03-24T23:24:55.000Z
|
2019-08-10T05:13:20.000Z
|
trac/upgrades/db35.py
|
DanVerh/trac_az
|
24ac877f9f43ad08372cb1d15a838d764d9e7df4
|
[
"BSD-3-Clause"
] | 142
|
2015-01-12T09:30:28.000Z
|
2022-02-21T00:39:38.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2021 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
from trac.db import Table, Column, Index, DatabaseManager
def do_upgrade(env, version, cursor):
    """Add the `notify_watch` table.

    Columns: an auto-increment primary key plus session id, authentication
    flag, and the (class, realm, target) triple identifying what is watched.
    """
    schema = [
        Column('id', auto_increment=True),
        Column('sid'),
        Column('authenticated', type='int'),
        Column('class'),
        Column('realm'),
        Column('target'),
        # One index per lookup direction: by session, and by watched target.
        Index(['sid', 'authenticated', 'class']),
        Index(['class', 'realm', 'target']),
    ]
    notify_watch = Table('notify_watch', key='id')[schema]
    DatabaseManager(env).create_tables([notify_watch])
| 35.833333
| 67
| 0.634419
|
4a0f99d6d28dd32467a498d1b3a5dbbd5c495068
| 576
|
py
|
Python
|
ssig_site/base/migrations/0007_auto_20180322_1147.py
|
LeoMcA/103P_2018_team51
|
cca9e022456b1e2653f0b69420ea914661c39b27
|
[
"MIT"
] | null | null | null |
ssig_site/base/migrations/0007_auto_20180322_1147.py
|
LeoMcA/103P_2018_team51
|
cca9e022456b1e2653f0b69420ea914661c39b27
|
[
"MIT"
] | 61
|
2018-02-22T11:10:48.000Z
|
2022-03-11T23:20:25.000Z
|
ssig_site/base/migrations/0007_auto_20180322_1147.py
|
LeoMcA/103P_2018_team51
|
cca9e022456b1e2653f0b69420ea914661c39b27
|
[
"MIT"
] | 2
|
2018-02-10T11:26:52.000Z
|
2018-02-21T12:14:36.000Z
|
# Generated by Django 2.0.2 on 2018-03-22 11:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename ``Event.description`` to ``short_description`` and add a
    required ``long_description`` text field."""
    dependencies = [
        ('base', '0006_auto_20180315_1654'),
    ]
    operations = [
        migrations.RenameField(
            model_name='event',
            old_name='description',
            new_name='short_description',
        ),
        migrations.AddField(
            model_name='event',
            name='long_description',
            # default='' only backfills existing rows; preserve_default=False
            # means the model field itself keeps no default afterwards.
            field=models.TextField(default=''),
            preserve_default=False,
        ),
    ]
| 23.04
| 47
| 0.574653
|
4a0f99eeb65e1d1a546ce030ae9f553675add5df
| 1,085
|
py
|
Python
|
etl/cleaner.py
|
FAST-PROJ/professor-virtual
|
f5c6d2db9ba0f5f00d7b6fdfa712c4d2c3b09020
|
[
"MIT"
] | null | null | null |
etl/cleaner.py
|
FAST-PROJ/professor-virtual
|
f5c6d2db9ba0f5f00d7b6fdfa712c4d2c3b09020
|
[
"MIT"
] | null | null | null |
etl/cleaner.py
|
FAST-PROJ/professor-virtual
|
f5c6d2db9ba0f5f00d7b6fdfa712c4d2c3b09020
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
__author__ = "Mateus Ferreira"
__copyright__ = "Copyright 2020, The FAST-PROJ Group"
__credits__ = ["Mateus Ferreira"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "FAST-PROJ"
__email__ = "#"
__status__ = "Development"
import re
class Cleaner:
    """Text normaliser: strips leading line numbers, padding whitespace,
    blank lines, lower-case accented characters and parentheses."""

    # Single-pass character translation table: lower-case accented vowels
    # and the cedilla map to plain ASCII, parentheses are dropped outright.
    _TRANSLATION = str.maketrans({
        'ç': 'c',
        'á': 'a', 'â': 'a', 'ã': 'a', 'à': 'a',
        'é': 'e', 'ê': 'e', 'è': 'e',
        'í': 'i', 'î': 'i', 'ì': 'i',
        'ó': 'o', 'ô': 'o', 'õ': 'o',
        'ú': 'u', 'û': 'u', 'ù': 'u',
        '(': None, ')': None,
    })

    def __init__(self):
        # Holds the most recently stored cleaned text (see set/getCleanText).
        self.cleanText = ''

    def removeIsolatedNumbers(self, text):
        """Drop a leading digit run (plus one separator char) from each line."""
        return re.sub(r"^\d+[^\w-]", "", text, flags=re.MULTILINE)

    def removeSpaces(self, text):
        """Trim space padding from both ends of every line."""
        return re.sub(r"(^ +)|( +$)", "", text, flags=re.MULTILINE)

    def removeBlankLines(self, text):
        """Delete runs of blank (whitespace-only) lines."""
        return re.sub(r"^(?:[\t ]*(?:\r?[\n\r]))+", "", text, flags=re.MULTILINE)

    def removeSpecialCaracteres(self, text):
        """Replace accented characters with ASCII and strip parentheses."""
        return text.translate(self._TRANSLATION)

    def setCleanText(self, cleanText):
        self.cleanText = cleanText

    def getCleanText(self):
        return self.cleanText
| 27.125
| 77
| 0.589862
|
4a0f9a333f3e471106544698ec90ec2026f97dcc
| 1,181
|
py
|
Python
|
examples/plot_shift_evoked.py
|
Anevar/mne-python
|
15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb
|
[
"BSD-3-Clause"
] | 2
|
2015-09-27T20:33:49.000Z
|
2020-04-22T19:10:56.000Z
|
examples/plot_shift_evoked.py
|
Anevar/mne-python
|
15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plot_shift_evoked.py
|
Anevar/mne-python
|
15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb
|
[
"BSD-3-Clause"
] | 1
|
2018-09-15T09:45:38.000Z
|
2018-09-15T09:45:38.000Z
|
"""
==================================
Shifting time-scale in evoked data
==================================
"""
# Author: Mainak Jas <mainak@neuro.hut.fi>
#
# License: BSD (3-clause)
print(__doc__)
import matplotlib.pyplot as plt
import mne
from mne import fiff
from mne.datasets import sample
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
# Reading evoked data
evoked = fiff.Evoked(fname, setno='Left Auditory',
baseline=(None, 0), proj=True)
picks = fiff.pick_channels(ch_names=evoked.info['ch_names'],
include="MEG 2332", exclude="bad")
# Create subplots
f, (ax1, ax2, ax3) = plt.subplots(3)
evoked.plot(exclude=[], picks=picks, axes=ax1,
titles=dict(grad='Before time shifting'))
# Apply relative time-shift of 500 ms
evoked.shift_time(0.5, relative=True)
evoked.plot(exclude=[], picks=picks, axes=ax2,
titles=dict(grad='Relative shift: 500 ms'))
# Apply absolute time-shift of 500 ms
evoked.shift_time(0.5, relative=False)
evoked.plot(exclude=[], picks=picks, axes=ax3,
titles=dict(grad='Absolute shift: 500 ms'))
mne.viz.tight_layout()
| 25.12766
| 61
| 0.638442
|
4a0f9adca0413414adcd3021dd15031703266db2
| 7,496
|
py
|
Python
|
tests/test_compile.py
|
rahulbahal7/restricted-python
|
c39cffe71dfc30630e946977735303d3a65b0383
|
[
"ZPL-2.1"
] | null | null | null |
tests/test_compile.py
|
rahulbahal7/restricted-python
|
c39cffe71dfc30630e946977735303d3a65b0383
|
[
"ZPL-2.1"
] | null | null | null |
tests/test_compile.py
|
rahulbahal7/restricted-python
|
c39cffe71dfc30630e946977735303d3a65b0383
|
[
"ZPL-2.1"
] | null | null | null |
from RestrictedPython import compile_restricted
from RestrictedPython import compile_restricted_eval
from RestrictedPython import compile_restricted_exec
from RestrictedPython import compile_restricted_single
from RestrictedPython import CompileResult
from RestrictedPython._compat import IS_PY2
from RestrictedPython._compat import IS_PY3
from RestrictedPython._compat import IS_PY38_OR_GREATER
from tests.helper import restricted_eval
import platform
import pytest
import types
def test_compile__compile_restricted_invalid_code_input():
    """A non-string source object is rejected in every compile mode."""
    for mode in ('exec', 'eval', 'single'):
        with pytest.raises(TypeError):
            compile_restricted(object(), '<string>', mode)
def test_compile__compile_restricted_invalid_policy_input():
    # A plain object is not a valid `policy` argument.
    with pytest.raises(TypeError):
        compile_restricted("pass", '<string>', 'exec', policy=object)
def test_compile__compile_restricted_invalid_mode_input():
    # Only 'exec', 'eval' and 'single' modes are accepted.
    with pytest.raises(TypeError):
        compile_restricted("pass", '<string>', 'invalid')
# Deliberately malformed source: assignment to a literal.
# (Constant name has a typo — INVALID_ASSIGNMENT — kept for compatibility.)
INVALID_ASSINGMENT = """
1 = 2
"""
def test_compile__invalid_syntax():
    """A SyntaxError message includes the offending statement."""
    with pytest.raises(SyntaxError) as err:
        compile_restricted(INVALID_ASSINGMENT, '<string>', 'exec')
    # CPython 3.8 changed the wording from "can't" to "cannot".
    if IS_PY38_OR_GREATER:
        assert "cannot assign to literal at statement:" in str(err.value)
    else:
        assert "can't assign to literal at statement:" in str(err.value)
def test_compile__compile_restricted_exec__1():
    """It returns a CompileResult on success."""
    result = compile_restricted_exec('a = 42')
    assert result.__class__ == CompileResult
    assert result.errors == ()
    assert result.warnings == []
    assert result.used_names == {}
    # The returned code object is directly executable.
    glob = {}
    exec(result.code, glob)
    assert glob['a'] == 42
def test_compile__compile_restricted_exec__2():
    """It compiles without restrictions if there is no policy."""
    # `_a` would normally be rejected (leading underscore); with
    # policy=None the restriction checks are skipped entirely.
    result = compile_restricted_exec('_a = 42', policy=None)
    assert result.errors == ()
    assert result.warnings == []
    assert result.used_names == {}
    glob = {}
    exec(result.code, glob)
    assert glob['_a'] == 42
def test_compile__compile_restricted_exec__3():
    """It returns a tuple of errors if the code is not allowed.
    There is no code in this case.
    """
    result = compile_restricted_exec('_a = 42\n_b = 43')
    # One error per offending line.
    errors = (
        'Line 1: "_a" is an invalid variable name because it starts with "_"',
        'Line 2: "_b" is an invalid variable name because it starts with "_"')
    assert result.errors == errors
    assert result.warnings == []
    assert result.used_names == {}
    assert result.code is None
def test_compile__compile_restricted_exec__4():
    """It does not return code on a SyntaxError."""
    result = compile_restricted_exec('asdf|')
    assert result.code is None
    assert result.warnings == []
    assert result.used_names == {}
    assert result.errors == (
        "Line 1: SyntaxError: invalid syntax at statement: 'asdf|'",)
def test_compile__compile_restricted_exec__5():
    """It does not return code if the code contains a NULL byte."""
    result = compile_restricted_exec('a = 5\x00')
    assert result.code is None
    assert result.warnings == []
    assert result.used_names == {}
    # The null-byte error wording differs between Python 2 and 3.
    if IS_PY2:
        assert result.errors == (
            'compile() expected string without null bytes',)
    else:
        assert result.errors == (
            'source code string cannot contain null bytes',)
# Python 2 style `exec` statement — a SyntaxError under Python 3.
EXEC_STATEMENT = """\
def no_exec():
    exec 'q = 1'
"""
@pytest.mark.skipif(
    IS_PY2,
    reason="exec statement in Python 2 is handled by RestrictedPython ")
def test_compile__compile_restricted_exec__10():
    """It is a SyntaxError to use the `exec` statement. (Python 3 only)"""
    result = compile_restricted_exec(EXEC_STATEMENT)
    assert (
        'Line 2: SyntaxError: Missing parentheses in call to \'exec\' at '
        'statement: "exec \'q = 1\'"',) == result.errors
# A function definition is a statement — invalid inside an expression.
FUNCTION_DEF = """\
def a():
    pass
"""
def test_compile__compile_restricted_eval__1():
    """It compiles code as an Expression.
    Function definitions are not allowed in Expressions.
    """
    result = compile_restricted_eval(FUNCTION_DEF)
    assert result.errors == (
        "Line 1: SyntaxError: invalid syntax at statement: 'def a():'",)
def test_compile__compile_restricted_eval__2():
    """It compiles code as an Expression."""
    assert restricted_eval('4 * 6') == 24
def test_compile__compile_restricted_eval__used_names():
    # Every free name referenced by the expression is reported.
    result = compile_restricted_eval("a + b + func(x)")
    assert result.errors == ()
    assert result.warnings == []
    assert result.used_names == {'a': True, 'b': True, 'x': True, 'func': True}
def test_compile__compile_restricted_csingle():
    """It compiles code as an Interactive."""
    result = compile_restricted_single('4 * 6')
    # Interactive ('single') mode is rejected outright: no code, one error.
    assert result.code is None
    assert result.errors == (
        'Line None: Interactive statements are not allowed.',
    )
# Python 2 style `print` statement example.
PRINT_EXAMPLE = """
def a():
    print 'Hello World!'
"""
@pytest.mark.skipif(
    IS_PY3,
    reason="Print statement is gone in Python 3."
           "Test Deprecation Warming in Python 2")
def test_compile_restricted():
    """This test checks compile_restricted itself if that emit Python warnings.
    For actual tests for print statement see: test_print_stmt.py
    """
    with pytest.warns(SyntaxWarning) as record:
        result = compile_restricted(PRINT_EXAMPLE, '<string>', 'exec')
        assert isinstance(result, types.CodeType)
    # Non-CPython versions have a RuntimeWarning, too.
    if len(record) > 2:  # pragma: no cover
        record.pop()
    assert len(record) == 1
    assert record[0].message.args[0] == \
        "Line 2: Prints, but never reads 'printed' variable."
# Calling eval() is always forbidden by the restricted compiler.
EVAL_EXAMPLE = """
def a():
    eval('2 + 2')
"""
def test_compile_restricted_eval():
    """This test checks compile_restricted itself if that raise Python errors.
    """
    with pytest.raises(SyntaxError,
                       match="Line 3: Eval calls are not allowed."):
        compile_restricted(EVAL_EXAMPLE, '<string>', 'exec')
def test_compile___compile_restricted_mode__1(recwarn, mocker):
    """It warns when using another Python implementation than CPython."""
    if platform.python_implementation() == 'CPython':  # pragma: no cover
        # Using CPython we have to fake the check:
        mocker.patch('RestrictedPython.compile.IS_CPYTHON', new=False)
    compile_restricted('42')
    # Exactly one RuntimeWarning with the documented message is emitted.
    assert len(recwarn) == 1
    w = recwarn.pop()
    assert w.category == RuntimeWarning
    assert str(w.message) == str(
        'RestrictedPython is only supported on CPython: use on other Python '
        'implementations may create security issues.'
    )
@pytest.mark.skipif(
    platform.python_implementation() == 'CPython',
    reason='Warning only present if not CPython.')
def test_compile_CPython_warning(recwarn, mocker):  # pragma: no cover
    """It warns when using another Python implementation than CPython."""
    # Same check as test_compile___compile_restricted_mode__1, but on a
    # real non-CPython interpreter instead of a mocked flag.
    assert platform.python_implementation() != 'CPython'
    with pytest.warns(RuntimeWarning) as record:
        compile_restricted('42')
    assert len(record) == 1
    assert str(record[0].message) == str(
        'RestrictedPython is only supported on CPython: use on other Python '
        'implementations may create security issues.'
    )
| 32.310345
| 79
| 0.685699
|
4a0f9af70581dd969bb377340ab2b7ab1d85b8cc
| 18,043
|
py
|
Python
|
trax/data/tf_inputs_test.py
|
stjordanis/trax
|
517dc78174ff87c0b753b97247afc1408f8822fc
|
[
"Apache-2.0"
] | 2
|
2020-12-01T08:40:17.000Z
|
2021-08-10T05:33:21.000Z
|
trax/data/tf_inputs_test.py
|
stjordanis/trax
|
517dc78174ff87c0b753b97247afc1408f8822fc
|
[
"Apache-2.0"
] | null | null | null |
trax/data/tf_inputs_test.py
|
stjordanis/trax
|
517dc78174ff87c0b753b97247afc1408f8822fc
|
[
"Apache-2.0"
] | 1
|
2020-11-01T11:22:54.000Z
|
2020-11-01T11:22:54.000Z
|
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for trax.supervised.tf_inputs."""
import collections
import os
import gin
import numpy as np
from t5.data import preprocessors as t5_processors
from t5.data import test_utils as t5_test_utils
import tensorflow as tf
import tensorflow_datasets as tfds
from trax.data import inputs # pylint: disable=unused-import
from trax.data import tf_inputs
pkg_dir, _ = os.path.split(__file__)
_TESTDATA = os.path.join(pkg_dir, 'testdata')
def _test_dataset_ints(inp_lengths, tgt_lengths):
  """Create a test dataset of int64 tensors of given shapes.

  Each element is a dict with all-ones 'inputs' of length inp_lengths[i]
  and all-ones 'targets' of length tgt_lengths[i].
  """
  def make_examples():
    for n_inp, n_tgt in zip(inp_lengths, tgt_lengths):
      yield {'inputs': np.ones([n_inp], dtype=np.int64),
             'targets': np.ones([n_tgt], dtype=np.int64)}
  dtypes = {'inputs': tf.int64, 'targets': tf.int64}
  shapes = {'inputs': tf.TensorShape([None]),
            'targets': tf.TensorShape([None])}
  return tf.data.Dataset.from_generator(
      make_examples, output_types=dtypes, output_shapes=shapes)
def _load_dataset(name, split='train'):
  # Loads a TFDS dataset from the checked-in testdata dir, deterministically.
  return tfds.load(
      name=name, split=split, data_dir=_TESTDATA, shuffle_files=False)
def _c4_dataset(split='train'):
  # Test copy of the C4 dataset, pinned to version 2.3.0.
  return _load_dataset('c4:2.3.0', split=split)
def _spm_path():
  # SentencePiece vocabulary model checked into testdata.
  return os.path.join(_TESTDATA, 'sentencepiece.model')
def _t5_gin_config():
  """Bind every gin parameter that T5's preprocessors mark as gin.REQUIRED."""
  # The following pages worth of gin configuration are required because a lot
  # of T5 functions have `gin.REQUIRED` in code, i.e. you cannot use these
  # functions at all without having configured gin.
  noise_density = 0.15
  max_input_length = 50
  # What preprocessors to apply - we select a random chunk of the document if
  # it exceeds a certain lengths (`select_random_chunk`), then split up long
  # examples (`split_tokens`) and finally the denoising objective (`denoise`).
  #
  # In addition to this T5 concates multiple documents together to reduce
  # padding (`reduce_concat_tokens`) after `select_random_chunk`, but we skip
  # that since we don't do sequence packing.
  gin.bind_parameter('unsupervised.preprocessors', [
      t5_processors.select_random_chunk,
      t5_processors.split_tokens,
      t5_processors.denoise,
  ])
  # select_random_chunk
  gin.bind_parameter('select_random_chunk.feature_key', 'targets')
  gin.bind_parameter('select_random_chunk.max_length', max_input_length)
  # reduce_concat_tokens
  gin.bind_parameter('random_spans_helper.extra_tokens_per_span_inputs', 1)
  gin.bind_parameter('random_spans_helper.extra_tokens_per_span_targets', 1)
  gin.bind_parameter('random_spans_helper.inputs_length', max_input_length)
  gin.bind_parameter('random_spans_helper.mean_noise_span_length', 3.0)
  gin.bind_parameter('random_spans_helper.noise_density', noise_density)
  # split_tokens
  gin.bind_parameter('split_tokens.max_tokens_per_segment',
                     t5_processors.random_spans_tokens_length())
  # denoise
  gin.bind_parameter('denoise.inputs_fn',
                     t5_processors.noise_span_to_unique_sentinel)
  gin.bind_parameter('denoise.noise_density', noise_density)
  gin.bind_parameter('denoise.noise_mask_fn',
                     t5_processors.random_spans_noise_mask)
  gin.bind_parameter('denoise.targets_fn',
                     t5_processors.nonnoise_span_to_unique_sentinel)
class TFInputsTest(tf.test.TestCase):
def setUp(self):
super().setUp()
gin.clear_config()
def test_tokenize_detokenize(self):
def dataset():
yield 'I have a cat.'
# Character-level.
tok_char = list(tf_inputs.tokenize(dataset(), vocab_type='char'))
self.assertAllEqual(tok_char[0],
np.array([ord(c) for c in 'I have a cat.']))
detok = tf_inputs.detokenize(tok_char[0], vocab_type='char')
self.assertEqual(detok, 'I have a cat.')
# Sentencepiece.
tok_spc = list(tf_inputs.tokenize(
dataset(), vocab_type='sentencepiece',
vocab_dir=_TESTDATA, vocab_file='sentencepiece.model'))
self.assertAllEqual(tok_spc[0], np.array([27, 43, 3, 9, 1712, 5]))
detok = tf_inputs.detokenize(
list(tok_spc[0]), vocab_type='sentencepiece',
vocab_dir=_TESTDATA, vocab_file='sentencepiece.model')
self.assertEqual(detok, 'I have a cat.')
# Subword.
tok_sbw = list(tf_inputs.tokenize(
dataset(), vocab_type='subword',
vocab_dir=_TESTDATA, vocab_file='en_8k.subword'))
self.assertAllEqual(tok_sbw[0], np.array([139, 96, 12, 2217, 2, 21]))
detok = tf_inputs.detokenize(
tok_sbw[0], vocab_type='subword',
vocab_dir=_TESTDATA, vocab_file='en_8k.subword')
self.assertEqual(detok, 'I have a cat.')
def test_tokenize_keys_reservedids(self):
def dataset():
yield ('Cat.', 'Dog.')
tok_char1 = list(tf_inputs.tokenize(
dataset(), vocab_type='char', n_reserved_ids=5))
self.assertAllEqual(tok_char1[0][0], np.array([ord(c) + 5 for c in 'Cat.']))
self.assertAllEqual(tok_char1[0][1], np.array([ord(c) + 5 for c in 'Dog.']))
tok_char2 = list(tf_inputs.tokenize(
dataset(), keys=[0], vocab_type='char', n_reserved_ids=2))
self.assertAllEqual(tok_char2[0][0], np.array([ord(c) + 2 for c in 'Cat.']))
self.assertEqual(tok_char2[0][1], 'Dog.')
def test_tokenize_dict(self):
def dataset():
yield {'a': 'Cat.', 'b': 'Dog.'}
tok_char1 = list(tf_inputs.tokenize(dataset(), vocab_type='char'))
self.assertAllEqual(tok_char1[0]['a'], np.array([ord(c) for c in 'Cat.']))
self.assertAllEqual(tok_char1[0]['b'], np.array([ord(c) for c in 'Dog.']))
tok_char2 = list(tf_inputs.tokenize(dataset(), keys=['a'],
vocab_type='char'))
self.assertAllEqual(tok_char2[0]['a'], np.array([ord(c) for c in 'Cat.']))
self.assertEqual(tok_char2[0]['b'], 'Dog.')
def test_vocab_size(self):
# Character-level.
char_size = tf_inputs.vocab_size(vocab_type='char', n_reserved_ids=11)
self.assertEqual(char_size, 256 + 11)
# Sentencepiece.
spc_size = tf_inputs.vocab_size(
vocab_type='sentencepiece',
vocab_dir=_TESTDATA, vocab_file='sentencepiece.model')
self.assertEqual(spc_size, 32000)
# Subword.
sbw_size = tf_inputs.vocab_size(
vocab_type='subword',
vocab_dir=_TESTDATA, vocab_file='en_8k.subword')
self.assertEqual(sbw_size, 8183)
def test_c4_bare_preprocess_fn(self):
dataset = _c4_dataset()
example = list(tfds.as_numpy(dataset.take(1)))[0]
# Targets are NOT in the example.
self.assertNotIn('targets', example)
self.assertIn('text', example)
text = example['text']
# This should convert the dataset to an inputs/targets that are tokenized.
dataset = tf_inputs.c4_bare_preprocess_fn(dataset, spm_path=_spm_path())
example = list(tfds.as_numpy(dataset.take(1)))[0]
# Earlier text is now stored in targets_plaintext
self.assertIn('targets_plaintext', example)
self.assertEqual(example['targets_plaintext'], text)
# Targets are now tokenized.
self.assertIn('targets', example)
self.assertIsInstance(example['targets'], np.ndarray)
self.assertEqual(example['targets'].dtype, np.int64)
self.assertGreater(len(example['targets']), 0)
self.assertEqual(example['targets'][-1], 1) # we add EOS at the end.
# Inputs exist but is empty because t5 preprocessors' unsupervised wasn't
# gin configured with any.
self.assertIn('inputs', example)
self.assertEqual(len(example['inputs']), 0)
def test_c4_preprocess(self):
def load_c4_dataset(split='train'):
dataset = _c4_dataset(split=split)
return dataset.map(lambda example: (example, example['text']))
def examine_processed_dataset(proc_dataset):
count = 0
lengths = []
for example in tfds.as_numpy(proc_dataset):
count += 1
ex = example[0]
# Targets are in the example.
self.assertIn('targets', ex)
self.assertEqual(ex['targets'].dtype, np.int64)
lengths.append(len(ex['targets']))
return count, lengths
unfiltered_count = 0
for example in tfds.as_numpy(load_c4_dataset()):
unfiltered_count += 1
# Targets are NOT in the example.
self.assertNotIn('targets', example[0])
proc_dataset = tf_inputs.c4_preprocess(load_c4_dataset(), False, 2048)
# `examine_processed_dataset` has some asserts in it.
proc_count, char_lengths = examine_processed_dataset(proc_dataset)
# Both the original and filtered datasets have examples.
self.assertGreater(unfiltered_count, 0)
self.assertGreater(proc_count, 0)
# Because we filter out some entries on length.
self.assertLess(proc_count, unfiltered_count)
# Preprocess using the sentencepiece model in testdata.
spc_proc_dataset = tf_inputs.c4_preprocess(
load_c4_dataset(), False, 2048, tokenization='spc',
spm_path=_spm_path())
spc_proc_count, spc_lengths = examine_processed_dataset(spc_proc_dataset)
# spc shortens the target sequence a lot, should be almost equal to
# unfiltered
self.assertLessEqual(proc_count, spc_proc_count)
self.assertEqual(unfiltered_count, spc_proc_count)
# Assert all spc_lengths are lesser than their char counterparts.
for spc_len, char_len in zip(spc_lengths, char_lengths):
self.assertLessEqual(spc_len, char_len)
def test_c4(self):
gin.bind_parameter('c4_preprocess.max_target_length', 2048)
gin.bind_parameter('c4_preprocess.tokenization', 'spc')
gin.bind_parameter('c4_preprocess.spm_path', _spm_path())
# Just make sure this doesn't throw.
_ = tf_inputs.data_streams(
'c4', data_dir=_TESTDATA, input_name='targets', target_name='text',
preprocess_fn=tf_inputs.c4_preprocess)
def test_c4_bare_preprocess_fn_denoising_objective(self):
_t5_gin_config()
dataset = _c4_dataset()
dataset = tf_inputs.c4_bare_preprocess_fn(dataset, spm_path=_spm_path())
example = list(tfds.as_numpy(dataset.take(1)))[0]
# Assertions now.
self.assertIn('targets', example)
targets = example['targets']
self.assertIsInstance(targets, np.ndarray)
self.assertEqual(targets.dtype, np.int64)
self.assertGreater(len(targets), 0)
self.assertIn('inputs', example)
_inputs = example['inputs'] # pylint: disable=invalid-name
self.assertIsInstance(_inputs, np.ndarray)
self.assertEqual(_inputs.dtype, np.int64)
self.assertGreater(len(_inputs), 0)
# WHP inputs will have the bulk of the text.
self.assertGreater(len(_inputs), len(targets))
# WHP there will be two sentinel tokens in the inputs and targets.
inputs_counter = collections.Counter(_inputs.tolist())
targets_counter = collections.Counter(targets.tolist())
self.assertEqual(1, inputs_counter[31999])
self.assertEqual(1, inputs_counter[31998])
self.assertEqual(1, targets_counter[31999])
self.assertEqual(1, targets_counter[31998])
def test_c4_pretrain(self):
_t5_gin_config()
gin.bind_parameter('c4_bare_preprocess_fn.spm_path', _spm_path())
gin.bind_parameter('batcher.batch_size_per_device', 8)
gin.bind_parameter('batcher.eval_batch_size', 8)
gin.bind_parameter('batcher.max_eval_length', 50)
gin.bind_parameter('batcher.buckets', ([51], [8, 1]))
# Just make sure this doesn't throw.
_ = tf_inputs.data_streams(
'c4', data_dir=_TESTDATA, input_name='inputs', target_name='targets',
bare_preprocess_fn=tf_inputs.c4_bare_preprocess_fn)
def test_generic_text_dataset_preprocess_fn(self):
dataset = _load_dataset('squad/v1.1:2.0.0')
example, = tfds.as_numpy(dataset.take(1))
self.assertNotIn('inputs', example)
self.assertNotIn('targets', example)
proc_dataset = tf_inputs.generic_text_dataset_preprocess_fn(
dataset, spm_path=_spm_path(),
text_preprocess_fns=[lambda ds, training: t5_processors.squad(ds)],
copy_plaintext=True,
debug_print_examples=True,
debug_print_examples_rate=1.0)
proc_example, = tfds.as_numpy(proc_dataset.take(1))
self.assertIn('inputs', proc_example)
self.assertIn('targets', proc_example)
self.assertEqual(proc_example['inputs'].dtype, np.int64)
self.assertEqual(proc_example['targets'].dtype, np.int64)
# TODO(afrozm): Why does this test take so much time?
def test_inputs_using_generic_text_dataset_preprocess_fn(self):
gin.bind_parameter(
'generic_text_dataset_preprocess_fn.spm_path', _spm_path())
gin.bind_parameter(
'generic_text_dataset_preprocess_fn.text_preprocess_fns',
[lambda ds, training: t5_processors.squad(ds)])
# Just make sure this doesn't throw.
def data_streams():
return tf_inputs.data_streams(
'squad', data_dir=_TESTDATA, input_name='inputs',
target_name='targets',
bare_preprocess_fn=tf_inputs.generic_text_dataset_preprocess_fn,
shuffle_buffer_size=1)
n_devices = 3
squad_inputs = inputs.batcher(
data_streams=data_streams,
max_eval_length=512,
buckets=([513,], [n_devices, n_devices])
)
eval_stream = squad_inputs.eval_stream(n_devices)
inps, tgts, _ = next(eval_stream)
# We can only assert that the batch dim gets divided by n_devices.
self.assertEqual(inps.shape[0] % n_devices, 0)
self.assertEqual(tgts.shape[0] % n_devices, 0)
def test_filter_dataset_on_len(self):
# {1, 2}, {2, 4}, {3, 6} ... {10, 20}
ds = _test_dataset_ints(range(1, 11), range(2, 21, 2))
ds1 = tf_inputs.filter_dataset_on_len(
ds, True, {'inputs': [4, 8], 'targets': [14, 20]})
# Only {7, 14} and {8, 16} satisfy this.
self.assertLen(list(ds1.as_numpy_iterator()), 2)
ds2 = tf_inputs.filter_dataset_on_len(
ds, False, len_map={'inputs': [4, 8], 'targets': [14, 20]},
filter_on_eval=False)
# This is eval and we aren't supposed to filter it.
self.assertLen(list(ds2.as_numpy_iterator()), 10)
ds3 = tf_inputs.filter_dataset_on_len(
ds, False, len_map={'inputs': [4, 8], 'targets': [14, 20]},
filter_on_eval=True)
# This is eval and we are asked to filter it.
self.assertLen(list(ds3.as_numpy_iterator()), 2)
def test_truncate_dataset_on_len(self):
ds = _test_dataset_ints([5, 6, 7], [8, 9, 10])
ds1 = tf_inputs.truncate_dataset_on_len(
ds, True, len_map={'inputs': 6, 'targets': 4})
expected_ds = _test_dataset_ints([5, 6, 6], [4, 4, 4])
# training, should filter.
t5_test_utils.assert_dataset(ds1, list(expected_ds.as_numpy_iterator()))
# not Training, shouldn't filter.
ds2 = tf_inputs.truncate_dataset_on_len(
ds, False, len_map={'inputs': 6, 'targets': 4})
t5_test_utils.assert_dataset(ds2, list(ds.as_numpy_iterator()))
# not Training, but asked to filter, should filter.
ds3 = tf_inputs.truncate_dataset_on_len(
ds, False, len_map={'inputs': 6, 'targets': 4}, truncate_on_eval=True)
t5_test_utils.assert_dataset(ds3, list(expected_ds.as_numpy_iterator()))
def test_get_t5_preprocessor_by_name(self):
gin.clear_config()
gin.parse_config("""
get_t5_preprocessor_by_name.name = 'rekey'
get_t5_preprocessor_by_name.fn_kwargs = {'key_map': {'inputs': 'other', 'targets': 'text'}}
""")
prep_rekey = tf_inputs.get_t5_preprocessor_by_name()
og_dataset = tf.data.Dataset.from_tensors({
'text': 'That is good.', 'other': 'That is bad.'})
training = True
dataset = prep_rekey(og_dataset, training)
t5_test_utils.assert_dataset(
dataset,
{'inputs': 'That is bad.', 'targets': 'That is good.'})
def test_pad_dataset_to_length(self):
ds = _test_dataset_ints([5, 6, 7], [6, 7, 8])
ds1 = tf_inputs.pad_dataset_to_length(
ds, True, len_map={'inputs': 7, 'targets': 10})
expected_ds = [
{
'inputs': np.array([1, 1, 1, 1, 1, 0, 0], dtype=np.int64),
'targets': np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0], dtype=np.int64),
},
{
'inputs': np.array([1, 1, 1, 1, 1, 1, 0], dtype=np.int64),
'targets': np.array([1, 1, 1, 1, 1, 1, 1, 0, 0, 0], dtype=np.int64),
},
{
'inputs': np.array([1, 1, 1, 1, 1, 1, 1], dtype=np.int64),
'targets': np.array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0], dtype=np.int64),
},
]
t5_test_utils.assert_dataset(ds1, expected_ds)
def test_lm_token_preprocessing(self):
ds = _test_dataset_ints([1, 2, 3], [3, 2, 1])
ds1 = tf_inputs.lm_token_preprocessing(ds, True)
# pylint: disable=bad-whitespace
expected_ds = [
{
'inputs': np.array([1, 0, 1, 1, 1], dtype=np.int64),
'targets': np.array([1, 0, 1, 1, 1], dtype=np.int64),
'mask': np.array([0, 0, 1, 1, 1], dtype=np.int64),
},
{
'inputs': np.array([1, 1, 0, 1, 1], dtype=np.int64),
'targets': np.array([1, 1, 0, 1, 1], dtype=np.int64),
'mask': np.array([0, 0, 0, 1, 1], dtype=np.int64),
},
{
'inputs': np.array([1, 1, 1, 0, 1], dtype=np.int64),
'targets': np.array([1, 1, 1, 0, 1], dtype=np.int64),
'mask': np.array([0, 0, 0, 0, 1], dtype=np.int64),
},
]
# pylint: enable=bad-whitespace
t5_test_utils.assert_dataset(ds1, expected_ds)
if __name__ == '__main__':
tf.test.main()
| 37.049281
| 97
| 0.679155
|
4a0f9ba2ef1190141c6cafa4dffaf435784457ea
| 5,669
|
py
|
Python
|
tests/changelog_extractor/test_md.py
|
sebix/packaging-utils
|
8e2b21220c18bbd396ffb84c80844cba0c954282
|
[
"0BSD"
] | 4
|
2018-07-27T21:13:38.000Z
|
2022-03-31T14:31:59.000Z
|
tests/changelog_extractor/test_md.py
|
sebix/packaging-utils
|
8e2b21220c18bbd396ffb84c80844cba0c954282
|
[
"0BSD"
] | 10
|
2019-07-06T15:46:46.000Z
|
2021-08-08T14:04:06.000Z
|
tests/changelog_extractor/test_md.py
|
sebix/packaging-utils
|
8e2b21220c18bbd396ffb84c80844cba0c954282
|
[
"0BSD"
] | 1
|
2019-09-04T05:06:36.000Z
|
2019-09-04T05:06:36.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 21 13:11:15 2019
@author: ele
"""
import unittest
from packaging_utils.changelog_extractor.changelog_extractor import convert_base, convert_base_after, convert_markdown
shodan_original = """
CHANGELOG
=========
1.14.0
----------
* New command **shodan version** (#104).
* Only change api_key file permissions if needed (#103)
1.13.0
------
* New command **shodan domain** to lookup a domain in Shodan's DNS database
* Override environment configured settings if explicit proxy settings are supplied (@cudeso)
1.12.1
------
* Fix Excel file conversion that resulted in empty .xlsx files
1.12.0
------
* Add new methods to ignore/ unignore trigger notifications
"""
shodan_expected = """
- update to version 1.14.0:
- New command **shodan version** (#104).
- Only change api_key file permissions if needed (#103)
""".strip()
passivetotal = """
## v2.5.1
#### Enhancements
- Adds support for the Illuminate CTI module with Intel Profile API library
calls and `analzyer` objects. Includes support for all API parameters and
handles pagination automatically.
#### Bug Fixes
- Filter methods on RecordList objects now consistently return lists instead of
filters.
## v2.5.0
#### Enhancements:
- Raise `AnalyzerAPIError` when a non-200 response is returned from the API.
""".strip()
passivetotal_expected = """
- update to version 2.5.1:
- Enhancements:
- Adds support for the Illuminate CTI module with Intel Profile API library
calls and `analzyer` objects. Includes support for all API parameters and
handles pagination automatically.
- Bug Fixes:
- Filter methods on RecordList objects now consistently return lists instead of
filters.
- update to version 2.5.0:
- Enhancements:
- Raise `AnalyzerAPIError` when a non-200 response is returned from the API.
""".strip()
isort = """
Changelog
=========
NOTE: isort follows the [semver](https://semver.org/) versioning standard.
Find out more about isort's release policy [here](https://pycqa.github.io/isort/docs/major_releases/release_policy).
### 5.9.1 June 21st 2021 [hotfix]
- Fixed #1758: projects with many files and skip_ignore set can lead to a command-line overload.
### 5.9.0 June 21st 2021
- Improved CLI startup time.
""".strip()
isort_expected = """
- update to version 5.9.1:
- Fixed #1758: projects with many files and skip_ignore set can lead to a command-line overload.
- update to version 5.9.0:
- Improved CLI startup time.
""".strip()
dateutil = """
Version 2.8.2 (2021-07-08)
==========================
Data updates
------------
- Updated tzdata version to 2021a. (gh pr #1128)
Bugfixes
--------
- Fixed a bug in the parser where non-``ValueError`` exceptions would be raised
during exception handling; this would happen, for example, if an
``IllegalMonthError`` was raised in ``dateutil`` code. Fixed by Mark Bailey.
(gh issue #981, pr #987).
Version 2.8.1 (2019-11-03)
==========================
Data updates
------------
- Updated tzdata version to 2019c.
""".strip()
dateutil_expected = """
- update to version 2.8.2:
- Data updates:
- Updated tzdata version to 2021a. (gh pr #1128)
- Bugfixes:
- Fixed a bug in the parser where non-``ValueError`` exceptions would be raised
during exception handling; this would happen, for example, if an
``IllegalMonthError`` was raised in ``dateutil`` code. Fixed by Mark Bailey.
(gh issue #981, pr #987).
- update to version 2.8.1:
- Data updates:
- Updated tzdata version to 2019c.
""".strip()
pdftk = """
## [3.3.2] - 2021-12-20
### Fixed
- Crash with inputs that contain null pointers
- Support reading xfdf forms from stdin
- Crash with xdf forms with no catalog
- Compatibility with bcprov 1.70
- Crash with non-conforming inputs
""".strip()
pdftk_expected = """
- update to version 3.3.2:
- Fixed:
- Crash with inputs that contain null pointers
- Support reading xfdf forms from stdin
- Crash with xdf forms with no catalog
- Compatibility with bcprov 1.70
- Crash with non-conforming inputs
""".strip()
class TextMd(unittest.TestCase):
    """Markdown changelog conversion, one test case per upstream project."""

    # The fixture strings are long; always show the complete diff.
    maxDiff = None

    def test_shodan(self):
        converted = convert_base(shodan_original, 'shodan')
        converted = convert_markdown(converted)
        converted = convert_base_after(converted, '1.13.0')
        self.assertEqual(converted, shodan_expected)

    def test_passivetotal(self):
        """
        passivetotal has weird sectioning, skipping ###, let's just live with that.
        Also, one time the section header has a trainling comma, the other has not.
        """
        converted = convert_base_after(convert_markdown(passivetotal))
        self.assertEqual(converted, passivetotal_expected)

    def test_isort(self):
        """
        Test isort changelog
        Let's ignore the whitespace before the - list specifiers
        """
        converted = convert_base_after(
            convert_markdown(convert_base(isort, 'isort')))
        self.assertEqual(converted, isort_expected)

    def test_dateutil(self):
        """
        Test dateutil changelog
        Uses first level headings for the versions.
        """
        converted = convert_base_after(
            convert_markdown(convert_base(dateutil, 'dateutil')))
        self.assertEqual(converted, dateutil_expected)

    def test_pdftk(self):
        """
        Test pdftk changelog
        Encloses the [version] in brackets and has two levels with headings (which we ignore for now).
        """
        converted = convert_base_after(
            convert_markdown(convert_base(pdftk, 'pdftk')))
        self.assertEqual(converted, pdftk_expected)
| 28.487437
| 118
| 0.663786
|
4a0f9c379e9021ebb17368f67fd0bf7f4a0ca296
| 6,038
|
py
|
Python
|
tests/test_logging.py
|
wakandan/pytorch-lightning
|
0228fba45a543ec7ebb5d74465c9b516b728e897
|
[
"Apache-2.0"
] | 1
|
2019-12-29T18:55:12.000Z
|
2019-12-29T18:55:12.000Z
|
tests/test_logging.py
|
huntzhan/pytorch-lightning
|
db0587f15823a7ef49f953642abf35f3d73e8324
|
[
"Apache-2.0"
] | null | null | null |
tests/test_logging.py
|
huntzhan/pytorch-lightning
|
db0587f15823a7ef49f953642abf35f3d73e8324
|
[
"Apache-2.0"
] | null | null | null |
import os
import pickle
import numpy as np
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.testing import LightningTestModel
from pytorch_lightning.logging import LightningLoggerBase, rank_zero_only
import tests.utils as tutils
def test_testtube_logger():
    """
    Verify that basic functionality of the test tube logger works:
    a one-epoch fit on a tiny data fraction must succeed.
    """
    tutils.reset_seed()
    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    # Called for its side effect of creating the save directory; the
    # returned path was previously bound to an unused local variable.
    tutils.init_save_dir()
    logger = tutils.get_test_tube_logger(False)

    trainer_options = dict(
        max_nb_epochs=1,
        train_percent_check=0.01,
        logger=logger
    )

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    assert result == 1, "Training failed"

    tutils.clear_save_dir()
def test_testtube_pickle():
    """
    Verify that pickling a trainer containing a test tube logger works,
    and that the unpickled trainer's logger still accepts metrics.
    """
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    # Called for its side effect of creating the save directory; the
    # returned path was previously bound to an unused local variable.
    tutils.init_save_dir()
    logger = tutils.get_test_tube_logger(False)
    logger.log_hyperparams(hparams)
    logger.save()

    trainer_options = dict(
        max_nb_epochs=1,
        train_percent_check=0.01,
        logger=logger
    )

    trainer = Trainer(**trainer_options)
    pkl_bytes = pickle.dumps(trainer)
    trainer2 = pickle.loads(pkl_bytes)
    trainer2.logger.log_metrics({"acc": 1.0})

    tutils.clear_save_dir()
def test_mlflow_logger():
    """
    Verify that basic functionality of the mlflow logger works.
    Skips silently when mlflow is not installed.
    """
    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import MLFlowLogger
    except ModuleNotFoundError:
        # mlflow is an optional dependency; skip silently when absent.
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    root_dir = os.path.dirname(os.path.realpath(__file__))
    mlflow_dir = os.path.join(root_dir, "mlruns")
    logger = MLFlowLogger("test", f"file://{mlflow_dir}")

    trainer_options = dict(
        max_nb_epochs=1,
        train_percent_check=0.01,
        logger=logger
    )

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)
    # Removed a leftover debug print ("result finished") that cluttered
    # the test output.
    assert result == 1, "Training failed"

    tutils.clear_save_dir()
def test_mlflow_pickle():
    """
    verify that pickling trainer with mlflow logger works
    """
    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import MLFlowLogger
    except ModuleNotFoundError:
        # mlflow is an optional dependency; nothing to test without it.
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    root_dir = os.path.dirname(os.path.realpath(__file__))
    mlflow_dir = os.path.join(root_dir, "mlruns")
    logger = MLFlowLogger("test", f"file://{mlflow_dir}")

    trainer_options = {
        'max_nb_epochs': 1,
        'logger': logger,
    }
    trainer = Trainer(**trainer_options)

    # Round-trip the whole trainer through pickle and make sure the
    # restored logger still accepts metrics.
    payload = pickle.dumps(trainer)
    restored = pickle.loads(payload)
    restored.logger.log_metrics({"acc": 1.0})

    tutils.clear_save_dir()
def test_comet_logger():
    """
    Verify that basic functionality of the Comet.ml logger works.
    Skips silently when comet_ml is not installed.
    """
    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import CometLogger
    except ModuleNotFoundError:
        # comet_ml is an optional dependency; skip silently when absent.
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    root_dir = os.path.dirname(os.path.realpath(__file__))
    comet_dir = os.path.join(root_dir, "cometruns")

    # We test CometLogger in offline mode with local saves
    logger = CometLogger(
        save_dir=comet_dir,
        project_name="general",
        workspace="dummy-test",
    )

    trainer_options = dict(
        max_nb_epochs=1,
        train_percent_check=0.01,
        logger=logger
    )

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)
    # Removed a leftover debug print ("result finished") that cluttered
    # the test output.
    assert result == 1, "Training failed"

    tutils.clear_save_dir()
def test_comet_pickle():
    """
    verify that pickling trainer with comet logger works
    """
    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import CometLogger
    except ModuleNotFoundError:
        # comet_ml is an optional dependency; nothing to test without it.
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    root_dir = os.path.dirname(os.path.realpath(__file__))
    comet_dir = os.path.join(root_dir, "cometruns")

    # We test CometLogger in offline mode with local saves
    logger = CometLogger(
        save_dir=comet_dir,
        project_name="general",
        workspace="dummy-test",
    )

    trainer_options = {
        'max_nb_epochs': 1,
        'logger': logger,
    }
    trainer = Trainer(**trainer_options)

    # Round-trip the whole trainer through pickle and make sure the
    # restored logger still accepts metrics.
    payload = pickle.dumps(trainer)
    restored = pickle.loads(payload)
    restored.logger.log_metrics({"acc": 1.0})

    tutils.clear_save_dir()
def test_custom_logger(tmpdir):
    """
    Verify that a user-defined LightningLoggerBase subclass receives the
    hyperparameters, metrics and final status from the Trainer.
    """
    class CustomLogger(LightningLoggerBase):
        def __init__(self):
            super().__init__()
            self.hparams_logged = None
            self.metrics_logged = None
            # Initialise the attribute that finalize() actually sets, so it
            # exists even if finalize() is never called.  (The original
            # initialised an unused 'finalized' flag instead.)
            self.finalized_status = None

        @rank_zero_only
        def log_hyperparams(self, params):
            self.hparams_logged = params

        @rank_zero_only
        def log_metrics(self, metrics, step_num):
            self.metrics_logged = metrics

        @rank_zero_only
        def finalize(self, status):
            self.finalized_status = status

        @property
        def name(self):
            return "name"

        @property
        def version(self):
            return "1"

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    logger = CustomLogger()

    trainer_options = dict(
        max_nb_epochs=1,
        train_percent_check=0.01,
        logger=logger,
        default_save_path=tmpdir
    )

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)
    assert result == 1, "Training failed"
    assert logger.hparams_logged == hparams
    assert logger.metrics_logged != {}
    assert logger.finalized_status == "success"
| 23.585938
| 73
| 0.661477
|
4a0f9c3fedd16990e11fe36e819f9e77c3f8f398
| 6,669
|
py
|
Python
|
results/migrations/0016_add_results_14_data.py
|
kinaklub/next.filmfest.by
|
b537c0d2dac4195e9e7b460c569007d20a5954e7
|
[
"Unlicense"
] | 7
|
2016-07-18T07:37:37.000Z
|
2022-03-23T08:12:04.000Z
|
results/migrations/0016_add_results_14_data.py
|
kinaklub/next.filmfest.by
|
b537c0d2dac4195e9e7b460c569007d20a5954e7
|
[
"Unlicense"
] | 119
|
2015-11-08T07:16:44.000Z
|
2022-03-11T23:25:53.000Z
|
results/migrations/0016_add_results_14_data.py
|
kinaklub/next.filmfest.by
|
b537c0d2dac4195e9e7b460c569007d20a5954e7
|
[
"Unlicense"
] | 3
|
2016-07-21T17:22:31.000Z
|
2016-10-04T08:38:48.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import json
from itertools import chain
from django.core.files import File
from django.db import migrations
from django.utils.text import slugify
from cpm_generic.migration_utils import (add_subpage, get_content_type,
get_image_model, remove_subpage)
MIGRATION_DIR = os.path.dirname(__file__)
def get_jury_data():
    """Load and return the jury-member fixture data shipped next to this
    migration (``0016_add_results_14_data/jury.json``).
    """
    jury_json = os.path.join(
        MIGRATION_DIR,
        '0016_add_results_14_data/jury.json'
    )
    # Use a context manager so the file handle is closed even if parsing
    # fails; the original left the handle open.
    # NOTE(review): the positional 'utf8' encoding argument only exists on
    # Python 2's json.load; it must be dropped if this ever runs on Python 3.
    with open(jury_json, 'rb') as fp:
        return json.load(fp, 'utf8')
def add_jury_member_pages(apps, schema_editor):
    """Forward migration: create a JuryMemberPage (with uploaded photo) for
    every entry in the jury.json fixture, parented under the 'jury' index
    page.
    """
    Image = get_image_model(apps)
    IndexPage = apps.get_model('cpm_generic.IndexPage')
    JuryMemberPage = apps.get_model("results.JuryMemberPage")
    Collection = apps.get_model('wagtailcore.Collection')
    jury_member_page_ct = get_content_type(apps, 'results', 'jurymemberpage')
    # The root collection (depth=1) holds the uploaded photos.
    collection_id = Collection.objects.filter(depth=1)[0]
    juryindex_page = IndexPage.objects.get(slug='jury')
    for item in get_jury_data():
        # Store the photo file referenced by the fixture entry.
        photo = Image(title=item['title'], collection=collection_id)
        photo_file = os.path.join(MIGRATION_DIR, item['photo'])
        photo.file.save(
            name=item['title'] + os.extsep + item['photo_ext'],
            content=File(open(photo_file, 'rb'))
        )
        photo.save()
        slug = slugify(item['title'])
        add_subpage(
            parent=juryindex_page,
            model=JuryMemberPage,
            title=item['title'],
            slug=slug,
            name_en=item['name_en'],
            name_be=item['name_be'],
            name_ru=item['name_ru'],
            info_en=item['info_en'],
            info_be=item['info_be'],
            info_ru=item['info_ru'],
            country=item['country'],
            photo=photo,
            content_type=jury_member_page_ct,
        )
def _add_year_results(apps, page_kwargs, jury_members):
    """Create a ResultsPage under the home page and link the given jury
    members (looked up by page title) to it, preserving their order.
    """
    HomePage = apps.get_model('home.HomePage')
    JuryMemberPage = apps.get_model("results.JuryMemberPage")
    RelatedJuryMember = apps.get_model('results.ResultsRelatedJuryMember')
    ResultsPage = apps.get_model('results.ResultsPage')
    results_page_ct = get_content_type(apps, 'results', 'resultspage')
    homepage = HomePage.objects.get(slug='home')
    year_results_page = add_subpage(
        homepage,
        ResultsPage,
        content_type=results_page_ct,
        **page_kwargs
    )
    # sort_order keeps the jury members displayed in the given list order.
    RelatedJuryMember.objects.bulk_create(
        [
            RelatedJuryMember(
                sort_order=index,
                jury_member=JuryMemberPage.objects.get(title=title),
                page=year_results_page,
            ) for index, title in enumerate(jury_members)
        ]
    )
def _get_jurymember_kw(item):
    """Map a jury.json entry onto JuryMemberPage keyword arguments."""
    copied_fields = ('name_en', 'name_be', 'name_ru',
                     'info_en', 'info_be', 'info_ru', 'country')
    kwargs = {'title': item['title'], 'slug': slugify(item['title'])}
    kwargs.update({field: item[field] for field in copied_fields})
    return kwargs
def remove_jury_member_pages(apps, schema_editor):
    """Reverse migration: delete the jury member pages and their photos
    created by add_jury_member_pages.
    """
    Image = get_image_model(apps)
    Collection = apps.get_model('wagtailcore.Collection')
    IndexPage = apps.get_model('cpm_generic.IndexPage')
    JuryMemberPage = apps.get_model("results.JuryMemberPage")
    jury_member_page_ct = get_content_type(apps, 'results', 'jurymemberpage')
    # Photos were stored in the root collection (depth=1).
    collection_id = Collection.objects.filter(depth=1)[0]
    juryindex_page = IndexPage.objects.get(slug='jury')
    for item in get_jury_data():
        photo = Image.objects.get(title=item['title'],
                                  collection=collection_id)
        photo.delete()
        remove_subpage(
            parent=juryindex_page,
            model=JuryMemberPage,
            content_type=jury_member_page_ct,
            **_get_jurymember_kw(item)
        )
def add_results_2014(apps, schema_editor):
    """Forward migration: create the 'Results 2014' page and attach its
    jury members.

    Shares the data returned by _get_data_2014 with remove_results_2014,
    so the forward and reverse directions cannot drift apart (the original
    duplicated the same page kwargs and jury list inline).
    """
    page_kwargs, jury_members = _get_data_2014()
    _add_year_results(apps, page_kwargs, jury_members)
def _remove_year_results(apps, page_kwargs, jury_members):
    """Reverse helper: delete the ResultsPage created by _add_year_results
    and the jury-member link rows that pointed at it.
    """
    HomePage = apps.get_model('home.HomePage')
    JuryMemberPage = apps.get_model("results.JuryMemberPage")
    RelatedJuryMember = apps.get_model('results.ResultsRelatedJuryMember')
    ResultsPage = apps.get_model('results.ResultsPage')
    results_page_ct = get_content_type(apps, 'results', 'resultspage')
    homepage = HomePage.objects.get(slug='home')
    results12_page = remove_subpage(
        homepage,
        ResultsPage,
        content_type=results_page_ct,
        **page_kwargs
    )
    # Collect the ids of every link row for the removed page, then delete
    # them all in a single query.
    related_jury_ids = chain.from_iterable(
        RelatedJuryMember.objects.filter(
            jury_member=JuryMemberPage.objects.get(title=title),
            page=results12_page,
        ).values_list('id', flat=True) for title in jury_members
    )
    RelatedJuryMember.objects.filter(id__in=related_jury_ids).delete()
def _get_data_2014():
page_kwargs = dict(
title=u'Results 2014',
slug='results2014',
caption_en='2014: good memories',
caption_be='2014: добрыя ўспаміны',
caption_ru='2014: хорошие воспоминания',
)
jury_members = [
'Yuri Igrusha',
'Valentyna Zalevska',
'Goh Choon Ean',
'Alexei Tutkin',
'Carin Bräck',
'Lidia Mikheeva',
'Youlian Tabakov',
'David Roberts',
'Filmgruppe Chaos',
'Pierre-Luc Vaillancourt - 2',
'Christophe Beaucourt',
]
return page_kwargs, jury_members
def remove_results_2014(apps, schema_editor):
    """Reverse of add_results_2014: detach jury links and delete the page."""
    _remove_year_results(apps, *_get_data_2014())
class Migration(migrations.Migration):
    """Data migration: creates the jury member pages and the 'Results 2014'
    results page.  Both operations are reversible.
    """
    dependencies = [
        ('results', '0015_resultsrelatedpartner'),
        ('wagtailimages', '0011_image_collection'),
    ]
    operations = [
        migrations.RunPython(add_jury_member_pages, remove_jury_member_pages),
        migrations.RunPython(add_results_2014, remove_results_2014),
    ]
| 30.040541
| 78
| 0.62948
|
4a0f9d2958c0b867262f9493f3be9115fe98f4d3
| 7,871
|
py
|
Python
|
CHECLabPy/utils/resolutions.py
|
sstcam/CHECLabPy
|
c67bf0b190ba4b799d4da150591d602e16b1d6b0
|
[
"BSD-3-Clause"
] | 4
|
2018-04-23T09:14:21.000Z
|
2019-05-02T22:12:47.000Z
|
CHECLabPy/utils/resolutions.py
|
watsonjj/CHECLabPy
|
c67bf0b190ba4b799d4da150591d602e16b1d6b0
|
[
"BSD-3-Clause"
] | 28
|
2018-03-29T21:50:45.000Z
|
2019-11-12T07:51:01.000Z
|
CHECLabPy/utils/resolutions.py
|
watsonjj/CHECLabPy
|
c67bf0b190ba4b799d4da150591d602e16b1d6b0
|
[
"BSD-3-Clause"
] | 16
|
2018-03-23T15:29:38.000Z
|
2019-07-24T12:19:51.000Z
|
import numpy as np
import pandas as pd
class ChargeResolution:
    """Iterative, low-memory charge-resolution accumulator.

    Contributions are appended as small DataFrames and only merged
    (summed per (pixel, true charge) pair) once the buffered memory grows
    past ``_max_bytes`` or when :meth:`finish` is called.  No ordering of
    the input is assumed, and the true charge does not need to be an
    integer.

    Parameters
    ----------
    mc_true : bool
        True when the "true charge" values come from sim_telarray files
        (i.e. without Poisson error); the Poisson term is then added back
        into the resolution.
    """
    def __init__(self, mc_true):
        self._mc_true = mc_true
        self._df_list = []          # buffered, un-merged contributions
        self._df = pd.DataFrame()   # merged sums per (pixel, true)
        self._n_bytes = 0           # bytes currently buffered in _df_list
        self._max_bytes = 1E9

    @staticmethod
    def rmse_abs(sum_, n):
        """Absolute RMSE from a sum of squared differences and a count."""
        return np.sqrt(sum_ / n)

    @staticmethod
    def rmse(true, sum_, n):
        """Fractional RMSE (relative to the true charge)."""
        return ChargeResolution.rmse_abs(sum_, n) / np.abs(true)

    @staticmethod
    def charge_res_abs(true, sum_, n):
        """Absolute charge resolution including the Poisson term."""
        return np.sqrt((sum_ / n) + true)

    @staticmethod
    def charge_res(true, sum_, n):
        """Fractional charge resolution including the Poisson term."""
        return ChargeResolution.charge_res_abs(true, sum_, n) / np.abs(true)

    def add(self, pixel, true, measured):
        """Contribute entries.

        Parameters
        ----------
        pixel : ndarray
            1D array containing the pixel for each entry
        true : ndarray
            1D array containing the true charge for each entry
        measured : ndarray
            1D array containing the measured charge for each entry
        """
        squared_error = (measured - true) ** 2
        contribution = pd.DataFrame(dict(
            pixel=pixel,
            true=true,
            sum=squared_error,
            n=np.uint32(1)
        ))
        self._df_list.append(contribution)
        self._n_bytes += contribution.memory_usage(index=True, deep=True).sum()
        if self._n_bytes > self._max_bytes:
            self._amalgamate()

    def _amalgamate(self):
        """Merge buffered contributions, summing per (pixel, true)."""
        merged = pd.concat([self._df, *self._df_list], ignore_index=True)
        self._df = merged.groupby(['pixel', 'true']).sum().reset_index()
        self._n_bytes = 0
        self._df_list = []

    def finish(self):
        """Perform the final merge and compute the charge resolution.

        Returns
        -------
        df_pixel : pd.DataFrame
            Charge resolution per pixel.
        df_camera : pd.DataFrame
            Charge resolution for the entire camera.
        """
        self._amalgamate()
        # true == 0 would divide by zero in the fractional resolutions.
        self._df = self._df.loc[self._df['true'] != 0]

        def attach_resolution(df):
            # Append the resolution columns computed from the running sums.
            true = df['true'].values
            sum_ = df['sum'].values
            n = df['n'].values
            if self._mc_true:
                df['charge_resolution'] = self.charge_res(true, sum_, n)
                df['charge_resolution_abs'] = self.charge_res_abs(true, sum_, n)
            else:
                df['charge_resolution'] = self.rmse(true, sum_, n)
                df['charge_resolution_abs'] = self.rmse_abs(sum_, n)
            return df

        df_pixel = attach_resolution(self._df.copy())
        df_camera = self._df.copy().groupby('true').sum().reset_index()
        df_camera = attach_resolution(df_camera.drop(columns='pixel'))
        return df_pixel, df_camera
class ChargeStatistics:
    """Iterative, low-memory mean/std accumulator for measured charge.

    Uses the same buffering strategy as ChargeResolution: contributions
    are kept in a list of small DataFrames and merged per
    (pixel, amplitude) pair once the buffer exceeds ``_max_bytes`` or on
    :meth:`finish`.  No ordering of the input is assumed.
    """
    def __init__(self):
        self._df_list = []          # buffered, un-merged contributions
        self._df = pd.DataFrame()   # merged sums per (pixel, amplitude)
        self._n_bytes = 0           # bytes currently buffered in _df_list
        self._max_bytes = 1E9

    def add(self, pixel, amplitude, charge):
        """Contribute entries.

        Parameters
        ----------
        pixel : ndarray
            1D array containing the pixel for each entry
        amplitude : ndarray
            1D array containing the input amplitude for each entry
        charge : ndarray
            1D array containing the measured charge for each entry
        """
        contribution = pd.DataFrame(dict(
            pixel=pixel,
            amplitude=amplitude,
            sum=charge,
            sum2=charge**2,
            n=np.uint32(1)
        ))
        self._df_list.append(contribution)
        self._n_bytes += contribution.memory_usage(index=True, deep=True).sum()
        if self._n_bytes > self._max_bytes:
            self._amalgamate()

    def _amalgamate(self):
        """Merge buffered contributions, summing per (pixel, amplitude)."""
        merged = pd.concat([self._df, *self._df_list], ignore_index=True)
        self._df = merged.groupby(['pixel', 'amplitude']).sum().reset_index()
        self._n_bytes = 0
        self._df_list = []

    def finish(self):
        """Perform the final merge and compute the charge statistics.

        Returns
        -------
        df_pixel : pd.DataFrame
            Charge statistics per pixel.
        df_camera : pd.DataFrame
            Charge statistics for the entire camera.
        """
        self._amalgamate()

        def attach_stats(df):
            # mean and population std from the running sum / sum-of-squares.
            mean = df['sum'].values / df['n'].values
            df['mean'] = mean
            df['std'] = np.sqrt((df['sum2'].values / df['n'].values) - mean ** 2)
            return df

        df_pixel = attach_stats(self._df.copy())
        df_camera = self._df.copy().groupby('amplitude').sum().reset_index()
        df_camera = attach_stats(df_camera.drop(columns='pixel'))
        return df_pixel, df_camera
class IntensityResolution(ChargeResolution):
    """Identical to ChargeResolution; separate name for intensity-based use."""
    pass
class IntensityStatistics(ChargeStatistics):
    """Identical to ChargeStatistics; separate name for intensity-based use."""
    pass
| 33.351695
| 79
| 0.590522
|
4a0f9df83bb819ae5fdcd244f785a5ab6ceb9beb
| 4,394
|
py
|
Python
|
src/main/python/rlbot/agents/executable_with_socket_agent.py
|
Arik/RLBot
|
80bed2af67a4979b25abba012ef84924e74bfbe1
|
[
"MIT"
] | 3
|
2019-08-04T20:21:30.000Z
|
2019-09-23T14:06:14.000Z
|
src/main/python/rlbot/agents/executable_with_socket_agent.py
|
Arik/RLBot
|
80bed2af67a4979b25abba012ef84924e74bfbe1
|
[
"MIT"
] | null | null | null |
src/main/python/rlbot/agents/executable_with_socket_agent.py
|
Arik/RLBot
|
80bed2af67a4979b25abba012ef84924e74bfbe1
|
[
"MIT"
] | 1
|
2019-09-02T06:48:33.000Z
|
2019-09-02T06:48:33.000Z
|
import os
import socket
import time
import psutil
from rlbot.agents.base_independent_agent import BaseIndependentAgent
from rlbot.botmanager.helper_process_request import HelperProcessRequest
from rlbot.utils.logging_utils import get_logger
from rlbot.utils.structures import game_interface
from rlbot.utils.structures.game_data_struct import GameTickPacket
from rlbot.utils.structures.game_interface import GameInterface
class ExecutableWithSocketAgent(BaseIndependentAgent):
    """Agent whose bot logic lives in an external executable that is
    controlled over a localhost TCP socket.

    Subclasses must implement :meth:`get_port`, and usually set
    ``self.executable_path`` so the executable can be auto-started.
    """

    def __init__(self, name, team, index):
        super().__init__(name, team, index)
        self.logger = get_logger('ExeSocket' + str(self.index))
        self.is_retired = False
        self.executable_path = None  # configured by subclasses
        self.game_interface = GameInterface(self.logger)
        self.game_tick_packet = GameTickPacket()
        self.spawn_id_seen = False  # set once our car appears in a live round

    def run_independently(self, terminate_request_event):
        """Poll the game state once a second, keeping the bot registered
        with the socket server; exit (triggering retirement) when the
        car's spawn id changes after having been seen.
        """
        self.game_interface.load_interface()
        while not terminate_request_event.is_set():
            self.game_interface.update_live_data_packet(self.game_tick_packet)
            packet_spawn_id = self.game_tick_packet.game_cars[self.index].spawn_id
            if self.spawn_id_seen:
                if packet_spawn_id != self.spawn_id:
                    break  # This will cause the bot to retire.
            elif packet_spawn_id == self.spawn_id and self.game_tick_packet.game_info.is_round_active:
                self.spawn_id_seen = True

            # Continuously make sure the bot is registered.
            # These functions can be called repeatedly without any bad effects.
            # This is useful for re-engaging the socket server if it gets
            # restarted during development.
            message = self.build_add_command()
            self.send_command(message)
            time.sleep(1)

    def get_helper_process_request(self):
        """Ask the framework to launch the executable (passing the port as
        its only argument) when a path is configured; otherwise None.
        """
        if self.is_executable_configured():
            return HelperProcessRequest(python_file_path=None, key=__file__ + str(self.get_port()),
                                        executable=self.executable_path, exe_args=[str(self.get_port())],
                                        current_working_directory=os.path.dirname(self.executable_path))
        return None

    def retire(self):
        """Tell the socket server to drop this bot and mark it retired."""
        message = self.build_retire_command()
        self.logger.info(f"Sending retire message for {self.name}")
        self.send_command(message)
        self.is_retired = True

    def build_add_command(self) -> str:
        """Build the command registering this bot (name, team, index, dll dir)."""
        return f"add\n{self.name}\n{self.team}\n{self.index}\n{game_interface.get_dll_directory()}"

    def build_retire_command(self) -> str:
        """Build the command unregistering this bot by index."""
        return f"remove\n{self.index}"

    def send_command(self, message):
        """Send *message* to the local socket server.

        Returns True on success, False if the connection was refused.
        """
        try:
            # Use the socket as a context manager so the descriptor is
            # always closed, even when connect()/send() raises — the
            # original leaked the socket on failure.
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.settimeout(4)
                s.connect(("127.0.0.1", self.get_port()))
                s.send(bytes(message, "ASCII"))
            return True
        except ConnectionRefusedError:
            self.logger.warn("Could not connect to server!")
            return False

    def is_executable_configured(self):
        """True when executable_path is set and points at an existing file."""
        return self.executable_path is not None and os.path.isfile(self.executable_path)

    def get_extra_pids(self):
        """
        Gets the list of process ids that should be marked as high priority.
        :return: A list of process ids that are used by this bot in addition to the ones inside the python process.
        """
        if self.is_executable_configured():
            # The helper process will start the exe and report the PID. Nothing to do here.
            return []
        while not self.is_retired:
            # Find whatever process is already listening on our port.
            for proc in psutil.process_iter():
                for conn in proc.connections():
                    if conn.laddr.port == self.get_port():
                        self.logger.debug(f'server for {self.name} appears to have pid {proc.pid}')
                        return [proc.pid]
            time.sleep(1)
            if self.executable_path is None:
                self.logger.info(
                    "Can't auto-start because no executable is configured. Please start manually!")
            else:
                self.logger.info(f"Can't auto-start because {self.executable_path} is not found. "
                                 "Please start manually!")

    def get_port(self) -> int:
        """Port of the bot's socket server; must be provided by subclasses."""
        raise NotImplementedError
| 41.847619
| 115
| 0.645198
|
4a0f9f79e017dc658686c8cfad5e5f37268291ac
| 22,454
|
py
|
Python
|
kumex/marke_data/market_data.py
|
grape-cola/kumex-python-sdk-1
|
d43b15ff1c002d65fc68ea12d21ec32b49cf18fd
|
[
"MIT"
] | 14
|
2019-10-12T08:01:11.000Z
|
2021-11-16T23:48:52.000Z
|
kumex/marke_data/market_data.py
|
grape-cola/kumex-python-sdk-1
|
d43b15ff1c002d65fc68ea12d21ec32b49cf18fd
|
[
"MIT"
] | 2
|
2020-03-23T18:14:20.000Z
|
2021-08-28T05:28:55.000Z
|
kumex/marke_data/market_data.py
|
grape-cola/kumex-python-sdk-1
|
d43b15ff1c002d65fc68ea12d21ec32b49cf18fd
|
[
"MIT"
] | 6
|
2019-10-12T07:09:51.000Z
|
2021-07-13T16:16:13.000Z
|
from kumex.base_request.base_request import KumexBaseRestApi
class MarketData(KumexBaseRestApi):
def get_server_timestamp(self):
"""
https://docs.kumex.com/#server-time
get server timestamp
:return: 1570609496404
"""
return self._request('GET', '/api/v1/timestamp', auth=False)
def get_interest_rate(self, symbol, startAt=None, endAt=None, reverse=True, offset=0, forward=True, maxCount=10):
"""
https://docs.kumex.com/#get-interest-rate-list
:param symbol: interest symbol (Mandatory)
:type: str
:param startAt: start time(milisecond) (optional)
:type: int
:param endAt: end time(milisecond) (optional)
:type: int
:param reverse: is reverse? (optional)
:type: bool
:param offset: Start offset. The unique attribute of the last returned result of the last request. The data of (optional)
the first page will be returned by default
:type: int
:param forward: his parameter functions to judge whether the lookup is forward or not (optional)
:type: bool
:param maxCount:Max record count (optional)
:type : int
:return:
{'dataList':
[
{'symbol': '.XBTINT', 'granularity': 60000, 'timePoint': 1570611840000, 'value': 0.0003},
{'symbol': '.XBTINT', 'granularity': 60000, 'timePoint': 1570611780000, 'value': 0.0003},
{'symbol': '.XBTINT', 'granularity': 60000, 'timePoint': 1570611720000, 'value': 0.0003},
{'symbol': '.XBTINT', 'granularity': 60000, 'timePoint': 1570611660000, 'value': 0.0003},
{'symbol': '.XBTINT', 'granularity': 60000, 'timePoint': 1570611600000, 'value': 0.0003},
{'symbol': '.XBTINT', 'granularity': 60000, 'timePoint': 1570611540000, 'value': 0.0003},
{'symbol': '.XBTINT', 'granularity': 60000, 'timePoint': 1570611480000, 'value': 0.0003},
{'symbol': '.XBTINT', 'granularity': 60000, 'timePoint': 1570611420000, 'value': 0.0003},
{'symbol': '.XBTINT', 'granularity': 60000, 'timePoint': 1570611360000, 'value': 0.0003},
{'symbol': '.XBTINT', 'granularity': 60000, 'timePoint': 1570611300000, 'value': 0.0003}
],
'hasMore': True}
"""
params = {'symbol': symbol}
if startAt:
params['startAt'] = startAt
if endAt:
params['endAt'] = endAt
if reverse:
params['reverse'] = reverse
if offset:
params['offset'] = offset
if forward:
params['forward'] = forward
if maxCount:
params['maxCount'] = maxCount
return self._request('GET', '/api/v1/interest/query', auth=False, params=params)
def get_index_list(self, symbol, startAt=None, endAt=None, reverse=True, offset=0, forward=True, maxCount=10):
"""
https://docs.kumex.com/#get-index-list
:param symbol: interest symbol (Mandatory)
:type: str
:param startAt: start time(milisecond) (optional)
:type: int
:param endAt: end time(milisecond) (optional)
:type: int
:param reverse: is reverse? (optional)
:type: bool
:param offset: Start offset. The unique attribute of the last returned result of the last request. The data of (optional)
the first page will be returned by default
:type: int
:param forward: his parameter functions to judge whether the lookup is forward or not (optional)
:type: bool
:param maxCount:Max record count (optional)
:type : int
:return:
{'dataList':
[
{'symbol': '.BXBT','decomposionList': [
{'price': 8214.62, 'weight': 0.05746732, 'exchange': 'gemini'},
{'price': 8212.4, 'weight': 0.1896515, 'exchange': 'kraken'},
{'price': 8206.35, 'weight': 0.43039379, 'exchange': 'coinbase'},
{'price': 8221.60243, 'weight': 0.03994456, 'exchange': 'liquid'},
{'price': 8211.981, 'weight': 0.02333998, 'exchange': 'bittrex'}, {
'price': 8206.47, 'weight': 0.25920285, 'exchange': 'bitstamp'}],
'granularity': 5000, 'timePoint': 1570612465000, 'value': 8208.74},
{'symbol': '.BXBT','decomposionList': [
{'price': 8214.62, 'weight': 0.05746732, 'exchange': 'gemini'},
{'price': 8212.4, 'weight': 0.1896515, 'exchange': 'kraken'},
{'price': 8208.98, 'weight': 0.43039379, 'exchange': 'coinbase'},
{'price': 8221.60243, 'weight': 0.03994456, 'exchange': 'liquid'},
{'price': 8211.981, 'weight': 0.02333998, 'exchange': 'bittrex'},
{'price': 8207.47, 'weight': 0.25920285, 'exchange': 'bitstamp'}],
'granularity': 5000, 'timePoint': 1570612460000, 'value': 8210.14},
{......
],
'hasMore': True}
"""
params = {'symbol': symbol}
if startAt:
params['startAt'] = startAt
if endAt:
params['endAt'] = endAt
if reverse:
params['reverse'] = reverse
if offset:
params['offset'] = offset
if forward:
params['forward'] = forward
if maxCount:
params['maxCount'] = maxCount
return self._request('GET', '/api/v1/index/query', auth=False, params=params)
def get_current_mark_price(self, symbol):
"""
https://docs.kumex.com/#get-current-mark-price
:param symbol:
:type: str
:return: {'symbol': 'XBTUSDM', 'indexPrice': 8194.22, 'granularity': 5000, 'timePoint': 1570613025000, 'value': 8194.49}
"""
return self._request('GET', '/api/v1/mark-price/{symbol}/current'.format(symbol=symbol), auth=False)
def get_premium_index(self, symbol, startAt=None, endAt=None, reverse=True, offset=0, forward=True, maxCount=10):
"""
https://docs.kumex.com/#get-premium-index
:param symbol: interest symbol (Mandatory)
:type: str
:param startAt: start time(milisecond) (optional)
:type: int
:param endAt: end time(milisecond) (optional)
:type: int
:param reverse: is reverse? (optional)
:type: bool
:param offset: Start offset. The unique attribute of the last returned result of the last request. The data of (optional)
the first page will be returned by default
:type: int
:param forward: his parameter functions to judge whether the lookup is forward or not (optional)
:type: bool
:param maxCount:Max record count (optional)
:type : int
:return:
{
"dataList": [
{
"symbol": ".XBTUSDMPI", //Premium index symbol
"granularity": 60000, //Granularity (milisecond)
"timePoint": 1558000320000, //Time point (milisecond)
"value": 0.022585 //Premium index
},
{
"symbol": ".XBTUSDMPI",
"granularity": 60000,
"timePoint": 1558000260000,
"value": 0.022611
},
......
],
"hasMore": true //Whether there are more pages
}
"""
params = {'symbol': symbol}
if startAt:
params['startAt'] = startAt
if endAt:
params['endAt'] = endAt
if reverse:
params['reverse'] = reverse
if offset:
params['offset'] = offset
if forward:
params['forward'] = forward
if maxCount:
params['maxCount'] = maxCount
return self._request('GET', '/api/v1/premium/query', auth=False, params=params)
def get_current_fund_rate(self, symbol):
"""
https://docs.kumex.com/#get-current-funding-rate
:param symbol: type str (Mandatory)
:return:
{
"symbol": ".XBTUSDMFPI8H", //Funding Rate Symbol
"granularity": 28800000, //Granularity (milisecond)
"timePoint": 1558000800000, //Time point (milisecond)
"value": 0.00375, //Funding rate
"predictedValue": 0.00375 //Predicted funding rate
}
"""
return self._request('GET', '/api/v1/funding-rate/{symbol}/current'.format(symbol=symbol), auth=False)
def get_trade_history(self, symbol):
"""
https://docs.kumex.com/#transaction-history
:param symbol: type str (Mandatory)
:return:
[{
"sequence": 102, //Sequence number
"tradeId": "5cbd7377a6ffab0c7ba98b26", //Transaction ID
"takerOrderId": "5cbd7377a6ffab0c7ba98b27", //Taker order ID
"makerOrderId": "5cbd7377a6ffab0c7ba98b28", //Maker order ID
"price": "7000.0", //Filled price
"size": 0.1, //Filled quantity
"side": "buy", //Side-taker
"ts": 1545904567062140823 //Filled time - nanosecond
},
.......]
"""
params = {
'symbol': symbol
}
return self._request('GET', '/api/v1/trade/history', auth=False, params=params)
def l2_order_book(self, symbol):
"""
https://docs.kumex.com/#get-full-order-book-level-2
:param symbol: type tar (Mandatory)
:return:
{
"symbol": "XBTUSDM", //Symbol
"sequence": 100, //Ticker sequence number
"asks": [
["5000.0", 1000], //Price, quantity
["6000.0", 1983], //Price, quantity
......
],
"bids": [
["3200.0", 800], //Price, quantity
["3100.0", 100], //Price, quantity
......
]
}
"""
params = {
"symbol": symbol
}
return self._request('GET', '/api/v1/level2/snapshot', auth=False, params=params)
def l2_part_order_book(self, symbol, depth=20):
"""
https://docs.kucoin.com/futures/#get-part-order-book-level-2
:param symbol: type tar (Mandatory)
:return:
{
"code": "200000",
"data": {
"symbol": "XBTUSDM", //Symbol
"sequence": 100, //Ticker sequence number
"asks": [
["5000.0", 1000], //Price, quantity
["6000.0", 1983] //Price, quantity
],
"bids": [
["3200.0", 800], //Price, quantity
["3100.0", 100] //Price, quantity
]
}
}
"""
params = {
"symbol": symbol
}
return self._request('GET', f'/api/v1/level2/depth{depth}', auth=False, params=params)
def get_l2_messages(self, symbol, start, end):
"""
:param symbol: type tar (Mandatory)
:type: str
:param start: Start sequence number (included in the returned data) (Mandatory)
:type: int
:param end: End sequence number (included in the returned data) (Mandatory)
:type: int
:return:
"""
params = {
'symbol': symbol,
'start': start,
'end': end
}
return self._request('GET', '/api/v1/level2/message/query', auth=False, params=params)
def l3_order_book(self, symbol):
"""
https://docs.kumex.com/#get-full-order-book-level-3
:param symbol: type tar (Mandatory)
:return:
{
"code": "200000",
"data": {
"symbol": "XBTUSDM", //Symbol
"sequence": 100, //The sequence number of the last received message in building a Level 3 order book
"bids": [[5567483701231, "dfa123124", "123.12312", 10, 5567483701231], ...], //Selling data: order placing time - nanosecond, order ID, price, quantity, time at which the order enters the order book - nanosecond
"asks": [[5567483701231, "dfa123124", "123.12312", 10, 5567483701231], ...] //Buying data: order placing time - nanosecond, order ID, price, quantity, time at which the order enters the order book- nanosecond
}
}
"""
params = {
"symbol": symbol
}
return self._request('GET', '/api/v1/level3/snapshot', auth=False, params=params)
def l3_order_book_v2(self, symbol):
"""
https://docs.kucoin.com/futures/#get-full-order-book-level-3-v2
:param symbol: type tar (Mandatory)
:return:
{
"code": "200000",
"data": {
"symbol": "XBTUSDM", //Symbol
"sequence": 100, //The sequence number of the last received message in building a Level 3 order book
"bids": [[5567483701231, "dfa123124", "123.12312", 10, 5567483701231], ...], //Selling data: order placing time - nanosecond, order ID, price, quantity, time at which the order enters the order book - nanosecond
"asks": [[5567483701231, "dfa123124", "123.12312", 10, 5567483701231], ...] //Buying data: order placing time - nanosecond, order ID, price, quantity, time at which the order enters the order book- nanosecond
}
}
"""
params = {
"symbol": symbol
}
return self._request('GET', '/api/v2/level3/snapshot', auth=False, params=params)
def get_l3_messages(self, symbol, start, end):
"""
:param symbol: type tar (Mandatory)
:type: str
:param start: Start sequence number (included in the returned data) (Mandatory)
:type: int
:param end: End sequence number (included in the returned data) (Mandatory)
:type: int
:return:
[
{
"symbol": "XBTUSDM", //Symbol
"sequence": 1, //Message sequence number
"side": "sell", //Order side
"orderTime": 1558074650840002300, //Order placing time
"size": 10, //Order quantity
"orderId": "5cde551aa14a9cad7e454374", //Order ID
"price": "7000.0", //Order price
"type": "open", //Message type
"clientOid": "xxxxxxxxxx", //Optional, this is a user-defined parameter which is used to identify the order
"ts": 1558074652423004000 //Time at which the order enters the order book- nanosecond
},
{
"symbol": "XBTUSDM", //Symbol
"reason": "canceled", //Reason: canceld or filled
"sequence": 2, //Message sequence number
"orderId": "5cde551aa14a9cad7e454374", //Order ID
"type": "done", //Message type
"ts": 1558075303543002400 //Time at which the order is removed- nanosecond
}
]
"""
params = {
'symbol': symbol,
'start': start,
'end': end
}
return self._request('GET', '/api/v1/level3/message/query', auth=False, params=params)
def get_ticker(self, symbol):
"""
https://docs.kumex.com/#get-real-time-ticker
:param symbol: type tar (Mandatory)
:return:
{
"sequence": 1001, //Sequence number
"symbol": "XBTUSDM", //Symbol
"side": "buy", //Side of liquidity taker
"size": 10, //Filled quantity
"price": "7000.0", //Filled price
"bestBidSize": 20, //Best bid size
"bestBidPrice": "7000.0", //Best bid
"bestAskSize": 30, //Best ask size
"bestAskPrice": "7001.0", //Best ask
"tradeId": "5cbd7377a6ffab0c7ba98b26", //Transaction ID
"ts": 1550653727731 //Filled time - nanosecond
}
"""
params = {
'symbol': symbol
}
return self._request('GET', '/api/v1/ticker', auth=False, params=params)
def get_contracts_list(self):
"""
:return:
{
"baseCurrency": "XBT", //Base currency
"fairMethod": "FundingRate", //Fair price marking method
"fundingBaseSymbol": ".XBTINT8H", //Ticker symbol of the based currency
"fundingQuoteSymbol": ".USDINT8H", //Ticker symbol of the quote currency
"fundingRateSymbol": ".XBTUSDMFPI8H", //Funding rate symbol
"indexSymbol": ".BXBT", //Index symbol
"initialMargin": 0.01, //Initial margin requirement
"isDeleverage": true, //Enabled ADL or not
"isInverse": true, //Reverse contract or not
"isQuanto": false, //Whether quanto or not
"lotSize": 1, //Minimum lot size
"maintainMargin": 0.005, //Maintenance margin requirement
"makerFeeRate": -0.00025, //Maker fees
"makerFixFee": -0.0000000200, //Fixed maker fees
"markMethod": "FairPrice", //Marking method
"maxOrderQty": 1000000, //Maximum order quantity
"maxPrice": 1000000.0000000000, //Maximum order price
"maxRiskLimit": 200, //Maximum risk limit (unit: XBT)
"minRiskLimit": 200, //Minimum risk limit (unit: XBT)
"multiplier": -1, //Contract multiplier
"quoteCurrency": "USD", //Quote currency
"riskStep": 100, //Risk limit increment value (unit: XBT)
"rootSymbol": "XBT", //Contract group
"status": "Open", //Contract status
"symbol": "XBTUSDM", //Ticker symbol of the contract
"takerFeeRate": 0.0005, //Taker fees
"takerFixFee": 0.0000000600, //Fixed taker fees
"tickSize": 1, //Minimum price changes
"type": "FFWCSX" //Type of the contract
}
"""
return self._request('GET', '/api/v1/contracts/active', auth=False)
def get_contract_detail(self, symbol):
"""
https://docs.kumex.com/#get-order-info-of-the-contract
:param symbol: type tar (Mandatory)
:return:
{
"baseCurrency": "XBT", //Base currency
"fairMethod": "FundingRate", //Fair price marking method
"fundingBaseSymbol": ".XBTINT8H", //Ticker symbol of the based currency
"fundingQuoteSymbol": ".USDINT8H", //Ticker symbol of the quote currency
"fundingRateSymbol": ".XBTUSDMFPI8H", //Funding rate symbol
"indexSymbol": ".BXBT", //Index symbol
"initialMargin": 0.01, //Initial margin requirement
"isDeleverage": true, //Enabled ADL or not
"isInverse": true, //Reverse contract or not
"isQuanto": false, //Whether quanto or not
"lotSize": 1, //Minimum lot size
"maintainMargin": 0.005, //Maintenance margin requirement
"makerFeeRate": -0.00025, //Maker fees
"makerFixFee": -0.0000000200, //Fixed maker fees
"markMethod": "FairPrice", //Marking method
"maxOrderQty": 1000000, //Maximum order quantity
"maxPrice": 1000000.0000000000, //Maximum order price
"maxRiskLimit": 200, //Maximum risk limit (unit: XBT)
"minRiskLimit": 200, //Minimum risk limit (unit: XBT)
"multiplier": -1, //Contract multiplier
"quoteCurrency": "USD", //Quote currency
"riskStep": 100, //Risk limit increment value (unit: XBT)
"rootSymbol": "XBT", //Contract group
"status": "Open", //Contract status
"symbol": "XBTUSDM", //Ticker symbol of the contract
"takerFeeRate": 0.0005, //Taker fees
"takerFixFee": 0.0000000600, //Fixed taker fees
"tickSize": 1, //Minimum price changes
"type": "FFWCSX" //Type of the contract
}
"""
return self._request('GET', '/api/v1/contracts/{symbol}'.format(symbol=symbol), auth=False)
def get_kline_data(self, symbol, granularity:int, begin_t=None, end_t=None):
"""
https://docs.kucoin.com/futures/#get-k-line-data-of-contract
:param symbol: type tar (Mandatory)
:return:
[
1575331200000,//时间
7495.01, //开盘价
8309.67, //最高价
7250, //最低价
7463.55, //收盘价
0 //成交量
],
[
1575374400000,
7464.37,
8297.85,
7273.02,
7491.44,
0
]
"""
params = {
"symbol": symbol,
"granularity": granularity
}
if begin_t:
params.update({"from": begin_t})
if end_t:
params.update({"to": end_t})
return self._request('GET', '/api/v1/kline/query', auth=False, params=params)
def get_service_status(self):
"""
https://docs.kucoin.com/futures/#get-the-service-status
:param symbol: type tar (Mandatory)
:return:
{
"status": "open", //open, close, cancelonly
"msg": "upgrade match engine" //remark for operation
}
"""
return self._request('GET', '/api/v1/status', auth=False)
| 41.428044
| 227
| 0.515632
|
4a0fa08d8c09fbb77843ecdc1bb2906eab66a045
| 314
|
py
|
Python
|
packages/micropython-official/v1.9.4/esp32/stubs/websocket_helper.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 18
|
2019-07-11T13:31:09.000Z
|
2022-01-27T06:38:40.000Z
|
packages/micropython-official/v1.9.4/esp32/stubs/websocket_helper.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 9
|
2019-09-01T21:44:49.000Z
|
2022-02-04T20:55:08.000Z
|
packages/micropython-official/v1.9.4/esp32/stubs/websocket_helper.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 6
|
2019-10-08T05:31:21.000Z
|
2021-04-22T10:21:01.000Z
|
"""
Module: 'websocket_helper' on esp32 1.9.4
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.9.4', version='v1.9.4 on 2018-05-11', machine='ESP32 module with ESP32')
# Stubber: 1.2.0
DEBUG = 0
binascii = None
def client_handshake():
pass
hashlib = None
def server_handshake():
pass
sys = None
| 19.625
| 126
| 0.66879
|
4a0fa13dac1d762f4ba974acfab969df08dfe692
| 4,378
|
py
|
Python
|
rocketsled/asset.py
|
luhn/rocketsled
|
73924aa0c409787cbfb0ce1d3a335a99a07d3311
|
[
"MIT"
] | null | null | null |
rocketsled/asset.py
|
luhn/rocketsled
|
73924aa0c409787cbfb0ce1d3a335a99a07d3311
|
[
"MIT"
] | null | null | null |
rocketsled/asset.py
|
luhn/rocketsled
|
73924aa0c409787cbfb0ce1d3a335a99a07d3311
|
[
"MIT"
] | null | null | null |
import sys
import re
import os.path
from io import BytesIO
from gzip import GzipFile
import mimetypes
import hashlib
from base64 import urlsafe_b64encode as b64encode
# Python 2/3 imports
if sys.version_info[0] < 3:
from urllib import unquote
from urlparse import urlsplit
else:
from urllib.parse import unquote, urlsplit
class MissingAsset(Exception):
    """Raised when a stylesheet references a file absent from the manifest."""
    pass
class Asset(object):
    """
    A static asset destined for upload.

    Content is read lazily from ``path`` (and may be overwritten via the
    ``content`` setter).  ``filename`` is a content-addressed, URL-safe
    name derived from the headers plus the unencoded content.
    """
    processed = False
    _content = None
    _encoded = None
    def __init__(self, path):
        self.path = path
        self.headers = {
            'Cache-Control': 'max-age=31556926',
        }
        mtype, _ = mimetypes.guess_type(path)
        if mtype is not None:
            self.headers['Content-Type'] = mtype
    def process(self, manifest):
        """
        Do any processing necessary to prepare the file for upload.
        """
        self.processed = True
    def encode(self):
        """
        Encode the content. (Compression, mainly.)
        """
        return self.content
    @property
    def content(self):
        # Lazily read and memoize the file's raw bytes.
        if self._content is None:
            with open(self.path, 'rb') as source:
                self._content = source.read()
        return self._content
    @content.setter
    def content(self, value):
        self._content = value
    @property
    def encoded(self):
        # Memoized encoded form (see encode()).
        if self._encoded is None:
            self._encoded = self.encode()
        return self._encoded
    @property
    def filename(self):
        # Hash headers (in deterministic order) and content so equal
        # assets always map to the same upload name.
        digest = hashlib.sha1()
        for header_name, header_value in sorted(self.headers.items()):
            digest.update(header_name.encode('utf8'))
            digest.update(header_value.encode('utf8'))
        digest.update(self.content)
        return b64encode(digest.digest()).decode('ascii').rstrip('=')
class CompressedAsset(Asset):
    """
    An asset served gzip-compressed: adds a Content-Encoding header
    during processing and gzips the content when encoding.
    """
    #: Compressable mimetypes, exclude text/* types (which are assumed)
    COMPRESSABLE = {
        'application/javascript',
        'application/json',
        'application/xhtml+xml',
        'application/xml',
        'image/svg+xml',
    }
    def process(self, manifest):
        self.headers['Content-Encoding'] = 'gzip'
        super(CompressedAsset, self).process(manifest)
    def encode(self):
        raw = super(CompressedAsset, self).encode()
        buffer = BytesIO()
        with GzipFile(fileobj=buffer, mode='wb') as compressor:
            compressor.write(raw)
        return buffer.getvalue()
class StylesheetAsset(CompressedAsset):
    """
    A CSS asset whose ``url(...)`` references are rewritten.

    During processing every ``url(...)`` that points at a local file is
    replaced with the content-addressed filename of the corresponding
    asset in the manifest; absolute http(s) and ``data:`` URLs are left
    untouched.

    :raises MissingAsset: if a referenced file is not in the manifest.
    """
    URL_REGEX = re.compile(r'url\([\'"]?([^\'"\)]*)[\'"]?\)', re.I)
    def process(self, manifest):
        if self.processed:
            return
        def sub_urls(match):
            url = match.group(1)
            # External and inline URLs pass through unchanged.
            if url.startswith(('http://', 'https://', 'data:')):
                return 'url("{}")'.format(url)
            # Resolve the reference relative to this stylesheet's location.
            path = os.path.normpath(
                os.path.join(
                    os.path.dirname(self.path),
                    unquote(
                        urlsplit(url).path
                    ),
                )
            )
            try:
                asset = manifest[path]
                asset.process(manifest)
                # (We know the asset filename is URL safe, no need to quote.)
                return 'url("{}")'.format(asset.filename)
            except KeyError:
                raise MissingAsset('Could not find "{}" in "{}"'.format(
                    url, self.path,
                ))
        try:
            self.content = self.URL_REGEX.sub(
                sub_urls, self.content.decode('utf-8')
            ).encode('utf-8')
        except Exception:
            # Fix: the original bare ``except:`` also swallowed
            # KeyboardInterrupt/SystemExit.  Report the offending file
            # for debugging, then re-raise.
            print(self.path)
            raise
        super(StylesheetAsset, self).process(manifest)
def create_asset_from_path(path):
    """
    Given a filepath, create an appropriate asset object.

    :param path: The file path.
    :type path: str
    :returns: The appropriate asset.
    :rtype: :class:`Asset`
    """
    mtype, _ = mimetypes.guess_type(path)
    if mtype == 'text/css':
        return StylesheetAsset(path)
    if mtype is not None and (
        mtype.startswith('text/') or mtype in CompressedAsset.COMPRESSABLE
    ):
        return CompressedAsset(path)
    return Asset(path)
| 26.059524
| 76
| 0.554363
|
4a0fa1a0c3c15cf05e82c41a695c2a84f1710ab5
| 3,785
|
py
|
Python
|
src/core/settings.py
|
pzandre/django-blog
|
8d37071a4c2eae91066c29713407fc5b929edb96
|
[
"MIT"
] | null | null | null |
src/core/settings.py
|
pzandre/django-blog
|
8d37071a4c2eae91066c29713407fc5b929edb96
|
[
"MIT"
] | null | null | null |
src/core/settings.py
|
pzandre/django-blog
|
8d37071a4c2eae91066c29713407fc5b929edb96
|
[
"MIT"
] | null | null | null |
"""
Django settings for core project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
from secret_key_generator import secret_key_generator
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
STATIC_ROOT = BASE_DIR / 'static'
MEDIA_ROOT = BASE_DIR / 'media'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = secret_key_generator.generate()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party: rich-text editor and admin color picker.
    'ckeditor',
    'colorfield',
    # Project app.
    'blog',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        # Credentials come from the environment (docker-compose style);
        # 'pgdb' is presumably the compose service hostname — confirm.
        'NAME': os.getenv("POSTGRES_DB"),
        'USER': os.getenv("POSTGRES_USER"),
        'PASSWORD': os.getenv("POSTGRES_PASSWORD"),
        'HOST': 'pgdb',
        'PORT': 5432,
        # RDS ONLY
        # 'NAME': 'DB_NAME',
        # 'USER': 'DB_USERNAME',
        # 'HOST': 'DB_HOSTNAME',
        # 'PASSWORD': 'DB_PASSWORD',
        # 'PORT': 'DB_PORT'
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# After login/logout, redirect to the URL pattern named 'home'.
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
| 25.066225
| 91
| 0.683223
|
4a0fa1f389cf2c015715b5d6fe7837c3b24a4992
| 513
|
py
|
Python
|
cmsplugin_image_rollover/cms_plugins.py
|
divio/cmsplugin-image-rollover
|
5434297c25facac81247f2ccc865b83b2f202d0e
|
[
"BSD-3-Clause"
] | null | null | null |
cmsplugin_image_rollover/cms_plugins.py
|
divio/cmsplugin-image-rollover
|
5434297c25facac81247f2ccc865b83b2f202d0e
|
[
"BSD-3-Clause"
] | null | null | null |
cmsplugin_image_rollover/cms_plugins.py
|
divio/cmsplugin-image-rollover
|
5434297c25facac81247f2ccc865b83b2f202d0e
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models import CMSPlugin
class RolloverPlugin(CMSPluginBase):
    """django-cms plugin rendering an image-rollover container.

    Holds Bootstrap3 image child plugins and renders them via the
    image_rollover template.  Stores no data of its own (bare CMSPlugin).
    """
    model = CMSPlugin
    name = _("Image rollover")
    render_template = 'image_rollover/plugins/image_rollover.html'
    allow_children = True
    child_classes = ['Bootstrap3ImageCMSPlugin']
plugin_pool.register_plugin(RolloverPlugin)
| 24.428571
| 66
| 0.783626
|
4a0fa28f7effa12830316b8cac4f84ffc4e59d52
| 1,406
|
py
|
Python
|
burrolib/processes/hawkes_process.py
|
kosmitive/burro
|
cc6662bef8693a1c160e80d9482371620deb5304
|
[
"MIT"
] | 5
|
2020-06-25T23:00:26.000Z
|
2020-08-28T02:51:28.000Z
|
burrolib/processes/hawkes_process.py
|
kosmitive/burrolib
|
2b1d690ec9c9371f5b455dd98439dce51d270909
|
[
"MIT"
] | 5
|
2020-11-09T11:43:42.000Z
|
2021-01-09T13:15:09.000Z
|
burrolib/processes/hawkes_process.py
|
kosmitive/burrolib
|
2b1d690ec9c9371f5b455dd98439dce51d270909
|
[
"MIT"
] | null | null | null |
import numpy as np
from burrolib.processes.discrete_point_process import DiscretePointProcess
from burrolib.util.type_checking import is_lambda
class HawkesProcess(DiscretePointProcess):
    """Point process with constant or time-varying intensity, simulated
    by Ogata-style thinning.

    NOTE(review): despite the name, no self-excitation kernel is applied;
    the intensity depends only on time, so this behaves like an
    (in)homogeneous Poisson process — confirm intended semantics.
    """
    # Types of hawkes processes.
    HOMOGENEOUS = 1
    INHOMOGENEOUS = 2
    def __init__(self, intensity):
        """Creates a new hawkes process.

        :param intensity: A constant rate (int or float) or a function
            f : R+ -> R+ giving the rate at time t.
        :raises TypeError: if intensity is neither a number nor a lambda.
        """
        # Encapsulate intensity so self.intensity is always callable.
        # Fix: accept int as well as float (an int rate previously fell
        # through and left self.intensity undefined), and fail loudly on
        # unsupported types instead of silently misconfiguring.
        if isinstance(intensity, (int, float)):
            self.type = HawkesProcess.HOMOGENEOUS
            rate = float(intensity)
            self.intensity = lambda t: rate
        elif is_lambda(intensity):
            self.type = HawkesProcess.INHOMOGENEOUS
            self.intensity = intensity
        else:
            raise TypeError(
                "intensity must be a number or a function, got %r" % (intensity,)
            )
        self.arrivals = []
        self.eps = 1e-10  # nudge to evaluate the intensity just after t
        self.t = 0
    def next(self):
        """Advance one unit time step and get the number of arrivals.

        Uses thinning: candidate points are proposed at rate m (the
        intensity just after the current point) and accepted with
        probability intensity(t) / m.

        :return: The number of arrivals in this step.
        """
        t = self.t
        self.t += 1
        i = len(self.arrivals)
        while t < self.t:
            m = self.intensity(t + self.eps)
            # Fix: waiting times must be drawn from Exp(m).  The original
            # used np.exp(m) — a deterministic step of e**m — which is not
            # a random exponential inter-arrival time.
            t = t + np.random.exponential(1.0 / m)
            u = np.random.uniform(0, m)
            if u < self.intensity(t):
                if t < self.t:
                    i += 1
                    self.arrivals.append(t)
        # Drop the arrivals just counted (the list is consumed per step).
        self.arrivals = self.arrivals[i:]
        return i
| 24.666667
| 74
| 0.555477
|
4a0fa3472b6e445d196f8ef9d8d9a92607463a56
| 22,712
|
py
|
Python
|
scene/scene.py
|
sometimescasey/pca_animations
|
acad802d7273571e47956f74b149b3e78148179f
|
[
"MIT"
] | 1
|
2020-12-02T02:19:03.000Z
|
2020-12-02T02:19:03.000Z
|
scene/scene.py
|
sometimescasey/pca_animations
|
acad802d7273571e47956f74b149b3e78148179f
|
[
"MIT"
] | null | null | null |
scene/scene.py
|
sometimescasey/pca_animations
|
acad802d7273571e47956f74b149b3e78148179f
|
[
"MIT"
] | null | null | null |
import inspect
import itertools as it
import numpy as np
import os
import random
import shutil
import subprocess as sp
import warnings
from tqdm import tqdm as ProgressDisplay
from constants import *
from animation.animation import Animation
from animation.transform import MoveToTarget
from camera.camera import Camera
from continual_animation.continual_animation import ContinualAnimation
from mobject.mobject import Mobject
from utils.iterables import list_update
from utils.output_directory_getters import add_extension_if_not_present
from utils.output_directory_getters import get_movie_output_directory
from utils.output_directory_getters import get_image_output_directory
from container.container import Container
class Scene(Container):
CONFIG = {
"camera_class": Camera,
"camera_config": {},
"frame_duration": LOW_QUALITY_FRAME_DURATION,
"construct_args": [],
"skip_animations": False,
"ignore_waits": False,
"write_to_movie": False,
"save_frames": False,
"save_pngs": False,
"pngs_mode": "RGBA",
"movie_file_extension": ".mp4",
"name": None,
"always_continually_update": False,
"random_seed": 0,
"start_at_animation_number": None,
"end_at_animation_number": None,
}
    def __init__(self, **kwargs):
        """Build the scene: set up state, run construct(), finish output.

        Note that construction — and therefore rendering — happens here,
        inside __init__, not in a separate run step.
        """
        # Perhaps allow passing in a non-empty *mobjects parameter?
        Container.__init__(self, **kwargs)
        self.camera = self.camera_class(**self.camera_config)
        self.mobjects = []
        self.continual_animations = []
        self.foreground_mobjects = []
        self.num_plays = 0
        self.saved_frames = []
        self.shared_locals = {}
        self.frame_num = 0
        self.current_scene_time = 0
        self.original_skipping_status = self.skip_animations
        if self.name is None:
            self.name = self.__class__.__name__
        if self.random_seed is not None:
            # Seed both RNGs so renders are reproducible.
            random.seed(self.random_seed)
            np.random.seed(self.random_seed)
        self.setup()
        if self.write_to_movie:
            self.open_movie_pipe()
        try:
            self.construct(*self.construct_args)
        except EndSceneEarlyException:
            # Raised by handle_animation_skipping to stop at a chosen point.
            pass
        # Always tack on one last frame, so that scenes
        # with no play calls still display something
        self.skip_animations = False
        self.wait(self.frame_duration)
        if self.write_to_movie:
            self.close_movie_pipe()
        print("Played a total of %d animations" % self.num_plays)
    def setup(self):
        """
        This is meant to be implemented by any scenes which
        are commonly subclassed, and have some common setup
        involved before the construct method is called.
        """
        pass
    def setup_bases(self):
        # Run every base class's setup (for multiply-inherited scenes).
        for base in self.__class__.__bases__:
            base.setup(self)
    def construct(self):
        pass  # To be implemented in subclasses
    def __str__(self):
        return self.name
    def set_name(self, name):
        # Returns self for chaining.
        self.name = name
        return self
    def set_variables_as_attrs(self, *objects, **newly_named_objects):
        """
        This method is slightly hacky, making it a little easier
        for certain methods (typically subroutines of construct)
        to share local variables.

        Positional *objects are matched (by identity) against the
        caller's locals to recover their names; **newly_named_objects
        are set under the given keyword names.  Returns self.
        """
        # Inspect the CALLER's frame to find each object's variable name.
        caller_locals = inspect.currentframe().f_back.f_locals
        for key, value in list(caller_locals.items()):
            for o in objects:
                if value is o:
                    setattr(self, key, value)
        for key, value in list(newly_named_objects.items()):
            setattr(self, key, value)
        return self
    def get_attrs(self, *keys):
        # Bulk attribute lookup, in the order the keys were given.
        return [getattr(self, key) for key in keys]
    # Only these methods should touch the camera
    def set_camera(self, camera):
        self.camera = camera
    def get_frame(self):
        # Copy of the camera's current pixel array.
        return np.array(self.camera.get_pixel_array())
    def get_image(self):
        return self.camera.get_image()
    def set_camera_pixel_array(self, pixel_array):
        self.camera.set_pixel_array(pixel_array)
    def set_camera_background(self, background):
        self.camera.set_background(background)
    def reset_camera(self):
        self.camera.reset()
    def capture_mobjects_in_camera(self, mobjects, **kwargs):
        self.camera.capture_mobjects(mobjects, **kwargs)
    def update_frame(
            self,
            mobjects=None,
            background=None,
            include_submobjects=True,
            dont_update_when_skipping=True,
            **kwargs):
        """Re-render the current frame into the camera.

        With no arguments, renders all scene + foreground mobjects over a
        reset camera; pass `background` (a pixel array) to composite the
        given `mobjects` over it instead.
        """
        if self.skip_animations and dont_update_when_skipping:
            return
        if mobjects is None:
            mobjects = list_update(
                self.mobjects,
                self.foreground_mobjects,
            )
        if background is not None:
            self.set_camera_pixel_array(background)
        else:
            self.reset_camera()
        kwargs["include_submobjects"] = include_submobjects
        self.capture_mobjects_in_camera(mobjects, **kwargs)
    def freeze_background(self):
        # Bake the current frame into a static-background camera and
        # drop all live mobjects.
        self.update_frame()
        self.set_camera(Camera(self.get_frame()))
        self.clear()
    ###
    def continual_update(self, dt):
        # Advance all per-frame updaters by dt seconds.
        for mobject in self.get_mobjects():
            mobject.update(dt)
        for continual_animation in self.continual_animations:
            continual_animation.update(dt)
    def wind_down(self, *continual_animations, **kwargs):
        """Gracefully stop the given continual animations over
        wind_down_time seconds (default 1)."""
        wind_down_time = kwargs.get("wind_down_time", 1)
        for continual_animation in continual_animations:
            continual_animation.begin_wind_down(wind_down_time)
        self.wait(wind_down_time)
        # TODO, this is not done with the remove method so as to
        # keep the relevant mobjects. Better way?
        # NOTE(review): this KEEPS only the wound-down animations and
        # drops all others — looks inverted (expected `not in`); confirm.
        self.continual_animations = [ca for ca in self.continual_animations if ca in continual_animations]
    def should_continually_update(self):
        # True when anything in the scene needs per-frame updating.
        if self.always_continually_update:
            return True
        if len(self.continual_animations) > 0:
            return True
        any_time_based_update = any([
            len(m.get_time_based_updaters()) > 0
            for m in self.get_mobjects()
        ])
        if any_time_based_update:
            return True
        return False
    ###
    def get_top_level_mobjects(self):
        # Return only those which are not in the family
        # of another mobject from the scene
        mobjects = self.get_mobjects()
        families = [m.get_family() for m in mobjects]
        def is_top_level(mobject):
            # A top-level mobject appears in exactly one family: its own.
            num_families = sum([
                (mobject in family)
                for family in families
            ])
            return num_families == 1
        return list(filter(is_top_level, mobjects))
    def get_mobject_family_members(self):
        # All mobjects plus their (camera-relevant) descendants.
        return self.camera.extract_mobject_family_members(self.mobjects)
    def separate_mobjects_and_continual_animations(self, mobjects_or_continual_animations):
        """Split a mixed argument list into (mobjects, continual_animations).

        A ContinualAnimation contributes its .mobject to the first list
        and itself to the second; anything else raises.
        """
        mobjects = []
        continual_animations = []
        for item in mobjects_or_continual_animations:
            if isinstance(item, Mobject):
                mobjects.append(item)
            elif isinstance(item, ContinualAnimation):
                mobjects.append(item.mobject)
                continual_animations.append(item)
            else:
                raise Exception("""
                    Adding/Removing something which is
                    not a Mobject or a ContinualAnimation
                """)
        return mobjects, continual_animations
    def add(self, *mobjects_or_continual_animations):
        """
        Mobjects will be displayed, from background to foreground,
        in the order with which they are entered.

        Accepts Mobjects and ContinualAnimations.  Foreground mobjects
        are re-appended so they stay on top.  Returns self.
        """
        mobjects, continual_animations = self.separate_mobjects_and_continual_animations(
            mobjects_or_continual_animations
        )
        mobjects += self.foreground_mobjects
        # Remove first so re-added mobjects move to the front.
        self.restructure_mobjects(to_remove=mobjects)
        self.mobjects += mobjects
        self.continual_animations += continual_animations
        return self
    def add_mobjects_among(self, values):
        """
        So a scene can just add all mobjects it's defined up to that point
        by calling add_mobjects_among(locals().values())
        """
        mobjects = [x for x in values if isinstance(x, Mobject)]
        self.add(*mobjects)
        return self
    def remove(self, *mobjects_or_continual_animations):
        """Remove mobjects/continual animations from the scene.

        Also drops any continual animation whose mobject is removed.
        Returns self.
        """
        mobjects, continual_animations = self.separate_mobjects_and_continual_animations(
            mobjects_or_continual_animations
        )
        to_remove = self.camera.extract_mobject_family_members(mobjects)
        for list_name in "mobjects", "foreground_mobjects":
            self.restructure_mobjects(mobjects, list_name, False)
        self.continual_animations = [ca for ca in self.continual_animations if ca not in continual_animations and
                                     ca.mobject not in to_remove]
        return self
    def restructure_mobjects(
            self, to_remove,
            mobject_list_name="mobjects",
            extract_families=True
    ):
        """
        In cases where the scene contains a group, e.g. Group(m1, m2, m3), but one
        of its submobjects is removed, e.g. scene.remove(m1), the list of mobjects
        will be editing to contain other submobjects, but not m1, e.g. it will now
        insert m2 and m3 to where the group once was.
        """
        if extract_families:
            to_remove = self.camera.extract_mobject_family_members(to_remove)
        _list = getattr(self, mobject_list_name)
        new_list = self.get_restructured_mobject_list(_list, to_remove)
        setattr(self, mobject_list_name, new_list)
        return self
    def get_restructured_mobject_list(self, mobjects, to_remove):
        # Rebuild `mobjects` without anything in `to_remove`, descending
        # into groups only when one of their members must be dropped.
        new_mobjects = []
        def add_safe_mobjects_from_list(list_to_examine, set_to_remove):
            for mob in list_to_examine:
                if mob in set_to_remove:
                    continue
                intersect = set_to_remove.intersection(mob.get_family())
                if intersect:
                    # Part of this group is being removed: recurse and
                    # keep only the surviving submobjects.
                    add_safe_mobjects_from_list(mob.submobjects, intersect)
                else:
                    new_mobjects.append(mob)
        add_safe_mobjects_from_list(mobjects, set(to_remove))
        return new_mobjects
    def add_foreground_mobjects(self, *mobjects):
        """Add mobjects that should always render on top.  Returns self."""
        self.foreground_mobjects = list_update(
            self.foreground_mobjects,
            mobjects
        )
        self.add(*mobjects)
        return self
    def add_foreground_mobject(self, mobject):
        return self.add_foreground_mobjects(mobject)
    def remove_foreground_mobjects(self, *to_remove):
        # Note: only removes foreground status, not scene membership.
        self.restructure_mobjects(to_remove, "foreground_mobjects")
        return self
    def remove_foreground_mobject(self, mobject):
        return self.remove_foreground_mobjects(mobject)
    def bring_to_front(self, *mobjects):
        # add() re-appends, which moves the mobjects to the front.
        self.add(*mobjects)
        return self
    def bring_to_back(self, *mobjects):
        self.remove(*mobjects)
        self.mobjects = list(mobjects) + self.mobjects
        return self
def clear(self):
self.mobjects = []
self.foreground_mobjects = []
self.continual_animation = []
return self
    def get_mobjects(self):
        # Shallow copy, so callers can't mutate the scene's list.
        return list(self.mobjects)
    def get_mobject_copies(self):
        return [m.copy() for m in self.mobjects]
    def get_moving_mobjects(self, *animations):
        # Go through mobjects from start to end, and
        # as soon as there's one that needs updating of
        # some kind per frame, return the list from that
        # point forward.
        animation_mobjects = [anim.mobject for anim in animations]
        ca_mobjects = [ca.mobject for ca in self.continual_animations]
        mobjects = self.get_mobject_family_members()
        for i, mob in enumerate(mobjects):
            update_possibilities = [
                mob in animation_mobjects,
                mob in ca_mobjects,
                len(mob.get_updaters()) > 0,
                mob in self.foreground_mobjects
            ]
            for possibility in update_possibilities:
                if possibility:
                    # Everything from here up must be re-rendered each frame.
                    return mobjects[i:]
        return []
    def get_time_progression(self, run_time):
        """Yield frame times (with a progress bar) spanning run_time.

        When skipping, a single [run_time] step is used so state still
        advances to the end without rendering intermediate frames.
        """
        if self.skip_animations:
            times = [run_time]
        else:
            step = self.frame_duration
            times = np.arange(0, run_time, step)
        time_progression = ProgressDisplay(times)
        return time_progression
    def get_animation_time_progression(self, animations):
        # Span the LONGEST animation; label the bar with the first one.
        run_time = np.max([animation.run_time for animation in animations])
        time_progression = self.get_time_progression(run_time)
        time_progression.set_description("".join([
            "Animation %d: " % self.num_plays,
            str(animations[0]),
            (", etc." if len(animations) > 1 else ""),
        ]))
        return time_progression
    def compile_play_args_to_animation_list(self, *args):
        """
        Each arg can either be an animation, or a mobject method
        followed by that method's arguments (and potentially followed
        by a dict of kwargs for that method).
        This animation list is built by going through the args list,
        and each animation is simply added, but when a mobject method
        is hit, a MoveToTarget animation is built using the args that
        follow up until either another animation is hit, another method
        is hit, or the args list runs out.
        """
        animations = []
        # Pending method-call accumulator, flushed by compile_method.
        state = {
            "curr_method": None,
            "last_method": None,
            "method_args": [],
        }
        def compile_method(state):
            # Turn the accumulated (method, args) into a MoveToTarget.
            if state["curr_method"] is None:
                return
            mobject = state["curr_method"].__self__
            if state["last_method"] and state["last_method"].__self__ is mobject:
                # Consecutive methods on the same mobject compose into
                # one MoveToTarget, so drop the previous animation.
                animations.pop()
                # method should already have target then.
            else:
                mobject.generate_target()
            #
            if len(state["method_args"]) > 0 and isinstance(state["method_args"][-1], dict):
                # A trailing dict is treated as kwargs for the method.
                method_kwargs = state["method_args"].pop()
            else:
                method_kwargs = {}
            # Apply the method to the TARGET copy, not the live mobject.
            state["curr_method"].__func__(
                mobject.target,
                *state["method_args"],
                **method_kwargs
            )
            animations.append(MoveToTarget(mobject))
            state["last_method"] = state["curr_method"]
            state["curr_method"] = None
            state["method_args"] = []
        for arg in args:
            if isinstance(arg, Animation):
                compile_method(state)
                animations.append(arg)
            elif inspect.ismethod(arg):
                compile_method(state)
                state["curr_method"] = arg
            elif state["curr_method"] is not None:
                state["method_args"].append(arg)
            elif isinstance(arg, Mobject):
                raise Exception("""
                    I think you may have invoked a method
                    you meant to pass in as a Scene.play argument
                """)
            else:
                raise Exception("Invalid play arguments")
        compile_method(state)
        return animations
def handle_animation_skipping(self):
if self.start_at_animation_number:
if self.num_plays == self.start_at_animation_number:
self.skip_animations = False
if self.end_at_animation_number:
if self.num_plays >= self.end_at_animation_number:
self.skip_animations = True
raise EndSceneEarlyException()
    def play(self, *args, **kwargs):
        """Play the given animations (or mobject-method shorthands).

        Args are compiled via compile_play_args_to_animation_list, so a
        mobject method followed by its arguments is also accepted.
        kwargs (e.g. run_time, rate_func) are forwarded to every
        animation through update_config.  Returns self for chaining.
        """
        if len(args) == 0:
            warnings.warn("Called Scene.play with no animations")
            return
        self.handle_animation_skipping()
        animations = self.compile_play_args_to_animation_list(*args)
        for animation in animations:
            # This is where kwargs to play like run_time and rate_func
            # get applied to all animations
            animation.update_config(**kwargs)
            # Anything animated that's not already in the
            # scene gets added to the scene
            if animation.mobject not in self.get_mobject_family_members():
                self.add(animation.mobject)
        moving_mobjects = self.get_moving_mobjects(*animations)
        # Paint all non-moving objects onto the screen, so they don't
        # have to be rendered every frame
        self.update_frame(excluded_mobjects=moving_mobjects)
        static_image = self.get_frame()
        total_run_time = 0
        for t in self.get_animation_time_progression(animations):
            for animation in animations:
                # t is absolute time; each animation advances by its
                # own fraction of completion.
                animation.update(t / animation.run_time)
            self.continual_update(dt=t - total_run_time)
            self.update_frame(moving_mobjects, static_image)
            self.add_frames(self.get_frame())
            total_run_time = t
        # Remembered so callers can retrieve what was just animated.
        self.mobjects_from_last_animation = [
            anim.mobject for anim in animations
        ]
        self.clean_up_animations(*animations)
        if self.skip_animations:
            # Frames were not rendered, but continual updates must still
            # advance by the full elapsed time.
            self.continual_update(total_run_time)
        else:
            self.continual_update(0)
        self.num_plays += 1
        return self
def clean_up_animations(self, *animations):
for animation in animations:
animation.clean_up(self)
return self
def get_mobjects_from_last_animation(self):
if hasattr(self, "mobjects_from_last_animation"):
return self.mobjects_from_last_animation
return []
def wait(self, duration=DEFAULT_WAIT_TIME):
if self.should_continually_update():
total_time = 0
for t in self.get_time_progression(duration):
self.continual_update(dt=t - total_time)
self.update_frame()
self.add_frames(self.get_frame())
total_time = t
elif self.skip_animations:
# Do nothing
return self
else:
self.update_frame()
n_frames = int(duration / self.frame_duration)
frame = self.get_frame()
self.add_frames(*[frame] * n_frames)
return self
def wait_to(self, time, assert_positive=True):
if self.ignore_waits:
return
time -= self.current_scene_time
if assert_positive:
assert(time >= 0)
elif time < 0:
return
self.wait(time)
def force_skipping(self):
self.original_skipping_status = self.skip_animations
self.skip_animations = True
return self
def revert_to_original_skipping_status(self):
if hasattr(self, "original_skipping_status"):
self.skip_animations = self.original_skipping_status
return self
    def add_frames(self, *frames):
        """Append rendered frames to the movie pipe and/or saved-frame list.

        Advances current_scene_time by one frame_duration per frame.
        Does nothing while animations are being skipped.
        """
        if self.skip_animations:
            return
        self.current_scene_time += len(frames) * self.frame_duration
        if self.write_to_movie:
            for frame in frames:
                if self.save_pngs:
                    # Also dump each frame as a zero-padded numbered PNG.
                    self.save_image(
                        "frame" + str(self.frame_num).zfill(4), self.pngs_mode, True)
                    self.frame_num = self.frame_num + 1
                # Raw pixel bytes go straight into the ffmpeg stdin pipe
                # opened by open_movie_pipe.
                self.writing_process.stdin.write(frame.tostring())
        if self.save_frames:
            self.saved_frames += list(frames)
# Display methods
def show_frame(self):
self.update_frame(dont_update_when_skipping=False)
self.get_image().show()
def get_image_file_path(self, name=None, dont_update=False):
sub_dir = "images"
if dont_update:
sub_dir = str(self)
path = get_image_output_directory(self.__class__, sub_dir)
file_name = add_extension_if_not_present(name or str(self), ".png")
return os.path.join(path, file_name)
def save_image(self, name=None, mode="RGB", dont_update=False):
path = self.get_image_file_path(name, dont_update)
if not dont_update:
self.update_frame(dont_update_when_skipping=False)
image = self.get_image()
image = image.convert(mode)
image.save(path)
def get_movie_file_path(self, name=None, extension=None):
directory = get_movie_output_directory(
self.__class__, self.camera_config, self.frame_duration
)
if extension is None:
extension = self.movie_file_extension
if name is None:
name = self.name
file_path = os.path.join(directory, name)
if not file_path.endswith(extension):
file_path += extension
return file_path
    def open_movie_pipe(self):
        """Spawn an ffmpeg process that encodes raw RGBA frames from stdin.

        Frames are written to a "<name>Temp" file first; close_movie_pipe
        renames it into place, so a partial render never clobbers a
        finished movie.
        """
        name = str(self)
        file_path = self.get_movie_file_path(name)
        temp_file_path = file_path.replace(name, name + "Temp")
        print("Writing to %s" % temp_file_path)
        # Consumed later by close_movie_pipe.
        self.args_to_rename_file = (temp_file_path, file_path)
        fps = int(1 / self.frame_duration)
        height = self.camera.get_pixel_height()
        width = self.camera.get_pixel_width()
        command = [
            FFMPEG_BIN,
            '-y',  # overwrite output file if it exists
            '-f', 'rawvideo',
            '-s', '%dx%d' % (width, height),  # size of one frame
            '-pix_fmt', 'rgba',
            '-r', str(fps),  # frames per second
            '-i', '-',  # The input comes from a pipe
            '-an',  # Tells FFMPEG not to expect any audio
            '-loglevel', 'error',
        ]
        if self.movie_file_extension == ".mov":
            # This is if the background of the exported video
            # should be transparent.
            command += [
                '-vcodec', 'qtrle',
            ]
        else:
            command += [
                '-vcodec', 'libx264',
                '-pix_fmt', 'yuv420p',
            ]
        command += [temp_file_path]
        self.writing_process = sp.Popen(command, stdin=sp.PIPE)
def close_movie_pipe(self):
self.writing_process.stdin.close()
self.writing_process.wait()
if os.name == 'nt':
shutil.move(*self.args_to_rename_file)
else:
os.rename(*self.args_to_rename_file)
class EndSceneEarlyException(Exception):
    """Raised to abort a scene once end_at_animation_number is reached."""
    pass
| 35.4875
| 113
| 0.61593
|
4a0fa396a19248edd907d16fc0716706f5465bc8
| 1,138
|
py
|
Python
|
mars/dataframe/reduction/cummax.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 2,413
|
2018-12-06T09:37:11.000Z
|
2022-03-30T15:47:39.000Z
|
mars/dataframe/reduction/cummax.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 1,335
|
2018-12-07T03:06:18.000Z
|
2022-03-31T11:45:57.000Z
|
mars/dataframe/reduction/cummax.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 329
|
2018-12-07T03:12:41.000Z
|
2022-03-29T21:49:57.000Z
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import opcodes as OperandDef
from ...config import options
from .core import DataFrameCumReductionOperand, DataFrameCumReductionMixin
class DataFrameCummax(DataFrameCumReductionOperand, DataFrameCumReductionMixin):
    """Operand implementing the ``cummax`` cumulative-maximum reduction."""
    _op_type_ = OperandDef.CUMMAX
    _func_name = 'cummax'
def cummax(df, axis=None, skipna=True):
    """Return the cumulative maximum of ``df``.

    Parameters
    ----------
    df : DataFrame or Series tileable
        Input whose cumulative maximum is computed.
    axis : optional
        Axis along which to accumulate.
    skipna : bool
        Whether to skip NA values (default True).
    """
    op = DataFrameCummax(
        axis=axis,
        skipna=skipna,
        output_types=df.op.output_types,
        use_inf_as_na=options.dataframe.mode.use_inf_as_na,
    )
    return op(df)
| 37.933333
| 83
| 0.760984
|
4a0fa3a884cefc45f9a564bcd9ca112a1e42c254
| 1,740
|
py
|
Python
|
tests/test_formats_sqlite.py
|
ownport/filemeta
|
0e96abdd71a93c10d1f752b1c3532018297990dd
|
[
"MIT"
] | 1
|
2018-05-07T06:07:41.000Z
|
2018-05-07T06:07:41.000Z
|
tests/test_formats_sqlite.py
|
ownport/filemeta
|
0e96abdd71a93c10d1f752b1c3532018297990dd
|
[
"MIT"
] | 7
|
2017-09-17T19:18:12.000Z
|
2021-03-21T08:52:19.000Z
|
tests/test_formats_sqlite.py
|
ownport/filemeta
|
0e96abdd71a93c10d1f752b1c3532018297990dd
|
[
"MIT"
] | null | null | null |
import json
import pytest
from filetools.formats.sqlite import Metastore, SQLiteDatabase
# Sample file-metadata row (hash, path, size, JSON-encoded tags) used as
# a shared fixture by the Metastore tests below.
RECORD = {
    'sha256': '5b78afc1c065a1eecbbdb847a30cb1bea163a6ec4a2a8225fd3ca1eb2f952536',
    'path': 'tests/resourses/data.file',
    'name': 'data.file',
    'ext': '.file',
    'size': 17079235,
    'tags': json.dumps(['tests', 'resources'])
}
# ===================================================
# SQLiteDatabase
#
def test_db_init(tmpdir):
    '''A fresh SQLiteDatabase exposes a live connection and cursor.'''
    db_path = tmpdir / 'db.sqlite3'
    database = SQLiteDatabase(db_path)
    assert database
    assert database.connection
    assert database.cursor
def test_db_init_no_path():
    '''SQLiteDatabase must raise RuntimeError when given no path.'''
    with pytest.raises(RuntimeError):
        SQLiteDatabase(None)
def test_db_execute_and_query_methods(tmpdir):
    '''Rows written via execute() come back from query() as dicts.'''
    database = SQLiteDatabase(tmpdir / 'db.sqlite3')
    database.execute('CREATE TABLE test (key INTEGER, value BLOB)')
    database.execute(
        'INSERT INTO test (key, value) VALUES (:key, :value)',
        {'key': 1, 'value': 'Value#1'},
    )
    database.commit()
    rows = list(database.query('SELECT * FROM test'))
    assert rows == [{'key': 1, 'value': 'Value#1'}]
    database.close()
# ===================================================
# Metastore
#
def test_metastore_init(tmpdir):
    ''' test for metastore init
    '''
    # Fixed filename typo ('metastore.sqlte3' -> 'metastore.sqlite3') so
    # the extension matches the other Metastore test in this module.
    path = tmpdir / 'metastore.sqlite3'
    metastore = Metastore(path)
    assert metastore
def test_metastore_put_and_get_methods(tmpdir):
    '''A record stored via put() is returned unchanged by get().'''
    store = Metastore(tmpdir / 'metastore.sqlite3')
    store.put(RECORD)
    store.commit()
    assert list(store.get()) == [RECORD]
    store.close()
| 25.217391
| 101
| 0.608621
|
4a0fa3ca3d45186ac1353631e9fa6b0993a396f3
| 7,802
|
py
|
Python
|
tests/garage/tf/policies/test_gaussian_mlp_policy_with_model_transit.py
|
mark-koren/garage
|
a5feda84d8a226225ff6148542b4e53ff4bd0fb5
|
[
"MIT"
] | 1
|
2020-01-05T14:57:43.000Z
|
2020-01-05T14:57:43.000Z
|
tests/garage/tf/policies/test_gaussian_mlp_policy_with_model_transit.py
|
lywong92/garage
|
96cb8887fcae90531a645d540653010e7fe10fcc
|
[
"MIT"
] | null | null | null |
tests/garage/tf/policies/test_gaussian_mlp_policy_with_model_transit.py
|
lywong92/garage
|
96cb8887fcae90531a645d540653010e7fe10fcc
|
[
"MIT"
] | null | null | null |
"""
Unit test for GaussianMLPPolicyWithModel.
This test consists of four different GaussianMLPPolicy: P1, P2, P3
and P4. P1 and P2 are from GaussianMLPPolicy, which does not use
garage.tf.models.GaussianMLPModel while P3 and P4 do use.
This test ensures the outputs from all the policies are the same,
for the transition from using GaussianMLPPolicy to
GaussianMLPPolicyWithModel.
It covers get_action, get_actions, dist_info_sym, kl_sym,
log_likelihood_sym, entropy_sym and likelihood_ratio_sym.
"""
from unittest import mock
import numpy as np
import tensorflow as tf
from garage.tf.envs import TfEnv
from garage.tf.misc import tensor_utils
from garage.tf.policies import GaussianMLPPolicy
from garage.tf.policies import GaussianMLPPolicyWithModel
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
class TestGaussianMLPPolicyWithModelTransit(TfGraphTestCase):
    """Transition test: GaussianMLPPolicyWithModel vs GaussianMLPPolicy.

    P1/P2 use the legacy implementation, P3/P4 the model-based one.
    setUp copies P3's weights into P1 and P4's into P2, so every
    quantity compared below must come out identical across the pairs.
    """

    @mock.patch('tensorflow.random.normal')
    def setUp(self, mock_rand):
        # Fix TF's sampling noise so both implementations draw the same
        # values wherever tf.random.normal is used.
        mock_rand.return_value = 0.5
        super().setUp()
        self.box_env = TfEnv(DummyBoxEnv())
        self.policy1 = GaussianMLPPolicy(
            env_spec=self.box_env, init_std=1.0, name='P1')
        self.policy2 = GaussianMLPPolicy(
            env_spec=self.box_env, init_std=1.2, name='P2')
        self.policy3 = GaussianMLPPolicyWithModel(
            env_spec=self.box_env, init_std=1.0, name='P3')
        self.policy4 = GaussianMLPPolicyWithModel(
            env_spec=self.box_env, init_std=1.2, name='P4')
        self.sess.run(tf.global_variables_initializer())

        # Copy weights from the model-based policies into the legacy
        # ones so their outputs are directly comparable.
        for a, b in zip(self.policy3.get_params(), self.policy1.get_params()):
            self.sess.run(tf.assign(b, a))
        for a, b in zip(self.policy4.get_params(), self.policy2.get_params()):
            self.sess.run(tf.assign(b, a))

        self.obs = [self.box_env.reset()]
        self.obs_ph = tf.placeholder(
            tf.float32, shape=(None, self.box_env.observation_space.flat_dim))
        self.action_ph = tf.placeholder(
            tf.float32, shape=(None, self.box_env.action_space.flat_dim))

        # Symbolic distribution info for each policy, reused by tests.
        self.dist1_sym = self.policy1.dist_info_sym(self.obs_ph, name='p1_sym')
        self.dist2_sym = self.policy2.dist_info_sym(self.obs_ph, name='p2_sym')
        self.dist3_sym = self.policy3.dist_info_sym(self.obs_ph, name='p3_sym')
        self.dist4_sym = self.policy4.dist_info_sym(self.obs_ph, name='p4_sym')

        assert self.policy1.vectorized == self.policy2.vectorized
        assert self.policy3.vectorized == self.policy4.vectorized

    def test_dist_info_sym_output(self):
        """dist_info_sym must yield identical mean/log_std pairwise."""
        dist1 = self.sess.run(
            self.dist1_sym, feed_dict={self.obs_ph: self.obs})
        dist2 = self.sess.run(
            self.dist2_sym, feed_dict={self.obs_ph: self.obs})
        dist3 = self.sess.run(
            self.dist3_sym, feed_dict={self.obs_ph: self.obs})
        dist4 = self.sess.run(
            self.dist4_sym, feed_dict={self.obs_ph: self.obs})

        assert np.array_equal(dist1['mean'], dist3['mean'])
        assert np.array_equal(dist1['log_std'], dist3['log_std'])
        assert np.array_equal(dist2['mean'], dist4['mean'])
        assert np.array_equal(dist2['log_std'], dist4['log_std'])

    @mock.patch('numpy.random.normal')
    def test_get_action(self, mock_rand):
        """get_action/get_actions must agree between implementations."""
        mock_rand.return_value = 0.5
        action1, _ = self.policy1.get_action(self.obs)
        action2, _ = self.policy2.get_action(self.obs)
        action3, _ = self.policy3.get_action(self.obs)
        action4, _ = self.policy4.get_action(self.obs)

        assert np.array_equal(action1, action3)
        assert np.array_equal(action2, action4)

        actions1, dist_info1 = self.policy1.get_actions([self.obs])
        actions2, dist_info2 = self.policy2.get_actions([self.obs])
        actions3, dist_info3 = self.policy3.get_actions([self.obs])
        actions4, dist_info4 = self.policy4.get_actions([self.obs])

        assert np.array_equal(actions1, actions3)
        assert np.array_equal(actions2, actions4)
        assert np.array_equal(dist_info1['mean'], dist_info3['mean'])
        assert np.array_equal(dist_info1['log_std'], dist_info3['log_std'])
        assert np.array_equal(dist_info2['mean'], dist_info4['mean'])
        assert np.array_equal(dist_info2['log_std'], dist_info4['log_std'])

    def test_kl_sym(self):
        """KL(P1||P2) must equal KL(P3||P4)."""
        kl_diff_sym1 = self.policy1.distribution.kl_sym(
            self.dist1_sym, self.dist2_sym)
        objective1 = tf.reduce_mean(kl_diff_sym1)
        kl_func = tensor_utils.compile_function([self.obs_ph], objective1)
        kl1 = kl_func(self.obs, self.obs)

        kl_diff_sym2 = self.policy3.distribution.kl_sym(
            self.dist3_sym, self.dist4_sym)
        objective2 = tf.reduce_mean(kl_diff_sym2)
        kl_func = tensor_utils.compile_function([self.obs_ph], objective2)
        kl2 = kl_func(self.obs, self.obs)

        assert np.array_equal(kl1, kl2)
        self.assertAlmostEqual(kl1, kl2)

    def test_log_likehihood_sym(self):
        """Log-likelihoods of a fixed action must match pairwise."""
        log_prob_sym1 = self.policy1.distribution.log_likelihood_sym(
            self.action_ph, self.dist1_sym)
        log_prob_func = tensor_utils.compile_function(
            [self.obs_ph, self.action_ph], log_prob_sym1)
        log_prob1 = log_prob_func(self.obs, [[1, 1]])

        log_prob_sym2 = self.policy3.model.networks[
            'default'].dist.log_likelihood_sym(self.action_ph, self.dist3_sym)
        log_prob_func2 = tensor_utils.compile_function(
            [self.obs_ph, self.action_ph], log_prob_sym2)
        log_prob2 = log_prob_func2(self.obs, [[1, 1]])
        assert log_prob1 == log_prob2

        log_prob_sym1 = self.policy2.distribution.log_likelihood_sym(
            self.action_ph, self.dist2_sym)
        log_prob_func = tensor_utils.compile_function(
            [self.obs_ph, self.action_ph], log_prob_sym1)
        log_prob1 = log_prob_func(self.obs, [[1, 1]])

        log_prob_sym2 = self.policy4.model.networks[
            'default'].dist.log_likelihood_sym(self.action_ph, self.dist4_sym)
        log_prob_func2 = tensor_utils.compile_function(
            [self.obs_ph, self.action_ph], log_prob_sym2)
        log_prob2 = log_prob_func2(self.obs, [[1, 1]])
        assert log_prob1 == log_prob2

    def test_policy_entropy_sym(self):
        """Entropies of weight-matched distributions must be equal."""
        entropy_sym1 = self.policy1.distribution.entropy_sym(
            self.dist1_sym, name='entropy_sym1')
        entropy_func = tensor_utils.compile_function([self.obs_ph],
                                                     entropy_sym1)
        entropy1 = entropy_func(self.obs)

        entropy_sym2 = self.policy3.distribution.entropy_sym(
            self.dist3_sym, name='entropy_sym1')
        entropy_func = tensor_utils.compile_function([self.obs_ph],
                                                     entropy_sym2)
        entropy2 = entropy_func(self.obs)
        assert entropy1 == entropy2

    def test_likelihood_ratio_sym(self):
        """Likelihood ratios computed both ways must be equal."""
        likelihood_ratio_sym1 = self.policy1.distribution.likelihood_ratio_sym(
            self.action_ph,
            self.dist1_sym,
            self.dist2_sym,
            name='li_ratio_sym1')
        likelihood_ratio_func = tensor_utils.compile_function(
            [self.action_ph, self.obs_ph], likelihood_ratio_sym1)
        likelihood_ratio1 = likelihood_ratio_func([[1, 1]], self.obs)

        likelihood_ratio_sym2 = self.policy3.distribution.likelihood_ratio_sym(
            self.action_ph,
            self.dist3_sym,
            self.dist4_sym,
            name='li_ratio_sym2')
        likelihood_ratio_func = tensor_utils.compile_function(
            [self.action_ph, self.obs_ph], likelihood_ratio_sym2)
        likelihood_ratio2 = likelihood_ratio_func([[1, 1]], self.obs)

        assert likelihood_ratio1 == likelihood_ratio2
| 42.868132
| 79
| 0.673289
|
4a0fa565b4df549d0c2c48c15499b4d086d24435
| 237
|
py
|
Python
|
setup.py
|
jupyterjazz/wordninja
|
0f7c909c9e20c117bc5fa445b0ab50fb854e84f1
|
[
"MIT"
] | 3
|
2021-11-24T21:41:03.000Z
|
2021-11-27T13:49:02.000Z
|
setup.py
|
jupyterjazz/wordninja
|
0f7c909c9e20c117bc5fa445b0ab50fb854e84f1
|
[
"MIT"
] | 1
|
2021-11-27T12:55:32.000Z
|
2021-11-27T13:51:58.000Z
|
setup.py
|
jupyterjazz/wordninja
|
0f7c909c9e20c117bc5fa445b0ab50fb854e84f1
|
[
"MIT"
] | 1
|
2021-11-27T13:52:09.000Z
|
2021-11-27T13:52:09.000Z
|
from setuptools import setup

# Minimal packaging config for wordninja; the gzipped default language
# model ships inside the package via package_data.
setup(name='wordninja',
      version="0.1.0",
      packages = ['wordninja'],
      package_dir={'wordninja': 'wordninja'},
      package_data={'wordninja': ['models/default_model.txt.gz']},
      include_package_data=True,
      )
| 23.7
| 62
| 0.708861
|
4a0fa5e0aa2daf5d94eb4945bf13b870635ed995
| 18,818
|
py
|
Python
|
log_complete/model_215.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_complete/model_215.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_complete/model_215.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 53750.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
| 91.349515
| 710
| 0.806515
|
4a0fa6911e7ebecfe0eeb260b447f1530d4054b1
| 879
|
py
|
Python
|
tests/factories.py
|
wroberts/annotator
|
0a1ddaab417676f83723588613c1782fad938a8d
|
[
"BSD-3-Clause"
] | 1
|
2020-09-16T07:47:49.000Z
|
2020-09-16T07:47:49.000Z
|
tests/factories.py
|
wroberts/annotator
|
0a1ddaab417676f83723588613c1782fad938a8d
|
[
"BSD-3-Clause"
] | 37
|
2017-10-14T17:30:04.000Z
|
2021-01-03T17:44:37.000Z
|
tests/factories.py
|
wroberts/annotator
|
0a1ddaab417676f83723588613c1782fad938a8d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Factories to help in tests."""
from datetime import datetime
from factory import LazyFunction, Sequence
from factory.alchemy import SQLAlchemyModelFactory
from annotator.database import db
from annotator.user.models import User
class BaseFactory(SQLAlchemyModelFactory):
    """Base factory: binds all concrete factories to the app's DB session."""
    class Meta:
        """Factory configuration."""
        # Abstract: never instantiated directly, only subclassed.
        abstract = True
        # All factories persist through the shared Flask-SQLAlchemy session,
        # so test fixtures are created (and rolled back) together.
        sqlalchemy_session = db.session
class UserFactory(BaseFactory):
    """Factory producing distinct ``User`` instances for tests.

    ``Sequence`` fields guarantee per-instance uniqueness; lazily evaluated
    datetimes guarantee each instance gets its own creation timestamp.
    """
    first_name = Sequence(lambda n: 'first{0}'.format(n))
    last_name = Sequence(lambda n: 'last{0}'.format(n))
    email = Sequence(lambda n: 'user{0}@example.com'.format(n))
    password = 'example'
    active = True
    # BUG FIX: `datetime.now()` was previously evaluated once at import time,
    # so every user built by this factory shared the exact same `created_at`.
    # LazyFunction (already used for `confirmed_at`) defers evaluation to
    # instance-creation time.
    created_at = LazyFunction(datetime.now)
    confirmed_at = LazyFunction(datetime.now)
    class Meta:
        """Factory configuration."""
        model = User
| 23.756757
| 63
| 0.67463
|
4a0fa69ac4e44ee0466186a0c994f8e7f4b9def7
| 5,571
|
py
|
Python
|
training/ModelXGBoost.py
|
grimmlab/HorticulturalSalesPredictions
|
92d582623a278c938d1674b154e370ed43482248
|
[
"MIT"
] | null | null | null |
training/ModelXGBoost.py
|
grimmlab/HorticulturalSalesPredictions
|
92d582623a278c938d1674b154e370ed43482248
|
[
"MIT"
] | null | null | null |
training/ModelXGBoost.py
|
grimmlab/HorticulturalSalesPredictions
|
92d582623a278c938d1674b154e370ed43482248
|
[
"MIT"
] | null | null | null |
import xgboost as xgb
import pandas as pd
import numpy as np
import copy
from training import ModelsBaseClass
class XGBoostRegression(ModelsBaseClass.BaseModel):
    """XGBoost regression model wrapped behind the project's common model
    interface (train / update / insample / predict)."""
    def __init__(self, target_column: str, seasonal_periods: int, tree_meth: str = 'auto', learning_rate: float = 0.3,
                 max_depth: int = 6, subsample: float = 1, colsample_by_tree: float = 1, n_estimators: int = 100,
                 gamma: float = 0, alpha: float = 0, reg_lambda: float = 1, one_step_ahead: bool = False):
        """
        :param target_column: target_column for prediction
        :param seasonal_periods: seasonal periodicity
        :param tree_meth: tree_method to use
        :param learning_rate: boosting learning rate
        :param max_depth: maximum depth for base learners
        :param subsample: subsample ratio of training instances
        :param colsample_by_tree: subsample ratio of columns for constructing each tree
        :param n_estimators: number of trees
        :param gamma: minimum loss reduction required to make a further partition on leaf node
        :param alpha: l1 regularization term
        :param reg_lambda: l2 regularization term
        :param one_step_ahead: perform one step ahead prediction
        """
        super().__init__(target_column=target_column, seasonal_periods=seasonal_periods, name='XGBoostRegression',
                         one_step_ahead=one_step_ahead)
        # BUG FIX: the keyword recognised by XGBRegressor is `colsample_bytree`.
        # The previous `colsample_by_tree=...` was an unknown parameter, so the
        # caller's column-subsampling setting was silently ignored by XGBoost.
        self.model = xgb.XGBRegressor(tree_method=tree_meth, objective='reg:squarederror', learning_rate=learning_rate,
                                      max_depth=max_depth, subsample=subsample, colsample_bytree=colsample_by_tree,
                                      random_state=42, n_estimators=n_estimators, gamma=gamma, alpha=alpha,
                                      reg_lambda=reg_lambda, verbosity=0)
    def train(self, train: pd.DataFrame, cross_val_call: bool = False) -> dict:
        """
        Train XGB model on the full train set (optionally cross-validating first).
        :param train: train set
        :param cross_val_call: called to perform cross validation
        :return: dictionary with cross validated scores (if specified)
        """
        cross_val_score_dict = {}
        if cross_val_call:
            # Both time-series-split and shuffled CV scores are gathered; the
            # model returned by the last CV run is discarded by the final fit.
            cross_val_score_dict_ts, self.model = self.get_cross_val_score(train=train)
            cross_val_score_dict_shuf, self.model = self.get_cross_val_score(train=train, normal_cv=True)
            cross_val_score_dict = {**cross_val_score_dict_ts, **cross_val_score_dict_shuf}
        self.model.fit(X=train.drop([self.target_column], axis=1), y=train[self.target_column])
        return cross_val_score_dict
    def update(self, train: pd.DataFrame, model: xgb.XGBRegressor) -> xgb.XGBRegressor:
        """
        Update existing model due to new samples (full refit on the grown set).
        :param train: train set with new samples
        :param model: model to update
        :return: updated model
        """
        return model.fit(X=train.drop([self.target_column], axis=1), y=train[self.target_column])
    def insample(self, train: pd.DataFrame) -> pd.DataFrame:
        """
        Deliver (back-transformed) insample predictions.
        :param train: train set
        :return: DataFrame with insample predictions
        """
        # NOTE(review): the `data` keyword matches older xgboost sklearn APIs;
        # newer releases renamed it to `X` — confirm against the pinned version.
        insample = pd.DataFrame(data=self.model.predict(data=train.drop([self.target_column], axis=1)),
                                index=train.index, columns=['Insample'])
        return insample
    def predict(self, test: pd.DataFrame, train: pd.DataFrame) -> pd.DataFrame:
        """
        Deliver (back-transformed), if specified one step ahead, out-of-sample predictions.
        :param test: test set
        :param train: train set
        :return: DataFrame with predictions
        """
        if self.one_step_ahead:
            train_manip = train.copy()
            predict_lst = []
            # deep copy model as predict function should not change class model
            model = copy.deepcopy(self.model)
            for i in range(0, test.shape[0]):
                fc = model.predict(data=test.drop([self.target_column], axis=1).iloc[[i]])
                # pd.concat replaces DataFrame.append, which was deprecated in
                # pandas 1.4 and removed in 2.0; behavior is identical here.
                train_manip = pd.concat([train_manip, test.iloc[[i]]])
                model = self.update(train=train_manip, model=model)
                predict_lst.append(fc)
            predict = np.array(predict_lst).flatten()
        else:
            predict = self.model.predict(data=test.drop([self.target_column], axis=1))
        predictions = pd.DataFrame({'Prediction': predict}, index=test.index)
        return predictions
    def plot_feature_importance(self, importance_type: str = 'weight'):
        """
        Plot feature importance for XGB Regressor.
        :param importance_type: importance type to use
            'weight': the number of times a feature is used to split the data across all trees.
            'gain': the average gain across all splits the feature is used in.
            'cover': the average coverage across all splits the feature is used in.
            'total_gain': the total gain across all splits the feature is used in.
            'total_cover': the total coverage across all splits the feature is used in.
        """
        feature_important = self.model.get_booster().get_score(importance_type=importance_type)
        keys = list(feature_important.keys())
        values = list(feature_important.values())
        data = pd.DataFrame(data=values, index=keys, columns=["score"]).sort_values(by="score", ascending=False)
        data.plot(kind='barh')
| 51.110092
| 119
| 0.652486
|
4a0fa7bf1a316607b63010aac5ab3ccbcc73bff7
| 9,663
|
py
|
Python
|
openff/evaluator/tests/test_protocols/test_yank.py
|
lilyminium/openff-evaluator
|
21da54363009d83110b54d57e4416ae31df3868b
|
[
"MIT"
] | null | null | null |
openff/evaluator/tests/test_protocols/test_yank.py
|
lilyminium/openff-evaluator
|
21da54363009d83110b54d57e4416ae31df3868b
|
[
"MIT"
] | null | null | null |
openff/evaluator/tests/test_protocols/test_yank.py
|
lilyminium/openff-evaluator
|
21da54363009d83110b54d57e4416ae31df3868b
|
[
"MIT"
] | null | null | null |
"""
Units tests for openff.evaluator.protocols.yank
"""
import os
import tempfile
import mdtraj
import numpy as np
import pytest
from openff.evaluator import unit
from openff.evaluator.backends import ComputeResources
from openff.evaluator.forcefield import ParameterGradientKey
from openff.evaluator.protocols.coordinates import BuildCoordinatesPackmol
from openff.evaluator.protocols.forcefield import BuildSmirnoffSystem
from openff.evaluator.protocols.yank import (
LigandReceptorYankProtocol,
SolvationYankProtocol,
)
from openff.evaluator.substances import Component, ExactAmount, MoleFraction, Substance
from openff.evaluator.tests.utils import build_tip3p_smirnoff_force_field
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.utils.timeseries import TimeSeriesStatistics
from openff.evaluator.utils.utils import get_data_filename, temporarily_change_directory
def _setup_dummy_system(directory, substance, number_of_molecules, force_field_path):
    """Build a small packed box for *substance* and parameterize it.

    Runs the coordinate-generation protocol followed by the SMIRNOFF
    parameter-assignment protocol inside *directory*, returning the
    coordinate file path and the parameterized system.
    """
    os.makedirs(directory, exist_ok=True)
    coordinate_protocol = BuildCoordinatesPackmol("coordinates")
    coordinate_protocol.substance = substance
    coordinate_protocol.max_molecules = number_of_molecules
    coordinate_protocol.execute(str(directory))
    parameter_protocol = BuildSmirnoffSystem("assign_parameters")
    parameter_protocol.force_field_path = force_field_path
    parameter_protocol.coordinate_file_path = coordinate_protocol.coordinate_file_path
    parameter_protocol.substance = substance
    parameter_protocol.execute(str(directory))
    return coordinate_protocol.coordinate_file_path, parameter_protocol.parameterized_system
def test_ligand_receptor_yank_protocol():
    """Smoke-test LigandReceptorYankProtocol setup (setup_only, no simulation)."""
    # Receptor + ligand + solvent system to be "solvated".
    full_substance = Substance()
    full_substance.add_component(
        Component(smiles="c1ccccc1", role=Component.Role.Receptor),
        ExactAmount(1),
    )
    full_substance.add_component(
        Component(smiles="C", role=Component.Role.Ligand),
        ExactAmount(1),
    )
    full_substance.add_component(
        Component(smiles="O", role=Component.Role.Solvent),
        MoleFraction(1.0),
    )
    # Ligand-only (plus solvent) counterpart system.
    solute_substance = Substance()
    solute_substance.add_component(
        Component(smiles="C", role=Component.Role.Ligand),
        ExactAmount(1),
    )
    solute_substance.add_component(
        Component(smiles="O", role=Component.Role.Solvent),
        MoleFraction(1.0),
    )
    thermodynamic_state = ThermodynamicState(
        temperature=298.15 * unit.kelvin, pressure=1.0 * unit.atmosphere
    )
    with tempfile.TemporaryDirectory() as directory:
        with temporarily_change_directory(directory):
            # Serialize a TIP3P SMIRNOFF force field for the protocols to load.
            force_field_path = "ff.json"
            with open(force_field_path, "w") as file:
                file.write(build_tip3p_smirnoff_force_field().json())
            # Build + parameterize minimal complex and ligand systems.
            complex_coordinate_path, complex_system = _setup_dummy_system(
                "full", full_substance, 3, force_field_path
            )
            ligand_coordinate_path, ligand_system = _setup_dummy_system(
                "ligand", solute_substance, 2, force_field_path
            )
            # Configure the protocol with the cheapest possible settings;
            # setup_only=True means execute() only prepares input files.
            run_yank = LigandReceptorYankProtocol("yank")
            run_yank.substance = full_substance
            run_yank.thermodynamic_state = thermodynamic_state
            run_yank.number_of_iterations = 1
            run_yank.steps_per_iteration = 1
            run_yank.checkpoint_interval = 1
            run_yank.verbose = True
            run_yank.setup_only = True
            run_yank.ligand_residue_name = "TMP"
            run_yank.receptor_residue_name = "TMP"
            run_yank.solvated_ligand_coordinates = ligand_coordinate_path
            run_yank.solvated_ligand_system = ligand_system
            run_yank.solvated_complex_coordinates = complex_coordinate_path
            run_yank.solvated_complex_system = complex_system
            run_yank.force_field_path = force_field_path
            # Test passes if execution raises no exception.
            run_yank.execute("", ComputeResources())
@pytest.mark.parametrize("solvent_smiles", ["O", "C(Cl)Cl"])
def test_solvation_yank_protocol(solvent_smiles):
    """Smoke-test SolvationYankProtocol setup for water and DCM solvents."""
    # Solute dissolved in the parameterized solvent.
    full_substance = Substance()
    full_substance.add_component(
        Component(smiles="CO", role=Component.Role.Solute),
        ExactAmount(1),
    )
    full_substance.add_component(
        Component(smiles=solvent_smiles, role=Component.Role.Solvent),
        MoleFraction(1.0),
    )
    # Pure-solvent and solute-only substances for the two end states.
    solvent_substance = Substance()
    solvent_substance.add_component(
        Component(smiles=solvent_smiles, role=Component.Role.Solvent),
        MoleFraction(1.0),
    )
    solute_substance = Substance()
    solute_substance.add_component(
        Component(smiles="CO", role=Component.Role.Solute),
        ExactAmount(1),
    )
    thermodynamic_state = ThermodynamicState(
        temperature=298.15 * unit.kelvin, pressure=1.0 * unit.atmosphere
    )
    with tempfile.TemporaryDirectory() as directory:
        with temporarily_change_directory(directory):
            # Serialize a TIP3P SMIRNOFF force field for the protocols to load.
            force_field_path = "ff.json"
            with open(force_field_path, "w") as file:
                file.write(build_tip3p_smirnoff_force_field().json())
            # Solvated (solution 1) and vacuum (solution 2) end states.
            solvated_coordinate_path, solvated_system = _setup_dummy_system(
                "full", full_substance, 2, force_field_path
            )
            vacuum_coordinate_path, vacuum_system = _setup_dummy_system(
                "vacuum", solute_substance, 1, force_field_path
            )
            # Cheapest settings; setup_only=True only prepares input files.
            run_yank = SolvationYankProtocol("yank")
            run_yank.solute = solute_substance
            run_yank.solvent_1 = solvent_substance
            run_yank.solvent_2 = Substance()
            run_yank.thermodynamic_state = thermodynamic_state
            run_yank.number_of_iterations = 1
            run_yank.steps_per_iteration = 1
            run_yank.checkpoint_interval = 1
            run_yank.verbose = True
            run_yank.setup_only = True
            run_yank.solution_1_coordinates = solvated_coordinate_path
            run_yank.solution_1_system = solvated_system
            run_yank.solution_2_coordinates = vacuum_coordinate_path
            run_yank.solution_2_system = vacuum_system
            # Single fully-interacting lambda window in each leg.
            run_yank.electrostatic_lambdas_1 = [1.00]
            run_yank.steric_lambdas_1 = [1.00]
            run_yank.electrostatic_lambdas_2 = [1.00]
            run_yank.steric_lambdas_2 = [1.00]
            # Test passes if execution raises no exception.
            run_yank.execute("", ComputeResources())
def test_compute_state_energy_gradients(tmpdir):
    """Check that _compute_state_energy_gradients yields a nonzero vdW gradient."""
    # Parameterize a small water box with a TIP3P SMIRNOFF force field.
    build_tip3p_smirnoff_force_field().json(os.path.join(tmpdir, "ff.json"))
    _, parameterized_system = _setup_dummy_system(
        tmpdir, Substance.from_components("O"), 10, os.path.join(tmpdir, "ff.json")
    )
    protocol = SolvationYankProtocol("")
    protocol.thermodynamic_state = ThermodynamicState(
        298.15 * unit.kelvin, 1.0 * unit.atmosphere
    )
    # Differentiate with respect to the water oxygen vdW epsilon.
    protocol.gradient_parameters = [
        ParameterGradientKey("vdW", "[#1]-[#8X2H2+0:1]-[#1]", "epsilon")
    ]
    # Evaluate gradients over a pre-recorded water trajectory fixture.
    gradients = protocol._compute_state_energy_gradients(
        mdtraj.load_dcd(
            get_data_filename("test/trajectories/water.dcd"),
            get_data_filename("test/trajectories/water.pdb"),
        ),
        parameterized_system.topology,
        parameterized_system.force_field.to_force_field(),
        True,
        ComputeResources(),
    )
    # One gradient per requested parameter key, and it must not vanish.
    assert len(gradients) == 1
    assert not np.isclose(gradients[0].value, 0.0 * unit.dimensionless)
def test_analyze_phase(monkeypatch, tmpdir):
    """Exercise _analyze_phase end-to-end with all heavy internals mocked out."""
    from simtk import unit as simtk_unit
    # Generate the required inputs
    build_tip3p_smirnoff_force_field().json(os.path.join(tmpdir, "ff.json"))
    coordinate_path, parameterized_system = _setup_dummy_system(
        tmpdir, Substance.from_components("O"), 10, os.path.join(tmpdir, "ff.json")
    )
    solvent_trajectory = mdtraj.load_dcd(
        get_data_filename("test/trajectories/water.dcd"),
        get_data_filename("test/trajectories/water.pdb"),
    )
    # Mock the internally called methods.
    # Equilibration detection: pretend the whole trajectory is equilibrated.
    monkeypatch.setattr(
        SolvationYankProtocol,
        "_time_series_statistics",
        lambda *_: TimeSeriesStatistics(
            len(solvent_trajectory), len(solvent_trajectory), 1.0, 0
        ),
    )
    # Both trajectory extractors return the same fixture trajectory.
    monkeypatch.setattr(
        SolvationYankProtocol, "_extract_trajectory", lambda *_: solvent_trajectory
    )
    monkeypatch.setattr(
        SolvationYankProtocol,
        "_extract_solvent_trajectory",
        lambda *_: solvent_trajectory,
    )
    # Gradient computation is covered elsewhere; stub it out here.
    monkeypatch.setattr(
        SolvationYankProtocol, "_compute_state_energy_gradients", lambda *_: []
    )
    # Build up the protocol.
    protocol = SolvationYankProtocol("")
    protocol.thermodynamic_state = ThermodynamicState(
        298.15 * unit.kelvin, 1.0 * unit.atmosphere
    )
    protocol.gradient_parameters = [
        ParameterGradientKey("vdW", "[#1]-[#8X2H2+0:1]-[#1]", "epsilon")
    ]
    protocol.solvent_1 = Substance.from_components("O")
    # Minimal fake YANK analysis output with a zero free-energy difference.
    protocol._analysed_output = {
        "general": {"solvent1": {"nstates": 1}},
        "free_energy": {
            "solvent1": {
                "kT": 1.0 / simtk_unit.kilojoules_per_mole,
                "free_energy_diff": 0.0,
                "free_energy_diff_unit": 0.0 * simtk_unit.kilojoules_per_mole,
                "free_energy_diff_error": 0.0,
                "free_energy_diff_error_unit": 0.0 * simtk_unit.kilojoules_per_mole,
            }
        },
    }
    # Test passes if the phase analysis unpacks without raising.
    (
        free_energy,
        solution_trajectory,
        solvent_trajectory,
        solution_gradients,
        solvent_gradients,
    ) = protocol._analyze_phase(
        "", parameterized_system, "solvent1", ComputeResources()
    )
| 34.265957
| 88
| 0.684156
|
4a0fa81523832d7f43e902aedf8d905a0e8efc02
| 7,349
|
py
|
Python
|
targeting_oligo_design_app/orbit_tools.py
|
scott-saunders/orbit
|
50d9d01e0578168bee3850d4fd26ca5782db2820
|
[
"MIT"
] | null | null | null |
targeting_oligo_design_app/orbit_tools.py
|
scott-saunders/orbit
|
50d9d01e0578168bee3850d4fd26ca5782db2820
|
[
"MIT"
] | null | null | null |
targeting_oligo_design_app/orbit_tools.py
|
scott-saunders/orbit
|
50d9d01e0578168bee3850d4fd26ca5782db2820
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import holoviews as hv
import panel as pn
#from functions import *
from Bio.Seq import Seq
from Bio.SeqIO import parse
from bokeh.models import BasicTickFormatter
from holoviews import opts
def get_replichore(pos, ori=3923882.5, ter=1590250.5, genome_len=4641652):
    """
    Determine the replichore of a bacterial chromosome for a certain position.
    Requires origin and terminus positions. Assumes E. coli like organization.

    Parameters
    ----------
    pos : int
        Genomic coordinate of interest.
    ori : float
        Genomic coordinate of the origin of replication.
    ter : float
        Genomic coordinate of the replication terminus.
    genome_len : int
        Total genome length; positions above this are rejected.
        Defaults to the E. coli MG1655 chromosome (4,641,652 bp),
        which was previously hard-coded.

    Returns
    -------
    int
        1 if the position lies on replichore 1, otherwise 2.
    """
    pos = int(pos)
    # Kept as TypeError (not ValueError) for backward compatibility with callers.
    if pos < 0 or pos > genome_len:
        raise TypeError("position must be within genome.")
    if pos > ori or pos < ter:
        return 1
    # BUG FIX: positions exactly equal to ori/ter (possible only with integer
    # ori/ter overrides) previously fell through both branches and crashed
    # with UnboundLocalError; they are now classified as replichore 2.
    return 2
def get_target_oligo(left_pos, right_pos, genome, homology = 90, attB_dir = '+', attB_fwd_seq = 'ggcttgtcgacgacggcggtctccgtcgtcaggatcat', verbose = False):
    """
    Given a set of parameters, get an ORBIT oligo that targets the lagging strand.
    Left and right positions are absolute genomic coordinates that specify the final nucleotides to keep unmodified in the genome,
    everything in between will be replaced by attB. In other words the left position nucleotide is the final nt before attB in the oligo.
    The right position nt is the first nt after attB in the oligo.
    This function determines the lagging strand by calling `get_replichore()` on the left_pos.
    Typically attB_dir should be set to the same direction as the gene of interest, such that the integrating plasmid will insert with payload facing downstream.
    attB_fwd_seq can be modified, and the total homology can be modified, but should be an even number since homology arms are symmetric.
    Verbose prints helpful statements for testing functionality.
    Parameters
    -----------------
    left_pos : int
        Left genomic coordinate of desired attB insertion. attB is added immediately after this nt.
    right_pos : int
        Right genomic coordinate of desired attB insertion. attB is added immediately before this nt.
    genome : str
        Genome as a string.
    homology : int (even)
        Total homology length desired for oligo. Arm length = homology / 2.
    attB_dir : chr ('+' or '-')
        Desired direction of attB based on genomic strand. Typically same direction as gene.
    attB_fwd_seq : str
        Sequence of attB to insert between homology arms.
    verbose : bool
        If true, prints details about genomic positions and replichore.
    Returns
    ---------------
    oligo : str
        Targeting oligo against lagging strand, including the attB sequence in the correct orientation.
    """
    left_pos = int(left_pos)
    right_pos = int(right_pos)
    # Arm length is 1/2 total homology. Arms are symmetric.
    # NOTE(review): an odd `homology` is silently truncated here — the docstring
    # asks for an even value but nothing enforces it; confirm upstream validation.
    arm_len = int(homology / 2)
    # Arms from genome string. Note 0 indexing of string vs. 1 indexing of genomic coordinates.
    # As written, should be inclusive: left arm ends on left_pos, right arm starts on right_pos.
    left_arm = genome[(left_pos - arm_len):left_pos]
    right_arm = genome[(right_pos - 1):(right_pos - 1 + arm_len)]
    # Generate attB reverse sequence (reverse complement of the forward attB).
    seq_attB = Seq(attB_fwd_seq)
    attB_rev_seq = str(seq_attB.reverse_complement())
    # Replichore 1: lagging strand is the '-' strand, so both arms are
    # reverse-complemented and their order is swapped in the oligo.
    if get_replichore(left_pos) == 1:
        rep = 1
        # Reverse complement replichore 1 sequences.
        left_arm_seq = Seq(left_arm)
        left_arm_prime = str(left_arm_seq.reverse_complement())
        right_arm_seq = Seq(right_arm)
        right_arm_prime = str(right_arm_seq.reverse_complement())
        # Determine attB direction and paste fwd/rev seq accordingly
        if attB_dir == '+':
            oligo = right_arm_prime + attB_rev_seq + left_arm_prime
        elif attB_dir == '-':
            oligo = right_arm_prime + attB_fwd_seq + left_arm_prime
    # Replichore 2: lagging strand is the '+' strand, arms used as-is.
    elif get_replichore(left_pos) == 2:
        rep = 2
        # '+' arm sequence used. Determine attB direction and paste accordingly.
        if attB_dir == '+':
            oligo = left_arm + attB_fwd_seq + right_arm
        elif attB_dir == '-':
            oligo = left_arm + attB_rev_seq + right_arm
    # Verbose print statements
    # NOTE(review): an attB_dir outside {'+', '-'} leaves `oligo` unbound and
    # raises UnboundLocalError at the return below — validate upstream.
    if verbose:
        print('left_arm_coord = ', left_pos - arm_len,' : ', left_pos)
        print('right_arm_coord = ', right_pos - 1, ' : ', right_pos -1 + arm_len)
        print('Replichore = ', rep)
    return oligo
def get_pos_details(left_pos, right_pos, homology, direction):
    """Build a Markdown pane summarising the designed targeting oligo.

    Reports arm coordinates, replichore, attB direction, total oligo
    length (homology + 38 nt attB) and the strand layout of the oligo.
    """
    left_pos = int(left_pos)
    right_pos = int(right_pos)
    replichore = get_replichore(left_pos)
    arm_len = int(homology) / 2
    # Layout of the oligo depends on the replichore / direction combination.
    rep_dir = str(replichore) + direction
    rep_dir_dict = {
        "1+": "`5' |-- Right_arm (Downstream) --|-- attB_rev --|-- Left_arm (Upstream) --| 3'`",
        "1-": "`5' |-- Right_arm (Upstream) --|-- attB_fwd --|-- Left_arm (Downstream) --| 3'`",
        "2+": "`5' |-- Left_arm (Upstream) --|-- attB_fwd --|-- Right_arm (Downstream) --| 3'`",
        "2-": "`5' |-- Left_arm (Downstream) --|-- attB_rev --|-- Right_arm (Upstream) --| 3'`"
    }
    md = (
        f'\n\n**Left arm:** `({int(left_pos - arm_len)} - {left_pos}) nt`'
        f'\n\n**Right arm:** `({right_pos} - {int(right_pos + arm_len)}) nt`'
        f'\n\n**Replichore:** `{replichore}`'
        f'\n\n**attB direction:** `{direction}`'
        f'\n\n**Oligo length:** `{int(homology + 38)} nt`'
        '\n\n**Oligo structure:** ' + rep_dir_dict.get(rep_dir, "No info available")
    )
    return pn.pane.Markdown(md, width=1000)
def plot_nearby(left_pos, right_pos, homology, df_genes):
    """Plot the targeting-oligo arms against nearby annotated genes.

    Builds a holoviews overlay of gene segments (from ``df_genes``), the
    two homology arms, and vertical guide lines at the insertion site,
    windowed to ±1 kb around the target.
    """
    left_pos = int(left_pos)
    right_pos = int(right_pos)
    arm_len = homology / 2
    # Two segments on a dedicated 'target_oligo' row: left and right arms.
    arms = {'start': [left_pos-arm_len, right_pos], 'stop': [left_pos, right_pos+arm_len],'arm':['left','right'],'target_oligo':['target_oligo','target_oligo']}
    # Vertical guides marking the exact attB insertion boundaries.
    left_line = hv.VLine(left_pos).opts(color = 'black', line_width = 1)
    right_line = hv.VLine(right_pos).opts(color = 'black', line_width = 1)
    arms = hv.Segments(arms, kdims = ['start', 'target_oligo','stop','target_oligo'])
    # Gene bodies drawn as segments per strand ('Direction'), with hover info.
    # NOTE(review): df_genes is assumed to carry left_pos/right_pos/center_pos/
    # Direction/gene_label columns — confirm against the caller.
    genome_segments = hv.Segments(df_genes, kdims = ['left_pos','Direction', 'right_pos','Direction']).opts(tools = ['hover'])
    genome_points = hv.Scatter(df_genes, 'left_pos','Direction') * hv.Scatter(df_genes, 'right_pos','Direction')
    genome_labels = hv.Labels(df_genes,kdims = ['center_pos','Direction'], vdims = 'gene_label' ).opts(text_font_size='8pt', text_color='gray', xoffset = 0)
    # Compose all layers into a single overlay.
    genome_plot = genome_segments * genome_points* genome_labels *left_line * right_line * arms
    return genome_plot.opts(xlim = (left_pos - 1000, right_pos + 1000), width = 1000,xformatter = BasicTickFormatter(use_scientific = False))
| 39.090426
| 161
| 0.635052
|
4a0fa84aedf1bb46143122f7829a716544ea45da
| 2,006
|
py
|
Python
|
twint/user.py
|
3nws/twint
|
5555c69eb711636adf5ede16c5665fbece2d83c3
|
[
"MIT"
] | null | null | null |
twint/user.py
|
3nws/twint
|
5555c69eb711636adf5ede16c5665fbece2d83c3
|
[
"MIT"
] | null | null | null |
twint/user.py
|
3nws/twint
|
5555c69eb711636adf5ede16c5665fbece2d83c3
|
[
"MIT"
] | null | null | null |
import datetime
import logging as logme
class user:
    """Plain attribute container for fields parsed from a Twitter user payload."""
    # Type tag consumed by downstream serializers/formatters.
    type = "user"
    def __init__(self):
        pass
# strftime formats used when rendering a user's join date/time.
User_formats = {
    'join_date': '%Y-%m-%d',
    'join_time': '%H:%M:%S %Z'
}
# ur object must be a json from the endpoint https://api.twitter.com/graphql
def User(ur):
    """Parse a Twitter GraphQL user response into a ``user`` object.

    :param ur: decoded JSON payload from https://api.twitter.com/graphql
    :return: ``user`` instance with ``id``, ``location`` and ``username`` set
    :raises KeyError: if the payload lacks the expected ``data``/``user`` keys
    """
    logme.debug(__name__ + ':User')
    # BUG FIX: this guard used `and`, which (a) never triggered when only one
    # key was missing and (b) itself raised an uncontrolled KeyError via
    # `ur['data']` when 'data' was absent. `or` short-circuits correctly and
    # routes both malformations through the intended error path below.
    if 'data' not in ur or 'user' not in ur['data']:
        msg = 'malformed json! cannot be parsed to get user data'
        logme.fatal(msg)
        raise KeyError(msg)
    _usr = user()
    _usr.id = ur['data']['user']['rest_id']
    _usr.location = ur['data']['user']['legacy']['location']
    _usr.username = ur['data']['user']['legacy']['screen_name']
    # Only id/location/username are parsed at present. Further `legacy` fields
    # (name, bio, url, join date/time, tweet/follower/like/media counts,
    # protected/verified flags, avatar, banner) were deliberately disabled —
    # see git history to re-enable them.
    # TODO : future implementation
    # legacy_extended_profile is also available in some cases which can be used to get DOB of user
    return _usr
| 37.849057
| 98
| 0.611167
|
4a0fa8ba7d089a8b08e178ac04636cdcb4e0add1
| 4,322
|
py
|
Python
|
warrant/__init__.py
|
warrant-dev/warrant-python
|
484e8081f3fa61677105f56fa4768d64f96fdb4a
|
[
"MIT"
] | null | null | null |
warrant/__init__.py
|
warrant-dev/warrant-python
|
484e8081f3fa61677105f56fa4768d64f96fdb4a
|
[
"MIT"
] | null | null | null |
warrant/__init__.py
|
warrant-dev/warrant-python
|
484e8081f3fa61677105f56fa4768d64f96fdb4a
|
[
"MIT"
] | null | null | null |
import requests
import json
__version__ = "0.2.1"
# Base URL and versioned path prefix for all Warrant REST API requests.
API_ENDPOINT = "https://api.warrant.dev"
API_VERSION = "/v1"
class WarrantException(Exception):
    """Raised for any error returned by (or while calling) the Warrant API.

    The message is prefixed with ``Warrant error:`` and, when known, the
    HTTP status code of the failed request.
    """
    def __init__(self, msg, status_code=-1):
        # -1 is the sentinel for "no HTTP status available".
        if status_code == -1:
            prefix = 'Warrant error: '
        else:
            prefix = f"Warrant error: {status_code} "
        super().__init__(prefix + msg)
class User(object):
    """Subject of a warrant: an object type/id plus the relation it holds."""
    def __init__(self, object_type, object_id, relation):
        # camelCase attribute names mirror the Warrant API's JSON schema.
        self.objectType, self.objectId, self.relation = object_type, object_id, relation
class Warrant(object):
    """Minimal client for the Warrant REST API (api.warrant.dev)."""
    def __init__(self, api_key):
        """:param api_key: API key sent in the ``Authorization`` header."""
        self._apiKey = api_key
    def _make_post_request(self, uri, json={}):
        """POST ``json`` to ``uri`` and return the decoded response body.

        :raises WarrantException: on any non-200 response.
        """
        # NOTE: the mutable default is safe here — `json` is never mutated.
        headers = { "Authorization": "ApiKey " + self._apiKey }
        resp = requests.post(url = API_ENDPOINT+API_VERSION+uri, headers = headers, json = json)
        if resp.status_code == 200:
            return resp.json()
        else:
            raise WarrantException(msg=resp.text, status_code=resp.status_code)
    def _make_get_request(self, uri, params={}):
        """GET ``uri`` with query ``params`` and return the decoded body.

        :raises WarrantException: on any non-200 response.
        """
        headers = { "Authorization": "ApiKey " + self._apiKey }
        resp = requests.get(url = API_ENDPOINT+API_VERSION+uri, headers = headers, params = params)
        if resp.status_code == 200:
            return resp.json()
        else:
            raise WarrantException(msg=resp.text, status_code=resp.status_code)
    def create_user(self, user_id=""):
        """Create a user (server-generated id when ``user_id`` is empty)."""
        if user_id == "":
            payload = {}
        else:
            payload = { "userId": user_id }
        # Renamed from `json` to avoid shadowing the stdlib json module.
        resp = self._make_post_request(uri="/users", json=payload)
        return resp['userId']
    def create_tenant(self, tenant_id=""):
        """Create a tenant (server-generated id when ``tenant_id`` is empty)."""
        if tenant_id == "":
            payload = {}
        else:
            payload = { "tenantId": tenant_id }
        resp = self._make_post_request(uri="/tenants", json=payload)
        return resp['tenantId']
    def create_session(self, user_id):
        """Create a session for ``user_id`` and return its token."""
        if user_id == "":
            raise WarrantException(msg="Invalid userId provided")
        resp = self._make_post_request(uri="/users/"+user_id+"/sessions")
        return resp['token']
    def create_warrant(self, object_type, object_id, relation, user):
        """Create a warrant granting ``user`` ``relation`` on the object.

        :param user: a userId string or a :class:`User` instance
        :return: the new warrant's id
        """
        if object_type == "" or object_id == "" or relation == "":
            raise WarrantException(msg="Invalid object_type, object_id and/or relation")
        payload = {
            "objectType": object_type,
            "objectId": object_id,
            "relation": relation
        }
        if isinstance(user, str):
            payload["user"] = { "userId": user }
        elif isinstance(user, User):
            # BUG FIX: was `json.dumps(user.__dict__)`, which double-encoded the
            # user — requests serializes the payload again, so the API received
            # a JSON *string* instead of the expected JSON object.
            payload["user"] = user.__dict__
        else:
            raise WarrantException(msg="Invalid type for \'user\'. Must be of type User or str")
        resp = self._make_post_request(uri="/warrants", json=payload)
        return resp['id']
    def list_warrants(self, object_type="", object_id="", relation="", user_id=""):
        """List warrants, optionally filtered by any of the given fields."""
        filters = {
            "objectType": object_type,
            "objectId": object_id,
            "relation": relation,
            "userId": user_id,
        }
        resp = self._make_get_request(uri="/warrants", params=filters)
        return resp
    def is_authorized(self, object_type, object_id, relation, user_to_check):
        """Return True if ``user_to_check`` holds ``relation`` on the object.

        A 401 from the API means "not authorized" (False); any other
        non-200 response raises :class:`WarrantException`.
        """
        if object_type == "" or object_id == "" or relation == "":
            raise WarrantException(msg="Invalid object_type, object_id and/or relation")
        payload = {
            "objectType": object_type,
            "objectId": object_id,
            "relation": relation
        }
        if isinstance(user_to_check, str):
            payload["user"] = { "userId": user_to_check }
        elif isinstance(user_to_check, User):
            # BUG FIX: same double-serialization as in create_warrant.
            payload["user"] = user_to_check.__dict__
        else:
            raise WarrantException(msg="Invalid type for \'user_to_check\'. Must be of type User or str")
        headers = { "Authorization": "ApiKey " + self._apiKey }
        resp = requests.post(url = API_ENDPOINT+API_VERSION+"/authorize", headers = headers, json=payload)
        if resp.status_code == 200:
            return True
        elif resp.status_code == 401:
            return False
        else:
            raise WarrantException(msg=resp.text, status_code=resp.status_code)
| 37.912281
| 106
| 0.597409
|
4a0fa9e51a3e81480dece5d7980ef48a12897774
| 5,937
|
py
|
Python
|
hummingbot/strategy/avellaneda_market_making/start.py
|
phbrgnomo/hummingbot
|
72382954a06a277248f44b321344186aef1c367c
|
[
"Apache-2.0"
] | 1
|
2021-07-01T20:56:32.000Z
|
2021-07-01T20:56:32.000Z
|
hummingbot/strategy/avellaneda_market_making/start.py
|
phbrgnomo/hummingbot
|
72382954a06a277248f44b321344186aef1c367c
|
[
"Apache-2.0"
] | null | null | null |
hummingbot/strategy/avellaneda_market_making/start.py
|
phbrgnomo/hummingbot
|
72382954a06a277248f44b321344186aef1c367c
|
[
"Apache-2.0"
] | 1
|
2021-10-12T15:40:43.000Z
|
2021-10-12T15:40:43.000Z
|
from typing import (
List,
Tuple,
)
from hummingbot import data_path
import os.path
from hummingbot.client.hummingbot_application import HummingbotApplication
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from hummingbot.strategy.hanging_orders_tracker import HangingOrdersAggregationType
from hummingbot.strategy.avellaneda_market_making import (
AvellanedaMarketMakingStrategy,
)
from hummingbot.strategy.avellaneda_market_making.avellaneda_market_making_config_map import avellaneda_market_making_config_map as c_map
from decimal import Decimal
import pandas as pd
def start(self):
    """Initialize and launch the Avellaneda market-making strategy.

    Reads every user-facing parameter from the strategy config map (`c_map`),
    initializes the exchange connector and trading pair, and assigns the
    constructed AvellanedaMarketMakingStrategy to ``self.strategy``.
    Any exception during setup is reported to the user and logged.
    """
    try:
        # --- basic order parameters ---
        order_amount = c_map.get("order_amount").value
        order_optimization_enabled = c_map.get("order_optimization_enabled").value
        order_refresh_time = c_map.get("order_refresh_time").value
        exchange = c_map.get("exchange").value.lower()
        raw_trading_pair = c_map.get("market").value
        max_order_age = c_map.get("max_order_age").value
        # Percent-style config values are entered as 0-100 and scaled to 0-1 here.
        inventory_target_base_pct = 0 if c_map.get("inventory_target_base_pct").value is None else \
            c_map.get("inventory_target_base_pct").value / Decimal('100')
        filled_order_delay = c_map.get("filled_order_delay").value
        order_refresh_tolerance_pct = c_map.get("order_refresh_tolerance_pct").value / Decimal('100')
        order_levels = c_map.get("order_levels").value
        order_override = c_map.get("order_override").value
        hanging_orders_enabled = c_map.get("hanging_orders_enabled").value
        # Aggregation is currently forced to NO_AGGREGATION; the config-driven
        # selection below was disabled deliberately (kept for reference).
        hanging_orders_aggregation_type = HangingOrdersAggregationType.NO_AGGREGATION
        # if hanging_orders_enabled:
        #     hanging_orders_aggregation_type = getattr(HangingOrdersAggregationType,
        #                                               c_map.get("hanging_orders_aggregation_type").value.upper())
        # else:
        #     hanging_orders_aggregation_type = HangingOrdersAggregationType.NO_AGGREGATION
        hanging_orders_cancel_pct = c_map.get("hanging_orders_cancel_pct").value / Decimal('100')
        add_transaction_costs_to_orders = c_map.get("add_transaction_costs").value
        # --- market / wallet initialization ---
        trading_pair: str = raw_trading_pair
        maker_assets: Tuple[str, str] = self._initialize_market_assets(exchange, [trading_pair])[0]
        market_names: List[Tuple[str, List[str]]] = [(exchange, [trading_pair])]
        self._initialize_wallet(token_trading_pairs=list(set(maker_assets)))
        self._initialize_markets(market_names)
        self.assets = set(maker_assets)
        maker_data = [self.markets[exchange], trading_pair] + list(maker_assets)
        self.market_trading_pair_tuples = [MarketTradingPairTuple(*maker_data)]
        strategy_logging_options = AvellanedaMarketMakingStrategy.OPTION_LOG_ALL
        # --- Avellaneda model parameters ---
        # Two mutually exclusive modes: derive model factors from min/max spread
        # inputs, or take the raw model factors directly. The unused set is None.
        parameters_based_on_spread = c_map.get("parameters_based_on_spread").value
        if parameters_based_on_spread:
            risk_factor = order_book_depth_factor = order_amount_shape_factor = None
            min_spread = c_map.get("min_spread").value / Decimal(100)
            max_spread = c_map.get("max_spread").value / Decimal(100)
            vol_to_spread_multiplier = c_map.get("vol_to_spread_multiplier").value
            volatility_sensibility = c_map.get("volatility_sensibility").value / Decimal('100')
            inventory_risk_aversion = c_map.get("inventory_risk_aversion").value
        else:
            min_spread = max_spread = vol_to_spread_multiplier = inventory_risk_aversion = volatility_sensibility = None
            order_book_depth_factor = c_map.get("order_book_depth_factor").value
            risk_factor = c_map.get("risk_factor").value
            order_amount_shape_factor = c_map.get("order_amount_shape_factor").value
        # closing_time is configured in days; converted here to milliseconds
        # (3600 * 24 seconds/day * 1e3 ms/s).
        closing_time = c_map.get("closing_time").value * Decimal(3600 * 24 * 1e3)
        volatility_buffer_size = c_map.get("volatility_buffer_size").value
        # Debug CSV named after the strategy file plus a launch timestamp.
        debug_csv_path = os.path.join(data_path(),
                                      HummingbotApplication.main_application().strategy_file_name.rsplit('.', 1)[0] +
                                      f"_{pd.Timestamp.now().strftime('%Y-%m-%d_%H-%M-%S')}.csv")
        self.strategy = AvellanedaMarketMakingStrategy(
            market_info=MarketTradingPairTuple(*maker_data),
            order_amount=order_amount,
            order_optimization_enabled=order_optimization_enabled,
            inventory_target_base_pct=inventory_target_base_pct,
            order_refresh_time=order_refresh_time,
            max_order_age=max_order_age,
            order_refresh_tolerance_pct=order_refresh_tolerance_pct,
            filled_order_delay=filled_order_delay,
            order_levels=order_levels,
            order_override=order_override,
            hanging_orders_enabled=hanging_orders_enabled,
            hanging_orders_aggregation_type=hanging_orders_aggregation_type,
            hanging_orders_cancel_pct=hanging_orders_cancel_pct,
            add_transaction_costs_to_orders=add_transaction_costs_to_orders,
            logging_options=strategy_logging_options,
            hb_app_notification=True,
            parameters_based_on_spread=parameters_based_on_spread,
            min_spread=min_spread,
            max_spread=max_spread,
            vol_to_spread_multiplier=vol_to_spread_multiplier,
            volatility_sensibility=volatility_sensibility,
            inventory_risk_aversion=inventory_risk_aversion,
            order_book_depth_factor=order_book_depth_factor,
            risk_factor=risk_factor,
            order_amount_shape_factor=order_amount_shape_factor,
            closing_time=closing_time,
            debug_csv_path=debug_csv_path,
            volatility_buffer_size=volatility_buffer_size,
            is_debug=False
        )
    except Exception as e:
        # Surface the error to the user and keep the full traceback in the log.
        self._notify(str(e))
        self.logger().error("Unknown error during initialization.", exc_info=True)
| 56.009434
| 137
| 0.716523
|
4a0fab0db78df078b4210f40f8dcf34818b0aa1c
| 3,118
|
py
|
Python
|
tests/test_imputations/test_mice_imputer.py
|
kearnz/autoimpute
|
a214e7ad2c664cd6c57843934ebf159067d6261f
|
[
"MIT"
] | 191
|
2019-03-16T17:00:33.000Z
|
2022-03-11T12:14:17.000Z
|
tests/test_imputations/test_mice_imputer.py
|
kearnz/autoimpute
|
a214e7ad2c664cd6c57843934ebf159067d6261f
|
[
"MIT"
] | 57
|
2019-03-09T23:59:38.000Z
|
2022-03-01T08:17:33.000Z
|
tests/test_imputations/test_mice_imputer.py
|
kearnz/autoimpute
|
a214e7ad2c664cd6c57843934ebf159067d6261f
|
[
"MIT"
] | 19
|
2019-04-13T19:01:23.000Z
|
2021-05-14T08:59:27.000Z
|
"""Tests written to ensure the MiceImputer in the imputations package works.
Note that this also tests the MultipleImputer, which really just passes to
the SingleImputer. SingleImputer has tests, some of which are the same as here.
Tests use the pytest library. The tests in this module ensure the following:
- `test_stochastic_predictive_imputer` test stochastic strategy.
- `test_bayesian_reg_imputer` test bayesian regression strategy.
- `test_bayesian_logistic_imputer` test bayesian logistic strategy.
- `test_pmm_lrd_imputer` test pmm and lrd strategy.
- `test_normal_unit_variance_imputer` test unit variance imputer
"""
import pytest
from autoimpute.imputations import MiceImputer
from autoimpute.utils import dataframes
dfs = dataframes
# pylint:disable=len-as-condition
# pylint:disable=pointless-string-statement
def test_stochastic_predictive_imputer():
    """Stochastic strategy works for numerical columns of the imputer."""
    # one deterministic (least squares) and one stochastic imputer for column A
    deterministic = MiceImputer(strategy={"A": "least squares"})
    stochastic = MiceImputer(strategy={"A": "stochastic"})
    # both must fit and transform without raising
    _ = deterministic.fit_transform(dfs.df_num)
    _ = stochastic.fit_transform(dfs.df_num)
    # both record the same imputation bookkeeping for column A
    assert deterministic.imputed_["A"] == stochastic.imputed_["A"]
def test_bayesian_reg_imputer():
    """Bayesian least squares works for numerical columns of the imputer."""
    # explicit per-column strategy with keyword args forwarded to the imputer
    designed = MiceImputer(
        strategy={"y": "bayesian least squares"},
        imp_kwgs={"y": {"fill_value": "random", "am": 11, "cores": 2}},
    )
    designed.fit_transform(dfs.df_bayes_reg)
    # blanket strategy applied to all numerical columns
    blanket = MiceImputer(strategy="bayesian least squares")
    blanket.fit_transform(dfs.df_num)
def test_bayesian_logistic_imputer():
    """Bayesian binary logistic works for a binary column of the imputer."""
    imputer = MiceImputer(
        strategy={"y": "bayesian binary logistic"},
        imp_kwgs={"y": {"fill_value": "random"}},
    )
    imputer.fit_transform(dfs.df_bayes_log)
def test_pmm_lrd_imputer():
    """Pmm and lrd strategies work for a numerical column of the imputer."""
    # predictive mean matching, with kwargs forwarded to the imputer
    pmm = MiceImputer(
        strategy={"y": "pmm"},
        imp_kwgs={"y": {"fill_value": "random", "copy_x": False}},
    )
    pmm.fit_transform(dfs.df_bayes_reg)
    # local residual draws, same kwargs
    lrd = MiceImputer(
        strategy={"y": "lrd"},
        imp_kwgs={"y": {"fill_value": "random", "copy_x": False}},
    )
    lrd.fit_transform(dfs.df_bayes_reg)
def test_normal_unit_variance_imputer():
    """Normal unit variance strategy works for a numerical column."""
    imputer = MiceImputer(strategy={"y": "normal unit variance"})
    imputer.fit_transform(dfs.df_bayes_reg)
def test_partial_dependence_imputer():
    """Edge case for partial dependence between columns is handled."""
    imputer = MiceImputer(strategy="stochastic")
    imputer.fit_transform(dfs.df_partial_dependence)
| 44.542857
| 79
| 0.692752
|
4a0faba9239bef91fc3a60d6211e34ff02eb9369
| 406
|
py
|
Python
|
invenio_cli/version.py
|
cenouralm/invenio-cli
|
0af8b1aad98f0fe9ef7ef73c4a431cf5900db8a6
|
[
"MIT"
] | null | null | null |
invenio_cli/version.py
|
cenouralm/invenio-cli
|
0af8b1aad98f0fe9ef7ef73c4a431cf5900db8a6
|
[
"MIT"
] | null | null | null |
invenio_cli/version.py
|
cenouralm/invenio-cli
|
0af8b1aad98f0fe9ef7ef73c4a431cf5900db8a6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019-2020 CERN.
# Copyright (C) 2019-2020 Northwestern University.
#
# Invenio-Cli is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Version information for Invenio-Cli.
This file is imported by ``invenio_cli.__init__``,
and parsed by ``setup.py``.
"""
# Semantic version string; parsed textually by setup.py, so keep it a plain literal.
__version__ = '0.18.0'
| 25.375
| 72
| 0.70936
|
4a0fabf28da7252d72c03df6ff2609e61b2db576
| 33,652
|
py
|
Python
|
python_modules/dagster/dagster_tests/core_tests/definitions_tests/test_decorators.py
|
basilvetas/dagster
|
b08f5534a0b0277dab38cb7b6a46d324e94b8940
|
[
"Apache-2.0"
] | 2
|
2021-06-21T17:50:26.000Z
|
2021-06-21T19:14:23.000Z
|
python_modules/dagster/dagster_tests/core_tests/definitions_tests/test_decorators.py
|
basilvetas/dagster
|
b08f5534a0b0277dab38cb7b6a46d324e94b8940
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster_tests/core_tests/definitions_tests/test_decorators.py
|
basilvetas/dagster
|
b08f5534a0b0277dab38cb7b6a46d324e94b8940
|
[
"Apache-2.0"
] | 1
|
2021-08-18T17:21:57.000Z
|
2021-08-18T17:21:57.000Z
|
# encoding: utf-8
# py27 compat
import re
from datetime import datetime, time
import pendulum
import pytest
from dagster import (
Any,
DagsterInvalidDefinitionError,
DagsterInvariantViolationError,
DependencyDefinition,
Field,
InputDefinition,
Output,
OutputDefinition,
PipelineDefinition,
ScheduleDefinition,
composite_solid,
execute_pipeline,
execute_solid,
lambda_solid,
pipeline,
schedule,
solid,
)
from dagster.core.definitions.decorators import (
daily_schedule,
hourly_schedule,
monthly_schedule,
weekly_schedule,
)
from dagster.core.definitions.schedule import ScheduleExecutionContext
from dagster.core.test_utils import instance_for_test
from dagster.core.utility_solids import define_stub_solid
from dagster.utils.partitions import (
DEFAULT_DATE_FORMAT,
DEFAULT_HOURLY_FORMAT_WITHOUT_TIMEZONE,
DEFAULT_HOURLY_FORMAT_WITH_TIMEZONE,
DEFAULT_MONTHLY_FORMAT,
)
from dateutil.relativedelta import relativedelta
# This file tests a lot of parameter name stuff, so these warnings are spurious
# pylint: disable=unused-variable, unused-argument, redefined-outer-name
def test_no_parens_solid():
    """A bare @lambda_solid (no parentheses) produces an executable solid."""
    called = {}
    @lambda_solid
    def hello_world():
        called["yup"] = True
    result = execute_solid(hello_world)
    assert called["yup"]
def test_empty_solid():
    """@lambda_solid() with empty parentheses behaves like the bare decorator."""
    called = {}
    @lambda_solid()
    def hello_world():
        called["yup"] = True
    result = execute_solid(hello_world)
    assert called["yup"]
def test_solid():
    """@solid with one output definition returns its value via output_value()."""
    @solid(output_defs=[OutputDefinition()])
    def hello_world(_context):
        return {"foo": "bar"}
    result = execute_solid(hello_world)
    assert result.success
    assert result.output_value()["foo"] == "bar"
def test_solid_one_output():
    """@lambda_solid captures a plain return value as the single output."""
    @lambda_solid
    def hello_world():
        return {"foo": "bar"}
    result = execute_solid(hello_world)
    assert result.success
    assert result.output_value()["foo"] == "bar"
def test_solid_yield():
    """A solid may yield an Output object instead of returning a value."""
    @solid(output_defs=[OutputDefinition()])
    def hello_world(_context):
        yield Output(value={"foo": "bar"})
    result = execute_solid(hello_world)
    assert result.success
    assert result.output_value()["foo"] == "bar"
def test_solid_result_return():
    """A solid may return an Output object directly."""
    @solid(output_defs=[OutputDefinition()])
    def hello_world(_context):
        return Output(value={"foo": "bar"})
    result = execute_solid(hello_world)
    assert result.success
    assert result.output_value()["foo"] == "bar"
def test_solid_with_explicit_empty_outputs():
    """Returning a value from a solid declared with no outputs raises."""
    @solid(output_defs=[])
    def hello_world(_context):
        return "foo"
    with pytest.raises(DagsterInvariantViolationError) as exc_info:
        result = execute_solid(hello_world)
    # Message differs between py3 (<class 'str'>) and py2 (<type 'str'>).
    assert (
        "Error in solid hello_world: Unexpectedly returned output foo of type "
        "<class 'str'>. Solid is explicitly defined to return no results."
    ) in str(exc_info.value) or (
        "Error in solid hello_world: Unexpectedly returned output foo of type "
        "<type 'str'>. Solid is explicitly defined to return no results."
    ) in str(
        exc_info.value
    )  # py2
def test_solid_with_implicit_single_output():
    """@solid() with no output_defs implicitly declares one result output."""
    @solid()
    def hello_world(_context):
        return "foo"
    result = execute_solid(hello_world)
    assert result.success
    assert result.output_value() == "foo"
def test_solid_return_list_instead_of_multiple_results():
    """Returning a plain list where multiple named outputs are declared raises."""
    @solid(output_defs=[OutputDefinition(name="foo"), OutputDefinition(name="bar")])
    def hello_world(_context):
        return ["foo", "bar"]
    with pytest.raises(DagsterInvariantViolationError) as exc_info:
        result = execute_solid(hello_world)
    assert "unexpectedly returned output ['foo', 'bar']" in str(exc_info.value)
def test_lambda_solid_with_name():
    """@lambda_solid(name=...) overrides the solid name without changing behavior."""
    @lambda_solid(name="foobar")
    def hello_world():
        return {"foo": "bar"}
    result = execute_solid(hello_world)
    assert result.success
    assert result.output_value()["foo"] == "bar"
def test_solid_with_name():
    """@solid(name=...) overrides the solid name without changing behavior."""
    @solid(name="foobar", output_defs=[OutputDefinition()])
    def hello_world(_context):
        return {"foo": "bar"}
    result = execute_solid(hello_world)
    assert result.success
    assert result.output_value()["foo"] == "bar"
def test_solid_with_input():
    """A declared input is wired from an upstream stub solid via a dependency."""
    @lambda_solid(input_defs=[InputDefinition(name="foo_to_foo")])
    def hello_world(foo_to_foo):
        return foo_to_foo
    # NOTE: local `pipeline` deliberately shadows the imported @pipeline decorator here.
    pipeline = PipelineDefinition(
        solid_defs=[define_stub_solid("test_value", {"foo": "bar"}), hello_world],
        dependencies={"hello_world": {"foo_to_foo": DependencyDefinition("test_value")}},
    )
    pipeline_result = execute_pipeline(pipeline)
    result = pipeline_result.result_for_solid("hello_world")
    assert result.success
    assert result.output_value()["foo"] == "bar"
def test_lambda_solid_definition_errors():
    """*args in a lambda_solid signature is rejected at definition time."""
    with pytest.raises(DagsterInvalidDefinitionError, match="positional vararg"):
        @lambda_solid(input_defs=[InputDefinition(name="foo")])
        def vargs(foo, *args):
            pass
def test_solid_definition_errors():
    """Mismatches between declared inputs and function parameters are rejected."""
    # *args is never allowed
    with pytest.raises(DagsterInvalidDefinitionError, match="positional vararg"):
        @solid(input_defs=[InputDefinition(name="foo")], output_defs=[OutputDefinition()])
        def vargs(context, foo, *args):
            pass
    # parameter name does not match the declared input name
    with pytest.raises(DagsterInvalidDefinitionError):
        @solid(input_defs=[InputDefinition(name="foo")], output_defs=[OutputDefinition()])
        def wrong_name(context, bar):
            pass
    # a declared input has no corresponding parameter
    with pytest.raises(DagsterInvalidDefinitionError):
        @solid(
            input_defs=[InputDefinition(name="foo"), InputDefinition(name="bar")],
            output_defs=[OutputDefinition()],
        )
        def wrong_name_2(context, foo):
            pass
    # missing the required leading context parameter
    with pytest.raises(DagsterInvalidDefinitionError):
        @solid(input_defs=[InputDefinition(name="foo")], output_defs=[OutputDefinition()])
        def no_context(foo):
            pass
    # extra parameter with no declared input
    with pytest.raises(DagsterInvalidDefinitionError):
        @solid(input_defs=[InputDefinition(name="foo")], output_defs=[OutputDefinition()])
        def extras(_context, foo, bar):
            pass
    # **kwargs is accepted in place of named parameters
    @solid(
        input_defs=[InputDefinition(name="foo"), InputDefinition(name="bar")],
        output_defs=[OutputDefinition()],
    )
    def valid_kwargs(context, **kwargs):
        pass
    @solid(
        input_defs=[InputDefinition(name="foo"), InputDefinition(name="bar")],
        output_defs=[OutputDefinition()],
    )
    def valid(context, foo, bar):
        pass
    # with no explicit input_defs, inputs are inferred from the signature
    @solid
    def valid_because_inference(context, foo, bar):
        pass
def test_wrong_argument_to_pipeline():
    """Passing undecorated callables as solid_defs raises a helpful error."""
    def non_solid_func():
        pass
    with pytest.raises(
        DagsterInvalidDefinitionError, match="You have passed a lambda or function non_solid_func"
    ):
        PipelineDefinition(solid_defs=[non_solid_func])
    with pytest.raises(
        DagsterInvalidDefinitionError, match="You have passed a lambda or function <lambda>"
    ):
        PipelineDefinition(solid_defs=[lambda x: x])
def test_descriptions():
    """The description kwarg is stored on the solid definition."""
    @solid(description="foo")
    def solid_desc(_context):
        pass
    assert solid_desc.description == "foo"
def test_any_config_field():
    """A Field(Any) config schema passes arbitrary values through to solid_config."""
    called = {}
    conf_value = 234
    @solid(config_schema=Field(Any))
    def hello_world(context):
        assert context.solid_config == conf_value
        called["yup"] = True
    result = execute_solid(
        hello_world, run_config={"solids": {"hello_world": {"config": conf_value}}}
    )
    assert called["yup"]
def test_solid_no_arg():
    """@solid on a zero-argument function is rejected: context is required."""
    with pytest.raises(
        DagsterInvalidDefinitionError,
        match="does not have required positional parameter 'context'.",
    ):
        @solid
        def noop():
            return
def test_scheduler():
    """@schedule run config can depend on the scheduled execution time."""
    # NOTE(review): define_schedules is defined but never called in this test.
    def define_schedules():
        return [
            ScheduleDefinition(
                name="my_schedule",
                cron_schedule="* * * * *",
                pipeline_name="test_pipeline",
                run_config={},
            )
        ]
    @schedule(cron_schedule="* * * * *", pipeline_name="foo_pipeline")
    def echo_time_schedule(context):
        # Echo the scheduled time back into run config; empty string when absent.
        return {
            "echo_time": (
                (
                    context.scheduled_execution_time.isoformat()
                    if context.scheduled_execution_time
                    else ""
                )
            )
        }
    with instance_for_test() as instance:
        context_without_time = ScheduleExecutionContext(instance, None)
        execution_time = datetime(year=2019, month=2, day=27)
        context_with_time = ScheduleExecutionContext(instance, execution_time)
        assert echo_time_schedule.get_run_config(context_without_time) == {"echo_time": ""}
        assert echo_time_schedule.get_run_config(context_with_time) == {
            "echo_time": execution_time.isoformat()
        }
def test_schedule_decorators_sanity():
    """All schedule decorators construct; execution_timezone is optional and validated."""
    @solid
    def do_nothing(_):
        pass
    @pipeline
    def foo_pipeline():
        do_nothing()
    @schedule(cron_schedule="* * * * *", pipeline_name="foo_pipeline")
    def foo_schedule(context):
        return {}
    @monthly_schedule(
        pipeline_name="foo_pipeline",
        execution_day_of_month=3,
        start_date=datetime(year=2019, month=1, day=1),
    )
    def monthly_foo_schedule():
        return {}
    @weekly_schedule(
        pipeline_name="foo_pipeline",
        execution_day_of_week=1,
        start_date=datetime(year=2019, month=1, day=1),
    )
    def weekly_foo_schedule():
        return {}
    @daily_schedule(
        pipeline_name="foo_pipeline", start_date=datetime(year=2019, month=1, day=1),
    )
    def daily_foo_schedule():
        return {}
    @hourly_schedule(
        pipeline_name="foo_pipeline", start_date=datetime(year=2019, month=1, day=1),
    )
    def hourly_foo_schedule():
        return {}
    # without an explicit execution_timezone, none is set
    assert not foo_schedule.execution_timezone
    assert not monthly_foo_schedule.execution_timezone
    assert not weekly_foo_schedule.execution_timezone
    assert not hourly_foo_schedule.execution_timezone
    assert not daily_foo_schedule.execution_timezone
    @schedule(
        cron_schedule="* * * * *", pipeline_name="foo_pipeline", execution_timezone="US/Central",
    )
    def foo_schedule_timezone(context):
        return {}
    assert foo_schedule_timezone.execution_timezone == "US/Central"
    # an unknown timezone name is rejected at definition time
    with pytest.raises(
        DagsterInvalidDefinitionError,
        match=re.escape(
            "Invalid execution timezone MadeUpTimeZone for invalid_timezone_foo_schedule"
        ),
    ):
        @daily_schedule(
            pipeline_name="foo_pipeline",
            start_date=datetime(year=2019, month=1, day=1),
            execution_timezone="MadeUpTimeZone",
        )
        def invalid_timezone_foo_schedule():
            return {}
def _check_partitions(
    partition_schedule_def,
    expected_num_partitions,
    expected_start_date,
    expected_format,
    expected_relative_delta,
):
    """Assert a partitioned schedule yields the expected evenly-spaced partitions.

    Checks partition count, the first partition's value/name, and that each
    subsequent partition advances by `expected_relative_delta` with a name
    formatted per `expected_format`.
    """
    partitions = partition_schedule_def.get_partition_set().partition_fn()
    assert len(partitions) == expected_num_partitions
    assert partitions[0].value == expected_start_date
    assert partitions[0].name == expected_start_date.strftime(expected_format)
    for index, partition in enumerate(partitions):
        partition_value = partitions[0].value + (index * expected_relative_delta)
        assert partition.value == partitions[0].value + (index * expected_relative_delta)
        assert partition.name == partition_value.strftime(expected_format)
# Hours between Jan 1 and Feb 27 (the mocked "now"): 31 January days + 26 February days.
HOURS_UNTIL_FEBRUARY_27 = 24 * (31 + 26)
def test_partitions_for_hourly_schedule_decorators_without_timezone():
    """Hourly schedule with a naive start date partitions in the instance timezone."""
    with instance_for_test() as instance:
        # Freeze "now" at 2019-02-27 00:01:01 US/Central for deterministic partitions.
        with pendulum.test(pendulum.create(2019, 2, 27, 0, 1, 1, tz="US/Central")):
            context_without_time = ScheduleExecutionContext(instance, None)
            start_date = datetime(year=2019, month=1, day=1)
            @hourly_schedule(
                pipeline_name="foo_pipeline",
                start_date=start_date,
                execution_time=time(hour=0, minute=25),
            )
            def hourly_foo_schedule(hourly_time):
                return {"hourly_time": hourly_time.isoformat()}
            _check_partitions(
                hourly_foo_schedule,
                HOURS_UNTIL_FEBRUARY_27,
                start_date,
                DEFAULT_HOURLY_FORMAT_WITHOUT_TIMEZONE,
                relativedelta(hours=1),
            )
            # with no execution time, the latest complete partition is used
            assert hourly_foo_schedule.get_run_config(context_without_time) == {
                "hourly_time": pendulum.create(
                    year=2019, month=2, day=26, hour=23, tz="US/Central"
                ).isoformat()
            }
            assert hourly_foo_schedule.should_execute(context_without_time)
            # time that's invalid since it corresponds to a partition that hasn't happened yet
            # should not execute and should throw if it tries to generate run config
            execution_time_with_invalid_partition = datetime(
                year=2019, month=2, day=27, hour=3, minute=25
            )
            context_with_invalid_time = ScheduleExecutionContext(
                instance, execution_time_with_invalid_partition
            )
            assert not hourly_foo_schedule.should_execute(context_with_invalid_time)
            with pytest.raises(
                DagsterInvariantViolationError,
                match="The partition selection function `default_partition_selector` did not return a partition from PartitionSet hourly_foo_schedule_partitions",
            ):
                hourly_foo_schedule.get_run_config(context_with_invalid_time)
            # a valid past execution time maps to the previous hour's partition
            valid_time = datetime(year=2019, month=1, day=27, hour=1, minute=25)
            context_with_valid_time = ScheduleExecutionContext(instance, valid_time)
            assert hourly_foo_schedule.get_run_config(context_with_valid_time) == {
                "hourly_time": pendulum.create(
                    year=2019, month=1, day=27, hour=0, tz="US/Central"
                ).isoformat()
            }
            assert hourly_foo_schedule.should_execute(context_with_valid_time)
def test_partitions_for_hourly_schedule_decorators_with_timezone():
    """Hourly schedule start dates are interpreted/converted per execution_timezone."""
    with instance_for_test() as instance:
        with pendulum.test(pendulum.create(2019, 2, 27, 0, 1, 1, tz="US/Central")):
            start_date = datetime(year=2019, month=1, day=1)
            # You can specify a start date with no timezone and it will be assumed to be
            # in the execution timezone
            @hourly_schedule(
                pipeline_name="foo_pipeline",
                start_date=start_date,
                execution_time=time(hour=0, minute=25),
                execution_timezone="US/Central",
            )
            def hourly_central_schedule(hourly_time):
                return {"hourly_time": hourly_time.isoformat()}
            assert hourly_central_schedule.execution_timezone == "US/Central"
            _check_partitions(
                hourly_central_schedule,
                HOURS_UNTIL_FEBRUARY_27,
                pendulum.instance(start_date, tz="US/Central"),
                DEFAULT_HOURLY_FORMAT_WITH_TIMEZONE,
                relativedelta(hours=1),
            )
            valid_time = pendulum.create(
                year=2019, month=1, day=27, hour=1, minute=25, tz="US/Central"
            )
            context_with_valid_time = ScheduleExecutionContext(instance, valid_time)
            assert hourly_central_schedule.get_run_config(context_with_valid_time) == {
                "hourly_time": pendulum.create(
                    year=2019, month=1, day=27, hour=0, tz="US/Central"
                ).isoformat()
            }
            assert hourly_central_schedule.should_execute(context_with_valid_time)
            # You can specify a start date in a different timezone and it will be transformed into the
            # execution timezone
            start_date_with_different_timezone = pendulum.create(2019, 1, 1, 0, tz="US/Pacific")
            @hourly_schedule(
                pipeline_name="foo_pipeline",
                start_date=start_date_with_different_timezone,
                execution_time=time(hour=0, minute=25),
                execution_timezone="US/Central",
            )
            def hourly_central_schedule_with_timezone_start_time(hourly_time):
                return {"hourly_time": hourly_time.isoformat()}
            _check_partitions(
                hourly_central_schedule_with_timezone_start_time,
                HOURS_UNTIL_FEBRUARY_27 - 2,  # start date is two hours later since it's in PT
                start_date_with_different_timezone.in_tz("US/Central"),
                DEFAULT_HOURLY_FORMAT_WITH_TIMEZONE,
                relativedelta(hours=1),
            )
def test_partitions_for_daily_schedule_decorators_without_timezone():
    """Daily schedule with a naive start date: one partition per elapsed day."""
    with instance_for_test() as instance:
        with pendulum.test(pendulum.create(2019, 2, 27, 0, 1, 1, tz="US/Central")):
            context_without_time = ScheduleExecutionContext(instance, None)
            start_date = datetime(year=2019, month=1, day=1)
            @daily_schedule(
                pipeline_name="foo_pipeline",
                start_date=start_date,
                execution_time=time(hour=9, minute=30),
            )
            def daily_foo_schedule(daily_time):
                return {"daily_time": daily_time.isoformat()}
            # 31 January days + 26 complete February days
            _check_partitions(
                daily_foo_schedule,
                (31 + 26),
                start_date,
                DEFAULT_DATE_FORMAT,
                relativedelta(days=1),
            )
            # execution on the 27th at 09:30 maps to the previous day's partition
            valid_daily_time = datetime(year=2019, month=1, day=27, hour=9, minute=30)
            context_with_valid_time = ScheduleExecutionContext(instance, valid_daily_time)
            assert daily_foo_schedule.get_run_config(context_with_valid_time) == {
                "daily_time": pendulum.create(
                    year=2019, month=1, day=26, tz="US/Central"
                ).isoformat()
            }
            assert daily_foo_schedule.should_execute(context_with_valid_time)
            assert daily_foo_schedule.get_run_config(context_without_time) == {
                "daily_time": pendulum.create(
                    year=2019, month=2, day=26, tz="US/Central"
                ).isoformat()
            }
            assert daily_foo_schedule.should_execute(context_without_time)
def test_partitions_for_daily_schedule_decorators_with_timezone():
    """Daily schedule with execution_timezone partitions in that timezone."""
    with instance_for_test() as instance:
        with pendulum.test(pendulum.create(2019, 2, 27, 0, 1, 1, tz="US/Central")):
            start_date = datetime(year=2019, month=1, day=1)
            @daily_schedule(
                pipeline_name="foo_pipeline",
                start_date=start_date,
                execution_time=time(hour=9, minute=30),
                execution_timezone="US/Central",
            )
            def daily_central_schedule(daily_time):
                return {"daily_time": daily_time.isoformat()}
            assert daily_central_schedule.execution_timezone == "US/Central"
            _check_partitions(
                daily_central_schedule,
                (31 + 26),
                pendulum.instance(start_date, tz="US/Central"),
                DEFAULT_DATE_FORMAT,
                relativedelta(days=1),
            )
            valid_daily_time = pendulum.create(
                year=2019, month=1, day=27, hour=9, minute=30, tz="US/Central"
            )
            context_with_valid_time = ScheduleExecutionContext(instance, valid_daily_time)
            assert daily_central_schedule.get_run_config(context_with_valid_time) == {
                "daily_time": pendulum.create(
                    year=2019, month=1, day=26, tz="US/Central"
                ).isoformat()
            }
            assert daily_central_schedule.should_execute(context_with_valid_time)
def test_partitions_for_weekly_schedule_decorators_without_timezone():
    """Weekly schedule with a naive start date: one partition per elapsed week."""
    with instance_for_test() as instance:
        with pendulum.test(pendulum.create(2019, 2, 27, 0, 1, 1, tz="US/Central")):
            context_without_time = ScheduleExecutionContext(instance, None)
            start_date = datetime(year=2019, month=1, day=1)
            @weekly_schedule(
                pipeline_name="foo_pipeline",
                execution_day_of_week=2,
                start_date=start_date,
                execution_time=time(9, 30),
            )
            def weekly_foo_schedule(weekly_time):
                return {"weekly_time": weekly_time.isoformat()}
            # execution on Wed Jan 30 maps to the partition starting Tue Jan 22
            valid_weekly_time = datetime(year=2019, month=1, day=30, hour=9, minute=30)
            context_with_valid_time = ScheduleExecutionContext(instance, valid_weekly_time)
            assert weekly_foo_schedule.get_run_config(context_with_valid_time) == {
                "weekly_time": pendulum.create(
                    year=2019, month=1, day=22, tz="US/Central"
                ).isoformat()
            }
            assert weekly_foo_schedule.should_execute(context_with_valid_time)
            assert weekly_foo_schedule.get_run_config(context_without_time) == {
                "weekly_time": pendulum.create(
                    year=2019, month=2, day=19, tz="US/Central"
                ).isoformat()
            }
            assert weekly_foo_schedule.should_execute(context_without_time)
            _check_partitions(
                weekly_foo_schedule, 8, start_date, DEFAULT_DATE_FORMAT, relativedelta(weeks=1)
            )
def test_partitions_for_weekly_schedule_decorators_with_timezone():
    """Weekly schedule with execution_timezone partitions in that timezone."""
    with instance_for_test() as instance:
        with pendulum.test(pendulum.create(2019, 2, 27, 0, 1, 1, tz="US/Central")):
            start_date = datetime(year=2019, month=1, day=1)
            @weekly_schedule(
                pipeline_name="foo_pipeline",
                execution_day_of_week=2,
                start_date=start_date,
                execution_time=time(9, 30),
                execution_timezone="US/Central",
            )
            def weekly_foo_schedule(weekly_time):
                return {"weekly_time": weekly_time.isoformat()}
            assert weekly_foo_schedule.execution_timezone == "US/Central"
            valid_weekly_time = pendulum.create(
                year=2019, month=1, day=30, hour=9, minute=30, tz="US/Central"
            )
            context_with_valid_time = ScheduleExecutionContext(instance, valid_weekly_time)
            assert weekly_foo_schedule.get_run_config(context_with_valid_time) == {
                "weekly_time": pendulum.create(
                    year=2019, month=1, day=22, tz="US/Central"
                ).isoformat()
            }
            assert weekly_foo_schedule.should_execute(context_with_valid_time)
            _check_partitions(
                weekly_foo_schedule,
                8,
                pendulum.instance(start_date, tz="US/Central"),
                DEFAULT_DATE_FORMAT,
                relativedelta(weeks=1),
            )
def test_partitions_for_monthly_schedule_decorators_without_timezone():
    """Monthly schedule with a naive start date: one partition per completed month."""
    with instance_for_test() as instance:
        with pendulum.test(pendulum.create(2019, 2, 27, 0, 1, 1, tz="US/Central")):
            context_without_time = ScheduleExecutionContext(instance, None)
            start_date = datetime(year=2019, month=1, day=1)
            @monthly_schedule(
                pipeline_name="foo_pipeline",
                execution_day_of_month=3,
                start_date=start_date,
                execution_time=time(9, 30),
            )
            def monthly_foo_schedule(monthly_time):
                return {"monthly_time": monthly_time.isoformat()}
            # execution on Feb 3 maps to January's partition
            valid_monthly_time = datetime(year=2019, month=2, day=3, hour=9, minute=30)
            context_with_valid_time = ScheduleExecutionContext(instance, valid_monthly_time)
            assert monthly_foo_schedule.get_run_config(
                ScheduleExecutionContext(instance, valid_monthly_time)
            ) == {
                "monthly_time": pendulum.create(
                    year=2019, month=1, day=1, tz="US/Central"
                ).isoformat()
            }
            assert monthly_foo_schedule.should_execute(context_with_valid_time)
            assert monthly_foo_schedule.get_run_config(context_without_time) == {
                "monthly_time": pendulum.create(
                    year=2019, month=1, day=1, tz="US/Central"
                ).isoformat()
            }
            assert monthly_foo_schedule.should_execute(context_without_time)
            # only January has fully elapsed by the mocked Feb 27 "now"
            _check_partitions(
                monthly_foo_schedule, 1, start_date, DEFAULT_MONTHLY_FORMAT, relativedelta(months=1)
            )
def test_partitions_for_monthly_schedule_decorators_with_timezone():
    """Monthly schedule with execution_timezone partitions in that timezone."""
    with instance_for_test() as instance:
        with pendulum.test(pendulum.create(2019, 2, 27, 0, 1, 1, tz="US/Central")):
            start_date = datetime(year=2019, month=1, day=1)
            @monthly_schedule(
                pipeline_name="foo_pipeline",
                execution_day_of_month=3,
                start_date=start_date,
                execution_time=time(9, 30),
                execution_timezone="US/Central",
            )
            def monthly_foo_schedule(monthly_time):
                return {"monthly_time": monthly_time.isoformat()}
            assert monthly_foo_schedule.execution_timezone == "US/Central"
            valid_monthly_time = pendulum.create(
                year=2019, month=2, day=3, hour=9, minute=30, tz="US/Central"
            )
            context_with_valid_time = ScheduleExecutionContext(instance, valid_monthly_time)
            assert monthly_foo_schedule.get_run_config(
                ScheduleExecutionContext(instance, valid_monthly_time)
            ) == {
                "monthly_time": pendulum.create(
                    year=2019, month=1, day=1, tz="US/Central"
                ).isoformat()
            }
            assert monthly_foo_schedule.should_execute(context_with_valid_time)
            _check_partitions(
                monthly_foo_schedule,
                1,
                pendulum.instance(start_date, tz="US/Central"),
                DEFAULT_MONTHLY_FORMAT,
                relativedelta(months=1),
            )
def test_schedule_decorators_bad():
    """Invalid schedule-decorator arguments raise DagsterInvalidDefinitionError;
    legal-but-suspicious start_dates only emit a UserWarning."""
    @solid
    def do_nothing(_):
        pass
    @pipeline
    def foo_pipeline():
        do_nothing()
    # execution_day_of_month=32 is out of range and raises.
    with pytest.raises(DagsterInvalidDefinitionError):
        @monthly_schedule(
            pipeline_name="foo_pipeline",
            execution_day_of_month=32,
            start_date=datetime(year=2019, month=1, day=1),
        )
        def monthly_foo_schedule_over():
            return {}
    # A monthly start_date not on the 1st of the month only warns.
    with pytest.warns(
        UserWarning,
        match=re.escape(
            "`start_date` must be at the beginning of the first day of the month for a monthly schedule."
        ),
    ):
        @monthly_schedule(
            pipeline_name="foo_pipeline",
            execution_day_of_month=7,
            start_date=datetime(year=2019, month=1, day=5),
        )
        def monthly_foo_schedule_later_in_month():
            return {}
    # execution_day_of_month=0 is out of range and raises.
    with pytest.raises(DagsterInvalidDefinitionError):
        @monthly_schedule(
            pipeline_name="foo_pipeline",
            execution_day_of_month=0,
            start_date=datetime(year=2019, month=1, day=1),
        )
        def monthly_foo_schedule_under():
            return {}
    # execution_day_of_week=7 is out of range and raises.
    with pytest.raises(DagsterInvalidDefinitionError):
        @weekly_schedule(
            pipeline_name="foo_pipeline",
            execution_day_of_week=7,
            start_date=datetime(year=2019, month=1, day=1),
        )
        def weekly_foo_schedule_over():
            return {}
    # A weekly start_date not at midnight only warns.
    with pytest.warns(
        UserWarning,
        match=re.escape("`start_date` must be at the beginning of a day for a weekly schedule."),
    ):
        @weekly_schedule(
            pipeline_name="foo_pipeline",
            execution_day_of_week=3,
            start_date=datetime(year=2019, month=1, day=1, hour=2),
        )
        def weekly_foo_schedule_start_later_in_day():
            return {}
    # A daily start_date not at midnight only warns.
    with pytest.warns(
        UserWarning,
        match=re.escape("`start_date` must be at the beginning of a day for a daily schedule."),
    ):
        @daily_schedule(
            pipeline_name="foo_pipeline", start_date=datetime(year=2019, month=1, day=1, hour=2),
        )
        def daily_foo_schedule_start_later_in_day():
            return {}
    # An hourly start_date not on the hour only warns.
    with pytest.warns(
        UserWarning,
        match=re.escape(
            "`start_date` must be at the beginning of the hour for an hourly schedule."
        ),
    ):
        @hourly_schedule(
            pipeline_name="foo_pipeline",
            start_date=datetime(year=2019, month=1, day=1, hour=2, minute=30),
        )
        def hourly_foo_schedule_start_later_in_hour():
            return {}
def test_solid_docstring():
    """__doc__ and __name__ survive every decorator variant, with and without
    an explicit name override."""
    @solid
    def foo_solid(_):
        """FOO_DOCSTRING"""
        return
    @lambda_solid
    def bar_solid():
        """BAR_DOCSTRING"""
        return
    @solid(name="baz")
    def baz_solid(_):
        """BAZ_DOCSTRING"""
        return
    @lambda_solid(name="quux")
    def quux_solid():
        """QUUX_DOCSTRING"""
        return
    @composite_solid
    def comp_solid():
        """COMP_DOCSTRING"""
        foo_solid()
    @pipeline
    def the_pipeline():
        """THE_DOCSTRING"""
        quux_solid()
    # (decorated object, expected __doc__, expected __name__)
    expectations = [
        (foo_solid, "FOO_DOCSTRING", "foo_solid"),
        (bar_solid, "BAR_DOCSTRING", "bar_solid"),
        (baz_solid, "BAZ_DOCSTRING", "baz_solid"),
        (quux_solid, "QUUX_DOCSTRING", "quux_solid"),
        (comp_solid, "COMP_DOCSTRING", "comp_solid"),
        (the_pipeline, "THE_DOCSTRING", "the_pipeline"),
    ]
    for decorated, expected_doc, expected_name in expectations:
        assert decorated.__doc__ == expected_doc
        assert decorated.__name__ == expected_name
def test_solid_yields_single_bare_value():
    """Yielding a bare value (not wrapped in Output/AssetMaterialization/
    ExpectationResult) from a solid fails with a descriptive error.

    The unused `result = ...` binding from the original is dropped:
    execute_solid raises inside pytest.raises, so nothing is ever bound.
    """
    @solid
    def return_iterator(_):
        yield 1
    with pytest.raises(
        DagsterInvariantViolationError,
        match=re.escape("Compute function for solid return_iterator yielded a value of type <")
        + r"(class|type)"
        + re.escape(
            " 'int'> rather than an instance of Output, AssetMaterialization, or ExpectationResult. "
            "Values yielded by solids must be wrapped in one of these types. If your solid has a "
            "single output and yields no other events, you may want to use `return` instead of "
            "`yield` in the body of your solid compute function. If you are already using "
            "`return`, and you expected to return a value of type <"
        )
        + r"(class|type)"
        + re.escape(
            " 'int'>, you may be inadvertently returning a generator rather than the value you "
            "expected."
        ),
    ):
        execute_solid(return_iterator)
def test_solid_yields_multiple_bare_values():
    """Yielding several bare values fails the same way as yielding one: the
    first unwrapped value triggers the invariant violation.

    The unused `result = ...` binding from the original is dropped:
    execute_solid raises inside pytest.raises, so nothing is ever bound.
    """
    @solid
    def return_iterator(_):
        yield 1
        yield 2
    with pytest.raises(
        DagsterInvariantViolationError,
        match=re.escape("Compute function for solid return_iterator yielded a value of type <")
        + r"(class|type)"
        + re.escape(
            " 'int'> rather than an instance of Output, AssetMaterialization, or ExpectationResult. "
            "Values yielded by solids must be wrapped in one of these types. If your solid has a "
            "single output and yields no other events, you may want to use `return` instead of "
            "`yield` in the body of your solid compute function. If you are already using "
            "`return`, and you expected to return a value of type <"
        )
        + r"(class|type)"
        + re.escape(
            " 'int'>, you may be inadvertently returning a generator rather than the value you "
            "expected."
        ),
    ):
        execute_solid(return_iterator)
def test_solid_returns_iterator():
    """Returning a generator object from a solid is treated like yielding bare
    values and fails with the same descriptive error.

    The unused `result = ...` binding from the original is dropped:
    execute_solid raises inside pytest.raises, so nothing is ever bound.
    """
    def iterator():
        for i in range(3):
            yield i
    @solid
    def return_iterator(_):
        return iterator()
    with pytest.raises(
        DagsterInvariantViolationError,
        match=re.escape("Compute function for solid return_iterator yielded a value of type <")
        + r"(class|type)"
        + re.escape(
            " 'int'> rather than an instance of Output, AssetMaterialization, or ExpectationResult. "
            "Values yielded by solids must be wrapped in one of these types. If your solid has a "
            "single output and yields no other events, you may want to use `return` instead of "
            "`yield` in the body of your solid compute function. If you are already using "
            "`return`, and you expected to return a value of type <"
        )
        + r"(class|type)"
        + re.escape(
            " 'int'>, you may be inadvertently returning a generator rather than the value you "
            "expected."
        ),
    ):
        execute_solid(return_iterator)
def test_input_default():
    """The default value of a lambda_solid input is used when no input is
    provided at execution time."""
    @lambda_solid
    def foo(bar="ok"):
        return bar
    execution_result = execute_solid(foo)
    assert "ok" == execution_result.output_value()
| 32.863281
| 162
| 0.62662
|
4a0fad41bcf4be3cd578e16efbd94e9cd33e6222
| 315
|
py
|
Python
|
exercises/CursoemVideo/ex022.py
|
arthurguerra/cursoemvideo-python
|
37f45ec25f422673fa9bbeee682e098f14d8ceab
|
[
"MIT"
] | null | null | null |
exercises/CursoemVideo/ex022.py
|
arthurguerra/cursoemvideo-python
|
37f45ec25f422673fa9bbeee682e098f14d8ceab
|
[
"MIT"
] | null | null | null |
exercises/CursoemVideo/ex022.py
|
arthurguerra/cursoemvideo-python
|
37f45ec25f422673fa9bbeee682e098f14d8ceab
|
[
"MIT"
] | null | null | null |
# Read a name and report: upper/lower-case forms, letter count (spaces
# excluded), and the first name with its length.
# Fixes: input() already returns str, so the redundant str() wrapper is
# removed; .strip() guards against stray leading/trailing spaces skewing the
# counts; .format() calls are replaced with equivalent f-strings.
nome = input('Digite seu nome: ').strip()
print(f'Maiúscula: {nome.upper()}.')
print(f'Minúscula: {nome.lower()}.')
print(f'Total de letras: {len(nome) - nome.count(" ")}.')
primeiro = nome.split()[0]
print(f'O seu primeiro nome é {primeiro} e possui {len(primeiro)} letras.')
| 28.636364
| 91
| 0.64127
|
4a0fadb2b1fd2acc57bd20a682e09827646a65d8
| 14
|
py
|
Python
|
songs/src/tests/utils/__init__.py
|
macieyn/moje-357
|
5148cbaf893d58675de14060abccacfa57e8b8ce
|
[
"MIT"
] | 1
|
2021-08-05T11:02:44.000Z
|
2021-08-05T11:02:44.000Z
|
songs/src/tests/utils/__init__.py
|
macieyn/moje-357
|
5148cbaf893d58675de14060abccacfa57e8b8ce
|
[
"MIT"
] | null | null | null |
songs/src/tests/utils/__init__.py
|
macieyn/moje-357
|
5148cbaf893d58675de14060abccacfa57e8b8ce
|
[
"MIT"
] | null | null | null |
# Entry point
| 7
| 13
| 0.714286
|
4a0fb03cb67f43b5348628c6dc07f3dfe9791359
| 1,808
|
py
|
Python
|
meow/webs/templates.py
|
aachurin/meow.webs
|
b3acb92234ba64f40bc7a7947ca2216544ae116a
|
[
"MIT"
] | null | null | null |
meow/webs/templates.py
|
aachurin/meow.webs
|
b3acb92234ba64f40bc7a7947ca2216544ae116a
|
[
"MIT"
] | null | null | null |
meow/webs/templates.py
|
aachurin/meow.webs
|
b3acb92234ba64f40bc7a7947ca2216544ae116a
|
[
"MIT"
] | null | null | null |
import typing
from . import App, Settings, Component
try:
import jinja2
except ImportError: # pragma: nocover
jinja2 = None # type: ignore
class Templates:
    """Abstract interface for template renderers.

    Concrete implementations (e.g. JinjaTemplates) must override render().
    """
    def render(self, path: str, **context: object) -> str:
        """Render the template at *path* with *context*; always overridden."""
        raise NotImplementedError()
class JinjaTemplates(Templates):
    """Templates implementation backed by a Jinja2 Environment.

    Loaders are built from ``settings.TEMPLATE_DIRS``: each entry is either a
    path string (``"package:sub/dir"`` selects a PackageLoader, anything else
    a FileSystemLoader) or a dict mapping template-name prefixes to such
    paths (wrapped in a PrefixLoader).
    """
    def __init__(self, app: App, settings: Settings):
        if jinja2 is None:  # pragma: nocover
            raise RuntimeError("`jinja2` must be installed to use `Templates`.")
        def get_loader(path: str) -> jinja2.BaseLoader:
            # "package:path" loads from an installed package, else from disk.
            if ":" in path:
                package_name, path = path.split(":", 1)
                return jinja2.PackageLoader(package_name, path)
            else:
                return jinja2.FileSystemLoader(path)
        loaders: typing.List[jinja2.BaseLoader] = []
        for template_dir in settings.TEMPLATE_DIRS:
            if isinstance(template_dir, dict):
                # Dict entry: each prefix gets its own loader.
                mapping = {
                    prefix: get_loader(path) for prefix, path in template_dir.items()
                }
                loaders.append(jinja2.PrefixLoader(mapping))
            else:
                loaders.append(get_loader(template_dir))
        # Collapse to a single loader when there is only one.
        # NOTE(review): assumes TEMPLATE_DIRS is non-empty — loaders[0] would
        # raise IndexError otherwise; confirm settings validation upstream.
        loader = jinja2.ChoiceLoader(loaders) if len(loaders) > 1 else loaders[0]
        self.env = jinja2.Environment(autoescape=True, loader=loader)
        # Expose the app's URL helpers to every template.
        self.env.globals["reverse_url"] = app.reverse_url
        self.env.globals["static_url"] = app.static_url
    def render(self, path: str, **context: object) -> str:
        """Render the template at *path* with *context* and return the text."""
        template = self.env.get_template(path)
        return template.render(**context)
class TemplatesComponent(Component, singleton=True):
    """Dependency-injection component that provides the Templates service."""
    def resolve(self, app: App, settings: Settings) -> Templates:
        """Build the Jinja-backed renderer from the app and its settings."""
        templates = JinjaTemplates(app, settings)
        return templates
TEMPLATES_COMPONENTS = [TemplatesComponent()]
| 33.481481
| 85
| 0.631637
|
4a0fb077032899ece87917bac7bb4bd13a401eb0
| 4,068
|
py
|
Python
|
yahoo_finance.py
|
michaelmu/historical_returns
|
8be99f02865e2dd599ae526a5f3b7fd45177f90e
|
[
"Apache-2.0"
] | null | null | null |
yahoo_finance.py
|
michaelmu/historical_returns
|
8be99f02865e2dd599ae526a5f3b7fd45177f90e
|
[
"Apache-2.0"
] | null | null | null |
yahoo_finance.py
|
michaelmu/historical_returns
|
8be99f02865e2dd599ae526a5f3b7fd45177f90e
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import datetime
import requests
import numpy as np
import pandas as pd
import time
import io
import re
from sqlite_cache import SqliteCache
class YahooFinance(object):
    """Downloader for Yahoo Finance historical data.

    Performs the crumb/cookie handshake the download endpoint requires,
    optionally caches raw CSV responses in a SqliteCache keyed by URL, and
    parses results into pandas DataFrames.
    """
    # Download URL template; start/end are UNIX timestamps and ``action`` is
    # interpolated into the ``events`` query parameter.
    _url = ("https://query1.finance.yahoo.com/v7/finance/download/{ticker}"
            "?period1={start}&period2={end}&interval={interval}&events={action}")
    def __init__(self, sqlite_file=None):
        """Optionally pass a SQLite file path to enable response caching."""
        self.yahoo_checked = None  # time of last crumb refresh; None = never
        self.dfs = {}
        if sqlite_file:
            self.cache = SqliteCache(sqlite_file)
        else:
            self.cache = None
    def get_yahoo_crumb(self, force=False, ttl=60*10):
        """
        Regenerate the yahoo cookie
        """
        # use same cookie for 5 min
        # NOTE(review): the comment above says 5 min but the default ttl is
        # 600 s (10 min) — confirm which is intended.
        if self.yahoo_checked and not force:
            now = datetime.datetime.now()
            delta = (now - self.yahoo_checked).total_seconds()
            if delta < ttl:
                return (self.yahoo_crumb, self.yahoo_cookie)
        res = requests.get('https://finance.yahoo.com/quote/SPY/history')
        self.yahoo_cookie = res.cookies['B']
        # Scrape the "crumb" token embedded in the page's inline CrumbStore JS.
        pattern = re.compile('.*"CrumbStore":{"crumb":"(?P<crumb>[^"]+)"}')
        for line in res.text.splitlines():
            m = pattern.match(line)
            if m is not None:
                self.yahoo_crumb = m.groupdict()['crumb']
        # NOTE(review): if the pattern never matches (page layout change),
        # self.yahoo_crumb is never assigned and the return below raises
        # AttributeError — confirm whether that failure mode is acceptable.
        # Reset timer
        self.yahoo_checked = datetime.datetime.now()
        print("Yahoo crumb: {} Yahoo cookie: {}".format(self.yahoo_crumb, self.yahoo_cookie))
        return (self.yahoo_crumb, self.yahoo_cookie)
    def format_date(self, date_str, position):
        """
        Format date from string

        Accepts None (defaults: 1950-01-01 for 'start', today's midnight for
        'end'), a datetime, or a 'YYYY-MM-DD' string; returns a UNIX timestamp.
        """
        if date_str is None and position == 'start':
            return int(time.mktime(time.strptime('1950-01-01', '%Y-%m-%d')))
        if date_str is None and position == 'end':
            dt = datetime.datetime.now()
            return int(time.mktime(dt.replace(hour=0, minute=0, second=0, microsecond=0).timetuple()))
        if isinstance(date_str, datetime.datetime):
            return int(time.mktime(date_str.timetuple()))
        return int(time.mktime(time.strptime(str(date_str), '%Y-%m-%d')))
    def fetch(self, url):
        """
        Fetch the url results. Use cached results if exist.
        """
        cache_key = url
        if self.cache:
            res = self.cache.get(cache_key)
            if res:
                print("Using cached result...")
                return res
        crumb, cookie = self.get_yahoo_crumb()
        url = url + "&crumb=" + crumb
        results = requests.get(url, cookies={'B': cookie}).text
        # NOTE(review): substring check — any CSV body containing the word
        # "error" would also trigger this; confirm against real responses.
        if "error" in results:
            raise Exception('"Returned error in results', results)
        # Cache the results
        if self.cache:
            self.cache.update(cache_key, results)
        return results
    def download_ticker(self, ticker, start=None, end=None, interval='1d', action='hist'):
        """
        Download ticker results for given action
        :Parameters:
        tickers : str, list
            List of tickers to download
        start: str
            Download start date string (YYYY-MM-DD) or datetime. Default is 1950-01-01
        end: str
            Download end date string (YYYY-MM-DD) or datetime. Default is today
        interval: str
            The time interval for the results. Default: 1d
        action: str
            One of 'hist', 'div', 'split' for price history, dividends, or stock splits
            Default: 'hist'
        """
        assert action in ['hist', 'div', 'split']
        # format dates
        start = self.format_date(start, 'start')
        end = self.format_date(end, 'end')
        url = self._url.format(ticker=ticker, start=start, end=end, interval=interval, action=action)
        res = self.fetch(url)
        # 'null' fields become NaN and those rows are dropped.
        # NOTE(review): error_bad_lines is deprecated in newer pandas (use
        # on_bad_lines) — confirm the pinned pandas version.
        df = pd.read_csv(
            io.StringIO(res), index_col=0, error_bad_lines=False,header=0).replace('null', np.nan).dropna()
        df.index = pd.to_datetime(df.index)
        return df
| 38.018692
| 107
| 0.58235
|
4a0fb07c9d57946a14dd500f47147bc1471a0b2f
| 2,277
|
py
|
Python
|
dev_cmds.py
|
trishume/VintageousPlus
|
1dd62435138234979fe5bb413e1731119b017daf
|
[
"MIT"
] | 6
|
2017-04-01T05:30:08.000Z
|
2017-04-05T14:17:40.000Z
|
dev_cmds.py
|
trishume/VintageousPlus
|
1dd62435138234979fe5bb413e1731119b017daf
|
[
"MIT"
] | 1
|
2017-04-04T06:47:13.000Z
|
2017-04-04T14:26:32.000Z
|
dev_cmds.py
|
trishume/VintageousPlus
|
1dd62435138234979fe5bb413e1731119b017daf
|
[
"MIT"
] | null | null | null |
import sublime
import sublime_plugin
import os
def find_project_path(path):
    """Walk up from *path* looking for the directory that contains
    'VintageousPlus.sublime-project'.

    Returns the containing directory, or a falsy value ('' or None) when no
    project file is found.  Fixes an infinite loop in the original: for an
    absolute path with no project file, os.path.dirname('/') == '/' so the
    old loop never terminated; we now stop when dirname() is a fixed point.
    """
    while path:
        if os.path.exists(os.path.join(path,
                          'VintageousPlus.sublime-project')):
            return path
        parent = os.path.dirname(path)
        if parent == path:
            # Reached the filesystem root without finding a project file.
            return None
        path = parent
    return path
class RunTestsForActiveViewCommand(sublime_plugin.WindowCommand):
    '''
    Runs tests:
    - from a file with the name 'test_<active_file_basename>' if it exists,
    - from a file with the .cmd-test[-solo] extension,
    - else, from the active file.
    '''
    def run(self):
        """Dispatch 'run_vintageous_tests' with arguments derived from the
        active view's file name and its enclosing project."""
        v = self.window.active_view()
        if v is None:
            return
        proj_path = find_project_path(v.file_name())
        # Only python and cmd-test files inside a project are runnable.
        if not proj_path or not v.file_name().endswith(('.py', '.cmd-test', '.cmd-test-solo')):
            print(
                'Vintageous (Dev): Not a project, cmd-test or python file: '
                + v.file_name())
            return
        # If it's a test_* file, run it.
        if os.path.basename(v.file_name()).startswith('test_'):
            self.window.run_command('run_vintageous_tests', {
                'active_file_only': True,
                'working_dir': proj_path
            })
            return
        # If it's a normal file, try to find its tests.
        # Maps <proj>/pkg/mod.py -> <proj>/tests/pkg/test_mod.py; the slice
        # strips the project prefix plus its trailing path separator.
        tail = os.path.join('tests', v.file_name()[len(proj_path) + 1:])
        full = os.path.join(proj_path, os.path.dirname(tail),
                            'test_' + os.path.basename(tail))
        if os.path.exists(full):
            self.window.run_command('run_vintageous_tests', {
                'loader_pattern': os.path.basename(full),
                'working_dir': proj_path
            })
            return
        # Otherwise just run it.
        self.window.run_command('run_vintageous_tests', {
            'active_file_only': True,
            'working_dir': proj_path
        })
class RunAllTestsCommand(sublime_plugin.WindowCommand):
    '''This command only exists because we can't expand ${project_path}
    in keymap files.
    '''
    def run(self):
        view = self.window.active_view()
        if view is None:
            return
        command_args = {
            'working_dir': find_project_path(view.file_name()),
        }
        self.window.run_command('run_vintageous_tests', command_args)
| 30.36
| 95
| 0.570048
|
4a0fb0d82f43a1fd3274306c2da92455d5e446b2
| 1,931
|
py
|
Python
|
extensions/issues/base.py
|
tjinjoy/oppia
|
ed5ccbd95e42078457d40dde1dda02f1ae6a4354
|
[
"Apache-2.0"
] | 2
|
2021-04-08T01:06:08.000Z
|
2021-06-02T08:20:13.000Z
|
extensions/issues/base.py
|
gitter-badger/oppia
|
7d8e659264582d7ce74bc6c139e597b82bca0e04
|
[
"Apache-2.0"
] | 35
|
2019-02-23T20:31:21.000Z
|
2019-08-19T12:32:13.000Z
|
extensions/issues/base.py
|
gitter-badger/oppia
|
7d8e659264582d7ce74bc6c139e597b82bca0e04
|
[
"Apache-2.0"
] | 1
|
2021-01-28T05:20:56.000Z
|
2021-01-28T05:20:56.000Z
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for defining issues."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from extensions import domain
import python_utils
class BaseExplorationIssueSpec(python_utils.OBJECT):
    """Base issue definition class.

    This class is not meant to be user-editable. The only methods in it should
    be getter methods.
    """
    # Customization arg specifications for the component, including their
    # descriptions, schemas and default values. Overridden in subclasses.
    _customization_arg_specs = []

    @property
    def customization_arg_specs(self):
        """The customization arg specs for the components."""
        specs = []
        for cas in self._customization_arg_specs:
            specs.append(domain.CustomizationArgSpec(**cas))
        return specs

    def to_dict(self):
        """Gets a dict representing this issue. Only default values are
        provided.
        """
        spec_dicts = []
        for ca_spec in self.customization_arg_specs:
            spec_dicts.append({
                'name': ca_spec.name,
                'description': ca_spec.description,
                'default_value': ca_spec.default_value,
                'schema': ca_spec.schema,
            })
        return {'customization_arg_specs': spec_dicts}
| 34.482143
| 78
| 0.694977
|
4a0fb1c8d439abb7cb69b7d9ce9619c40cc32435
| 5,147
|
py
|
Python
|
falcon_helpers/tests/test_middlewares/test_parse_jwt.py
|
nZac/falcon-helpers
|
a5b3efa40de38867a572d80d958414fa8d303293
|
[
"BSD-3-Clause"
] | null | null | null |
falcon_helpers/tests/test_middlewares/test_parse_jwt.py
|
nZac/falcon-helpers
|
a5b3efa40de38867a572d80d958414fa8d303293
|
[
"BSD-3-Clause"
] | null | null | null |
falcon_helpers/tests/test_middlewares/test_parse_jwt.py
|
nZac/falcon-helpers
|
a5b3efa40de38867a572d80d958414fa8d303293
|
[
"BSD-3-Clause"
] | null | null | null |
import pathlib
import falcon
import falcon.testing
import jwt
import pytest
from falcon_helpers.middlewares.parsejwt import ParseJWTMiddleware as MW
from falcon_helpers.config import ConfigurationError
@pytest.fixture(scope='module')
def rsa_priv_key():
    """Module-scoped fixture: contents of the bundled RSA private test key."""
    key_path = pathlib.Path(__file__).parent.parent / 'files' / 'keys' / 'testkey'
    return key_path.read_text()
@pytest.fixture(scope='module')
def rsa_pub_key():
    """Module-scoped fixture: contents of the bundled RSA public test key."""
    key_path = pathlib.Path(__file__).parent.parent / 'files' / 'keys' / 'testkey.pub'
    return key_path.read_text()
@pytest.fixture()
def hs_token():
    """JWT with fixed claims, signed symmetrically ('secret', HS256)."""
    claims = {'key': 'hs', 'aud': 'test'}
    return jwt.encode(claims, 'secret', algorithm='HS256')
@pytest.fixture()
def rsa_token(rsa_priv_key):
    """JWT with fixed claims, signed with the RSA private test key (RS256)."""
    claims = {'key': 'rsa', 'aud': 'test'}
    return jwt.encode(claims, rsa_priv_key, algorithm='RS256')
def test_init_requires_one_of_a_cookie_or_header():
    """Exactly one of cookie_name / header_name must be configured."""
    # Neither source, or both sources, is a configuration error.
    bad_combos = ({}, {'cookie_name': 'cookie', 'header_name': 'header'})
    for extra_kwargs in bad_combos:
        with pytest.raises(ConfigurationError):
            MW(audience='blah', **extra_kwargs)
    # Exactly one of either kind is accepted.
    assert MW(audience='blah', cookie_name='cookie')
    assert MW(audience='blah', header_name='cookie')
def test_init_pubkeys_checks():
    """A pubkey, when provided, must look like an ssh public key."""
    with pytest.raises(ConfigurationError):
        MW(audience='blah', cookie_name='cookie', pubkey='blank')
    # A well-formed key, or no key at all, is accepted.
    for acceptable in ('ssh-rsa blah', None):
        assert MW(audience='blah', cookie_name='cookie', pubkey=acceptable)
def test_verify_request_fails_without_token():
    """verify_request raises ValueError when no token is supplied."""
    middleware = MW(audience='blah', cookie_name='cookie', secret='secret')
    with pytest.raises(ValueError):
        middleware.verify_request(None)
def test_verification_types(hs_token, rsa_token, rsa_pub_key):
    """HS256 verification needs a secret and RS256 needs a public key; with
    the right material each token decodes back to its original claims."""
    # HS256 without a secret is a configuration error...
    with pytest.raises(ConfigurationError) as e:
        mw = MW(audience='test', cookie_name='cookie')
        mw.verify_request(hs_token)
    assert 'HS256' in str(e.value)
    assert 'requires a secret key.' in str(e.value)
    # ...and with one, the claims round-trip.
    mw = MW(audience='test', cookie_name='cookie', secret='secret', decode_algorithms=['HS256'])
    assert mw.verify_request(hs_token) == {
        'key': 'hs',
        'aud': 'test',
    }
    # RS256 without a public key is a configuration error...
    with pytest.raises(ConfigurationError) as e:
        mw = MW(audience='test', cookie_name='cookie')
        mw.verify_request(rsa_token)
    assert 'RS256' in str(e.value)
    assert 'requires a public key.' in str(e.value)
    # ...and with one, the claims round-trip.
    mw = MW(audience='test', cookie_name='cookie', pubkey=rsa_pub_key)
    assert mw.verify_request(rsa_token) == {
        'key': 'rsa',
        'aud': 'test',
    }
def test_process_request_with_header(hs_token):
    """Token sourced from a request header: an absent header leaves the
    request context untouched; a valid token stores its decoded claims."""
    app = falcon.API(
        middleware=[
            MW(audience='test', secret='secret', header_name='Auth', decode_algorithms=['HS256'])
        ]
    )
    client = falcon.testing.TestClient(app)
    resc = falcon.testing.SimpleTestResource()
    app.add_route('/', resc)
    # No Auth header: request still succeeds, nothing is stored.
    resp = client.simulate_get('/')
    assert resp.status_code == 200
    assert 'auth_token_contents' not in resc.captured_req.context
    # Valid token in the header: claims appear on the request context.
    resp = client.simulate_get(
        '/',
        headers={
            'Auth': hs_token.decode(),
        }
    )
    assert resp.status_code == 200
    assert 'auth_token_contents' in resc.captured_req.context
    assert resc.captured_req.context['auth_token_contents'] == {
        'key': 'hs',
        'aud': 'test',
    }
def test_process_request_with_cookie(hs_token):
    """Token sourced from a cookie: an absent cookie leaves the request
    context untouched; a valid token stores its decoded claims."""
    app = falcon.API(middleware=[
        MW(audience='test', secret='secret', cookie_name='Auth', decode_algorithms=['HS256'])
    ])
    client = falcon.testing.TestClient(app)
    resc = falcon.testing.SimpleTestResource()
    app.add_route('/', resc)
    # No cookie: request still succeeds, nothing is stored.
    resp = client.simulate_get('/')
    assert resp.status_code == 200
    assert 'auth_token_contents' not in resc.captured_req.context
    # Valid token in the Auth cookie: claims appear on the request context.
    resp = client.simulate_get('/', headers={'Cookie': 'Auth=' + hs_token.decode()})
    assert resp.status_code == 200
    assert 'auth_token_contents' in resc.captured_req.context
    assert resc.captured_req.context['auth_token_contents'] == {
        'key': 'hs',
        'aud': 'test',
    }
def test_process_request_with_default_failed_action():
    """By default an unverifiable token is ignored: the request succeeds and
    no token contents are stored on the context."""
    middleware = MW(audience='test', secret='secret', header_name='Auth')
    app = falcon.API(middleware=[middleware])
    client = falcon.testing.TestClient(app)
    resource = falcon.testing.SimpleTestResource()
    app.add_route('/', resource)
    response = client.simulate_get('/', headers={'Auth': 'token'})
    assert response.status_code == 200
    assert 'auth_token_contents' not in resource.captured_req.context
def test_process_request_with_custom_failed_action():
    """A custom when_fails callback is invoked on verification failure."""
    def explode(exc, req, resp):
        # Marker exception proves this callback ran.
        raise RuntimeError("works")
    middleware = MW(
        audience='test', secret='secret', header_name='Auth', when_fails=explode
    )
    app = falcon.API(middleware=[middleware])
    client = falcon.testing.TestClient(app)
    resource = falcon.testing.SimpleTestResource()
    app.add_route('/', resource)
    with pytest.raises(RuntimeError) as excinfo:
        client.simulate_get('/', headers={'Auth': 'token'})
    assert 'works' == str(excinfo.value)
| 29.244318
| 97
| 0.664659
|
4a0fb378293348c370dee16a30273e40435a1cf5
| 753
|
py
|
Python
|
sample-code/post-message-to-channel.py
|
kirmar/delaysay
|
9cba01d9840ddb1ba35430cc709608256134b798
|
[
"Apache-2.0"
] | null | null | null |
sample-code/post-message-to-channel.py
|
kirmar/delaysay
|
9cba01d9840ddb1ba35430cc709608256134b798
|
[
"Apache-2.0"
] | 2
|
2022-01-19T11:20:06.000Z
|
2022-01-19T11:34:32.000Z
|
sample-code/post-message-to-channel.py
|
kirmar/delaysay
|
9cba01d9840ddb1ba35430cc709608256134b798
|
[
"Apache-2.0"
] | null | null | null |
import os
import slack
import boto3
# SSM parameter name holding the Slack API token, and the target channel id,
# both supplied via the environment (KeyError if missing).
slack_token_name = os.environ["SLACK_API_TOKEN_NAME"]
slack_channel_id = os.environ["SLACK_CHANNEL_ID"]
ssm_client = boto3.client('ssm')
# Fetch and decrypt the Slack token from AWS SSM Parameter Store.
parameter = ssm_client.get_parameter(
    Name=slack_token_name,
    WithDecryption=True
)
slack_token = parameter['Parameter']['Value']
slack_client = slack.WebClient(token=slack_token)
# Post a fixed greeting to the configured channel.
slack_client.chat_postMessage(
    channel=slack_channel_id,
    text="Hello, world! :tada:"
)
# # Why don't link_names=True and username="DelaySay" and
# # icon_emoji=":robot_face:" work??
# slack_client.chat_postMessage(
#     channel=slack_channel_id,
#     link_names=1,
#     text=f"Hello, @{slack_channel_id}! :tada:",
#     username='DelaySay',
#     icon_emoji=':robot_face:'
# )
| 24.290323
| 57
| 0.730412
|
4a0fb3a67cd4e10c09e83da69af401ade473939f
| 3,096
|
py
|
Python
|
app.py
|
Nomad95/spotify-leds
|
2774a4a66c6e2a38950875d48047d52f6c8403a9
|
[
"MIT"
] | null | null | null |
app.py
|
Nomad95/spotify-leds
|
2774a4a66c6e2a38950875d48047d52f6c8403a9
|
[
"MIT"
] | null | null | null |
app.py
|
Nomad95/spotify-leds
|
2774a4a66c6e2a38950875d48047d52f6c8403a9
|
[
"MIT"
] | null | null | null |
from flask import Flask, jsonify, Session, render_template, request
# NOTE(review): flask exposes a lowercase `session` object, not `Session`
# (that name belongs to the flask_session extension) — and `Session(app)` /
# `Session['process']` below subscript the class itself rather than a
# per-request store. Confirm this module actually imports and runs.
import os
from time import sleep
from multiprocessing import Process
import configparser
from spotify_background_color import SpotifyBackgroundColor
from current_spotify_playback import CurrentSpotifyPlayback, NoArtworkException
from led_controller import LEDController
app = Flask(__name__)
# Spotify OAuth credentials, supplied via the environment (None if unset).
CLIENT_ID = os.environ.get('SPOTIPY_CLIENT_ID')
CLIENT_SECRET = os.environ.get('SPOTIPY_CLIENT_SECRET')
REDIRECT_URI = os.environ.get('SPOTIPY_REDIRECT_URI')
REFRESH_TOKEN = os.environ.get('SPOTIPY_REFRESH_TOKEN')
Session(app)
@app.route('/')
def main():
    # Landing page with links to the control modes.
    return render_template('index.html')
@app.route('/spotify')
def spotify():
    # (Re)start the background process that follows Spotify playback.
    # NOTE(review): this view function shadows the module-level `spotify`
    # playback client created in the __main__ block — confirm intended.
    p = Session['process']
    if not p.is_alive():
        p = Process(target=main_spotify, args=())
        p.start()
    return render_template('spotify.html')
@app.route('/manual')
def manual():
    # Stop the background follower so colors can be set by hand.
    try:
        Session['process'].terminate()
    except AttributeError:
        pass
    return render_template('manual.html')
@app.route('/color', methods=['GET', 'POST'])
def color():
    # POST sets the LED color from a JSON {r, g, b} body;
    # GET reports the current color.
    if request.method == 'POST':
        data = request.json
        r = data['r']
        g = data['g']
        b = data['b']
        led.set_color(r, g, b, delay=0)
        return jsonify(status='updating', data=data)
    else:
        curr_r, curr_g, curr_b = led.get_color()
        return jsonify(status='current', data={'r': curr_r, 'g': curr_g, 'b': curr_b})
@app.route('/off')
def off():
    # Stop the follower (if running) and switch the LEDs off.
    try:
        Session['process'].terminate()
    except AttributeError:
        pass
    led.set_color(0, 0, 0)
    return render_template('off.html')
def main_spotify():
    """Poll Spotify playback every 2 s; on each new song, pick a background
    color from the album artwork and push it to the LEDs. Blanks the LEDs
    when the named Chromecast is not playing."""
    old_song_id = ''
    while True:
        spotify.update_current_playback()
        if spotify.connected_to_chromecast(name):
            if spotify.new_song(old_song_id):
                try:
                    artwork = spotify.get_artwork()
                    background_color = SpotifyBackgroundColor(
                        img=artwork, image_processing_size=(100, 100))
                    r, g, b = background_color.best_color(
                        k=8, color_tol=0)
                except NoArtworkException:
                    # No artwork available: fall back to white.
                    r, g, b = 255, 255, 255
                led.set_color(r, g, b)
                old_song_id = spotify.get_current_song_id()
        else:
            # Not casting: forget the song and turn the LEDs off once.
            old_song_id = ''
            r, g, b = led.get_color()
            if r != 0 or g != 0 or b != 0:
                led.set_color(0, 0, 0)
        sleep(2)
if __name__ == '__main__':
    # GPIO pin numbers and Chromecast name come from config.ini.
    config = configparser.ConfigParser()
    config.read('config.ini')
    GPIO_PINS = config['GPIO PINS']
    red_pin = int(GPIO_PINS['red_pin'])
    green_pin = int(GPIO_PINS['green_pin'])
    blue_pin = int(GPIO_PINS['blue_pin'])
    name = config['CHROMECAST']['name']
    led = LEDController(red_pin, green_pin, blue_pin)
    spotify = CurrentSpotifyPlayback(CLIENT_ID, CLIENT_SECRET,
                                     REDIRECT_URI, REFRESH_TOKEN)
    Session['process'] = Process(target=main_spotify, args=())
    app.run(host='0.0.0.0')
| 28.666667
| 86
| 0.614987
|
4a0fb4671d18c4644c0142bb00ef68858fa394e3
| 1,613
|
py
|
Python
|
img_aug.py
|
zzdyyy/ProtoPNet
|
d417ac4881e5384db386d9764df7a0ef0f0a6b28
|
[
"MIT"
] | null | null | null |
img_aug.py
|
zzdyyy/ProtoPNet
|
d417ac4881e5384db386d9764df7a0ef0f0a6b28
|
[
"MIT"
] | null | null | null |
img_aug.py
|
zzdyyy/ProtoPNet
|
d417ac4881e5384db386d9764df7a0ef0f0a6b28
|
[
"MIT"
] | 2
|
2022-02-05T02:49:04.000Z
|
2022-02-15T19:40:54.000Z
|
import Augmentor
import os
def makedir(path):
    '''
    if path does not exist in the file system, create it

    Uses exist_ok=True, which is idempotent and avoids the check-then-create
    race in the original exists()/makedirs() sequence.
    '''
    os.makedirs(path, exist_ok=True)
datasets_root_dir = './datasets/cub200_cropped/'
dir = datasets_root_dir + 'train_cropped/'
target_dir = datasets_root_dir + 'train_cropped_augmented/'
makedir(target_dir)
# One (source, output) folder pair per class directory under the source root.
class_names = next(os.walk(dir))[1]
folders = [os.path.join(dir, name) for name in class_names]
target_folders = [os.path.join(target_dir, name) for name in class_names]
for source_folder, target_folder in zip(folders, target_folders):
    output_dir = os.path.abspath(target_folder)
    # Rotation (with random horizontal flips), 10 passes over the folder.
    pipeline = Augmentor.Pipeline(source_directory=source_folder, output_directory=output_dir)
    pipeline.rotate(probability=1, max_left_rotation=15, max_right_rotation=15)
    pipeline.flip_left_right(probability=0.5)
    for _ in range(10):
        pipeline.process()
    del pipeline
    # Skew (max 45 degrees), 10 passes.
    pipeline = Augmentor.Pipeline(source_directory=source_folder, output_directory=output_dir)
    pipeline.skew(probability=1, magnitude=0.2)  # max 45 degrees
    pipeline.flip_left_right(probability=0.5)
    for _ in range(10):
        pipeline.process()
    del pipeline
    # Shear, 10 passes.
    pipeline = Augmentor.Pipeline(source_directory=source_folder, output_directory=output_dir)
    pipeline.shear(probability=1, max_shear_left=10, max_shear_right=10)
    pipeline.flip_left_right(probability=0.5)
    for _ in range(10):
        pipeline.process()
    del pipeline
    # A random_distortion stage was present but deliberately disabled in the
    # original; it remains off here.
| 32.918367
| 87
| 0.691259
|
4a0fb6c91578e6b289897c316054c199ab85d287
| 28,777
|
py
|
Python
|
test.py
|
huawenyu/Boost-Cookbook
|
0419a93be2c8871c34fb64b748ed0daa0f6bf194
|
[
"BSL-1.0"
] | 3
|
2018-10-28T23:13:35.000Z
|
2021-02-03T13:50:22.000Z
|
test.py
|
huawenyu/Boost-Cookbook
|
0419a93be2c8871c34fb64b748ed0daa0f6bf194
|
[
"BSL-1.0"
] | null | null | null |
test.py
|
huawenyu/Boost-Cookbook
|
0419a93be2c8871c34fb64b748ed0daa0f6bf194
|
[
"BSL-1.0"
] | 2
|
2018-06-29T19:46:32.000Z
|
2021-08-10T13:13:00.000Z
|
import os
import sys
import signal
import subprocess
import argparse
from shutil import copyfile
from time import sleep
class tester:
canonize_output = False
outputs = {}
expected = {
'Chapter01/01_A_program_options_base_10_20': ('Fruits count: 30\n', '', 0),
'Chapter01/01_A_program_options_base_20_30': ('Fruits count: 50\n', '', 0),
'Chapter01/01_A_program_options_base_help': ('All options:\n --apples arg how many apples do you have\n --oranges arg how many oranges do you have\n --help produce help message\n\n', '', 1),
'Chapter01/01_B_program_options_short_10_20': ("Error: can not read options configuration file 'apples_oranges.cfg'\nFruits count: 30\n", '', 0),
'Chapter01/01_B_program_options_short_20_30': ("Error: can not read options configuration file 'apples_oranges.cfg'\nFruits count: 50\n", '', 0),
'Chapter01/01_B_program_options_short_70': ("Error: can not read options configuration file 'apples_oranges.cfg'\nError: the option '--oranges' is required but missing\n", '', 2),
'Chapter01/01_B_program_options_short_80_cfg': ('Fruits count: 100\n', '', 0),
'Chapter01/01_B_program_options_short_cfg': ('Fruits count: 30\n', '', 0),
'Chapter01/01_B_program_options_short_help': ('All options:\n -o [ --oranges ] arg oranges that you have\n --name arg your name\n --help produce help message\n -a [ --apples ] arg (=10) apples that you have\n\n', '', 1),
'Chapter01/01_B_program_options_short_no_params': ("Error: can not read options configuration file 'apples_oranges.cfg'\nError: the option '--oranges' is required but missing\n", '', 2),
'Chapter01/02_any': ('Wow! That is great!\n', '', 0),
'Chapter01/03_variant': ('Wow! That is great!\n', '', 0),
'Chapter01/04_A_any_db_example': ('Sum of arithmetic types in database row is: 20.1\n', '', 0),
'Chapter01/04_B_variant_db_example': ('Sum of arithmetic types in database row is: 20.1\n', '', 0),
'Chapter01/05_optional': ('...trying again\n...trying again\nDevice is locked\nSuccess!\n', '', 0),
'Chapter01/07_B_tuple_construction_order': ('012', '', 0),
'Chapter01/09_type_index': ('T is d\nx is i\nT is double\nx is int&&\n', '', 0),
'Chapter01/12_A_noncopyable_movable': ('no C++11\n', '', 0),
'Chapter01/12_B_noncopyable_movable_c++11': ('C++11\n', '', 0),
'Chapter01/13_algorithm': ('48656C6C6F20776F7264\n48656C6C6F20776F7264\n', '', 0),
'Chapter03/01_lexical_to_number': ('bad lexical cast: source type value could not be interpreted as target\n', '', 0),
'Chapter02/01_scoped_ptr': ('str == scoped_ptr\nstr == unique_ptr\n', '', 0),
'Chapter03/03_numeric_cast': ('#47 bad numeric conversion: negative overflow\n#58 bad numeric conversion: positive overflow\n\n\n\nNEG OVERFLOW in #47 bad numeric conversion: negative overflow\nPOS OVERFLOW in #59 bad numeric conversion: positive overflow\n\n\n\nIt works! Not in range!\n', '', 0),
'Chapter04/01_static_assert': ('01', '', 0),
'Chapter04/04_mpl_int_': (' 0 1 2 \x03 4 5\n', '', 0),
'Chapter05/03_atomics': ('shared_i == 0\n', '', 0),
'Chapter05/09_once': ('Print once 0\n', '', 0),
'Chapter06/01_tasks_processor_base': ('', 'Exception: Just checking\nThread interrupted\n', 0),
'Chapter06/02_tasks_processor_timers': ('', 'Exception: It works!\n', 0),
'Chapter06/08_exception_ptr': ('Lexical cast exception detected.\n\nCan not handle such exceptions:\nmain.cpp(48): Throw in function void func_test2()\nDynamic exception type: boost::exception_detail::clone_impl<boost::exception_detail::error_info_injector<std::logic_error> >\nstd::exception::what: Some fatal logic error\n\n', '', 0),
'Chapter06/09_tasks_processor_signals': ('Captured 1 SIGINT\nCaptured 2 SIGINT\nCaptured 3 SIGINT\n', '', 0),
'Chapter07/02_regex_match': ('Available regex syntaxes:\n\t[0] Perl\n\t[1] Perl case insensitive\n\t[2] POSIX extended\n\t[3] POSIX extended case insensitive\n\t[4] POSIX basic\n\t[5] POSIX basic case insensitive\n\nChoose regex syntax: Input regex: String to match: MATCH\nString to match: MATCH\nString to match: DOES NOT MATCH\nString to match: \nInput regex: String to match: MATCH\nString to match: MATCH\nString to match: DOES NOT MATCH\nString to match: DOES NOT MATCH\nString to match: \nInput regex: ', '', 0),
'Chapter07/02_regex_match_bad_num': ('Available regex syntaxes:\n\t[0] Perl\n\t[1] Perl case insensitive\n\t[2] POSIX extended\n\t[3] POSIX extended case insensitive\n\t[4] POSIX basic\n\t[5] POSIX basic case insensitive\n\nChoose regex syntax: Incorrect number of regex syntax. Exiting... \n', '', 1),
'Chapter07/02_regex_match_bad_regex': ('Available regex syntaxes:\n\t[0] Perl\n\t[1] Perl case insensitive\n\t[2] POSIX extended\n\t[3] POSIX extended case insensitive\n\t[4] POSIX basic\n\t[5] POSIX basic case insensitive\n\nChoose regex syntax: Input regex: Incorrect regex pattern!\nInput regex: ', '', 0),
'Chapter07/02_regex_match_extra': ('Available regex syntaxes:\n\t[0] Perl\n\t[1] Perl case insensitive\n\t[2] POSIX extended\n\t[3] POSIX extended case insensitive\n\t[4] POSIX basic\n\t[5] POSIX basic case insensitive\n\nChoose regex syntax: Input regex: String to match: MATCH\nString to match: DOES NOT MATCH\nString to match: \nInput regex: ', '', 0),
'Chapter07/03_regex_replace': ('Available regex syntaxes:\n\t[0] Perl\n\t[1] Perl case insensitive\n\t[2] POSIX extended\n\t[3] POSIX extended case insensitive\n\t[4] POSIX basic\n\t[5] POSIX basic case insensitive\n\nChoose regex syntax: \nInput regex: String to match: DOES NOT MATCH\nString to match: MATCH: 4, 2, \nReplace pattern: RESULT: ###4-4-2-4-4###\nString to match: \n\nInput regex: ', '', 0),
'Chapter07/03_regex_replace_bad_num': ('Available regex syntaxes:\n\t[0] Perl\n\t[1] Perl case insensitive\n\t[2] POSIX extended\n\t[3] POSIX extended case insensitive\n\t[4] POSIX basic\n\t[5] POSIX basic case insensitive\n\nChoose regex syntax: Incorrect number of regex syntax. Exiting... \n', '', 1),
'Chapter07/03_regex_replace_bad_regex': ('Available regex syntaxes:\n\t[0] Perl\n\t[1] Perl case insensitive\n\t[2] POSIX extended\n\t[3] POSIX extended case insensitive\n\t[4] POSIX basic\n\t[5] POSIX basic case insensitive\n\nChoose regex syntax: \nInput regex: Incorrect regex pattern!\n\nInput regex: ', '', 0),
'Chapter07/03_regex_replace_extra': ('Available regex syntaxes:\n\t[0] Perl\n\t[1] Perl case insensitive\n\t[2] POSIX extended\n\t[3] POSIX extended case insensitive\n\t[4] POSIX basic\n\t[5] POSIX basic case insensitive\n\nChoose regex syntax: \nInput regex: String to match: MATCH: q, w, e, \nReplace pattern: RESULT: ewq\nString to match: \n\nInput regex: ', '', 0),
'Chapter07/04_format': ('Hello, dear Reader! Did you read the book for 100 % !\n100 == 100 && 100% != 100\n\nReader\n\nboost::too_few_args: format-string referred to more arguments than were passed\n', '', 0),
'Chapter07/05_string_algo': ('\n erase_all_copy :Hello hello dear Reader.\n erase_first_copy:Hello hello, dear Reader.\n erase_last_copy :Hello, hello dear Reader.\n ierase_all_copy :, , dear Reader.\n ierase_nth_copy :Hello, hello dear Reader.\n replace_all_copy :Hello! hello! dear Reader.\n replace_first_copy :Hello! hello, dear Reader.\n replace_head_copy :Whaaaaaaa! hello, dear Reader.', '', 0),
'Chapter07/06_iterator_range': ('Sentence #1 : \tThis is a long long character array\n35 characters.\nSentence has 6 whitespaces.\n\nSentence #2 : \tPlease split this character array to sentences\n46 characters.\nSentence has 6 whitespaces.\n\nSentence #3 : \tDo you know, that sentences are separated using period, exclamation mark and question mark\n90 characters.\nSentence has 13 whitespaces.\n\nSentence #4 : \t :-)\n4 characters.\nSentence has 1 whitespaces.\n\n', '', 0),
'Chapter07/07_string_view': ('between brackets\nexpression\no_o\nO_O\n^_^\n', '', 0),
'Chapter08/01_vector_of_types': ('N5boost3mpl6v_itemIN4mpl_6size_tILm32EEENS1_INS3_ILm1EEENS1_INS3_ILm4096EEENS1_INS3_ILm8EEENS1_INS3_ILm4EEENS0_7vector0INS2_2naEEELi0EEELi0EEELi0EEELi0EEELi0EEE', '', 0),
'Chapter08/02_manipulating_vector_of_types': ('N4mpl_5long_ILl4EEE\n', '', 0),
'Chapter08/06_tuple_to_string': ('Meow! 0_0\nMeow! 0_0\nMeow! Meow! \nMeow! Meow! Meow! Meow! Meow! Meow! Meow! Meow! Meow! Meow! \n', '', 0),
'Chapter09/03_hash_h': ('HASH matched: 800000\n', '', 0),
'Chapter09/03_hash_s': ('STD matched: 800000\n', '', 0),
'Chapter09/03_hash_x': ('', '', 2),
'Chapter09/05_bimap': ('Left:\nAnton Polukhin <=> 3\nAntony Polukhin <=> 3\nJohn Snow <=> 1\nVasya Pupkin <=> 2\n\nRight:\n1 <=> John Snow\n2 <=> Vasya Pupkin\n3 <=> Antony Polukhin\n3 <=> Anton Polukhin\n', '', 0),
'Chapter09/06_multiindex': ('0:\nAnton Polukhin, 3, 182, 70\nAntony Polukhin, 3, 183, 70\nJohn Snow, 1, 185, 80\nVasya Pupkin, 2, 165, 60\n\n1:\nJohn Snow, 1, 185, 80\nVasya Pupkin, 2, 165, 60\nAnton Polukhin, 3, 182, 70\nAntony Polukhin, 3, 183, 70\n\n2:\nVasya Pupkin, 2, 165, 60\nAnton Polukhin, 3, 182, 70\nAntony Polukhin, 3, 183, 70\nJohn Snow, 1, 185, 80\n\n3:\nVasya Pupkin, 2, 165, 60\nAntony Polukhin, 3, 183, 70\nAnton Polukhin, 3, 182, 70\nJohn Snow, 1, 185, 80\n\n', '', 0),
'Chapter09/07_slist_and_pool_l': ('std::list: ', '', 0),
'Chapter09/07_slist_and_pool_s': ('slist_t: ', '', 0),
'Chapter09/07_slist_and_pool_x': ("Use 's' for testsing slist performance and 'l' for testsing std::list performance.", '', 0),
'Chapter10/03_no_rtti': ('type_index type_id() [with T = double]', '', 0),
'Chapter11/02_erasing_files': ('', 'Symlink created\n', 0),
'Chapter11/02_erasing_files_second_run': ('', 'Failed to create a symlink\n', 0),
'Chapter11/03_C_dll_usage_do_not': ("They are fast. Faster than you can believe. Don't turn your back, don't look away, and don't blink. Good luck, Sally Sparrow.", '', 0),
'Chapter11/03_C_dll_usage_hello': ('Good to meet you, Sally Sparrow.', '', 0),
'Chapter11/05_interprocess_basics': ('I have index 1. Press any key...\nI have index 2. Press any key...\nI have index 3. Press any key...\nI have index 4. Press any key...\nI have index 5. Press any key...\n', '', 0),
'Chapter11/06_interprocess_queue': ('Filling data\nGettinging data\n', '', 0),
'Chapter11/07_interprocess_pointers': ('Creating structure\nStructure found\n', '', 0),
'Chapter11/08_reading_files_c_files': ('C:', '', 0),
'Chapter11/08_reading_files_create_file': ('', '', 0),
'Chapter11/08_reading_files_error': ('', '', 42),
'Chapter11/08_reading_files_ifstream': ('ifstream:', '', 0),
'Chapter11/08_reading_files_mmap': ('mapped_region:', '', 0),
'Chapter11/09_coroutines': ('OK\n', '', 0),
'Chapter12/01_graph': ('Boost\nC++ guru\n', '', 0),
'Chapter12/02_graph_vis': ('digraph G {\n0 [label="C++"];\n1 [label="STL"];\n2 [label="Boost"];\n3 [label="C++ guru"];\n4 [label="C"];\n0->1 ;\n1->2 ;\n2->3 ;\n4->3 ;\n}\n', '', 0),
'Chapter12/05_testing': ('Running 2 test cases...\n', '\n*** No errors detected\n', 0),
'Chapter12/06_testing_advanced': ('Running 2 test cases...\n', '\n*** No errors detected\n', 0),
}
was_error = False
    ''' ****************************************** Main functions for testing ************************************* '''

    @staticmethod
    def safe_wait(task, timeout = 15.0):
        # Appveyor may hang on some test. This is a way to early abort:
        # poll the child until it exits or the timeout budget is spent.
        # (Relies on 0.5 being exactly representable so `timeout` reaches
        # exactly 0.0 when exhausted.)
        delay = 0.5
        while task.poll() is None and timeout > 0:
            sleep(delay)
            timeout -= delay

        if timeout == 0:
            task.kill()
            print '!!! Test timeout !!!'
            sys.exit(-4)

        return task.communicate()

    @staticmethod
    def _test(command, test_name):
        # Run `command`, record (stdout, stderr, returncode) under
        # `test_name`, and validate against the `expected` table.
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out1, out2 = tester.safe_wait(proc)
        tester.outputs[test_name] = (out1, out2, proc.returncode)
        tester._test_validate(test_name)

    @staticmethod
    def _print_test_output(test_name):
        # Dump the recorded output triple for a failing test.
        out = tester.outputs[test_name]
        print '--- Stdout:\n{}\n--- Stderr:\n{}\n--- Ret code: {}\n'.format(out[0], out[1], out[2])

    @staticmethod
    def _test_validate(test_name):
        # In canonization mode only record outputs; they are printed later.
        if tester.canonize_output:
            return

        # Silent success (no output, return code 0) is always accepted.
        if tester.outputs[test_name][0] == '' and tester.outputs[test_name][1] == '' and tester.outputs[test_name][2] == 0:
            return

        # Normalize Windows line endings before comparing with `expected`.
        tester.outputs[test_name] = (
            tester.outputs[test_name][0].replace('\r', ''),
            tester.outputs[test_name][1].replace('\r', ''),
            tester.outputs[test_name][2],
        )

        if test_name not in tester.expected:
            print '"{}" must not produce output and finish with code 0. Info:'.format(test_name)
            tester._print_test_output(test_name)
            tester.was_error = True
            return

        if tester.outputs[test_name][2] != tester.expected[test_name][2]:
            print 'Return code in "{}" test is {}, {} expected. Info:'.format(test_name, tester.outputs[test_name][2], tester.expected[test_name][2])
            tester._print_test_output(test_name)
            tester.was_error = True

        # NOTE: stderr (index 1) is deliberately not compared here; only
        # stdout and the return code are checked against `expected`.
        if tester.outputs[test_name][0] != tester.expected[test_name][0]:
            print 'Output in "{}" test is {}, {} expected. Info:'.format(test_name, tester.outputs[test_name][0], tester.expected[test_name][0])
            tester._print_test_output(test_name)
            tester.was_error = True
    ''' ****************************************** Special testing cases ****************************************** '''

    @staticmethod
    def _test_program_options_base(test_name, path):
        # Recipe 01_A: exercise --apples/--oranges combinations and --help.
        command = [path, '--apples=10', '--oranges=20']
        tester._test(command, test_name + "_10_20")

        command = [path, '--apples=20', '--oranges=30']
        tester._test(command, test_name + "_20_30")

        command = [path, '--help']
        tester._test(command, test_name + "_help")

        command = [path, '--apples=70']
        # Test throws bad_any_cast as there's no '--oranges' parameter, so
        # only a non-zero exit code is required; output is not compared.
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out1, out2 = tester.safe_wait(proc)
        if proc.returncode == 0:
            print '"01_A_program_options_base_70" must finish with code != 0.'
            tester.was_error = True
            return

    @staticmethod
    def _test_program_options_short(test_name, path):
        # Recipe 01_B: same options, but the binary also reads its values
        # from apples_oranges.cfg when present.
        command = [path, '--help']
        tester._test(command, test_name + "_help")

        command = [path]
        tester._test(command, test_name + "_no_params")

        command = [path, '-a', '10', '--oranges=20']
        tester._test(command, test_name + "_10_20")

        command = [path, '--apples=20', '--oranges=30']
        tester._test(command, test_name + "_20_30")

        command = [path, '--apples=70']
        tester._test(command, test_name + "_70")

        # Copy the recipe's config file next to the working directory so the
        # *_cfg cases pick it up; removed again below.
        copyfile(
            os.path.join(test_name, "apples_oranges.cfg")
            , "./apples_oranges.cfg"
        )

        command = [path, '--apples=80']
        tester._test(command, test_name + "_80_cfg")

        command = [path]
        tester._test(command, test_name + "_cfg")

        os.remove("./apples_oranges.cfg")
    @staticmethod
    def _test_tasks_processor_signals(test_name, path):
        if os.name == 'nt':
            return # Signals and Windows are not pals!

        # Send three SIGINTs with small pauses; the recipe is expected to
        # report each captured signal on stdout.
        proc = subprocess.Popen(path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sleep(1)
        proc.send_signal(signal.SIGINT)
        sleep(0.5)
        proc.send_signal(signal.SIGINT)
        sleep(0.5)
        proc.send_signal(signal.SIGINT)
        out1, out2 = tester.safe_wait(proc)
        tester.outputs[test_name] = (out1, out2, proc.returncode)
        tester._test_validate(test_name)

    @staticmethod
    def _test_regex_bad(test_name, path):
        # Bad syntax number: "6" is out of the 0..5 menu range.
        proc = subprocess.Popen(path, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        inp = "6\n"
        out1, out2 = proc.communicate(input=inp)
        tester.outputs[test_name + "_bad_num"] = (out1, out2, proc.returncode)
        tester._test_validate(test_name + "_bad_num")

        # Malformed regex pattern: unbalanced "(" and trailing escape.
        proc = subprocess.Popen(path, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        inp = "0\n(\\"
        out1, out2 = proc.communicate(input=inp)
        tester.outputs[test_name + "_bad_regex"] = (out1, out2, proc.returncode)
        tester._test_validate(test_name + "_bad_regex")

    @staticmethod
    def _test_regex_match(test_name, path):
        # Full interactive session with Perl syntax (menu item 0).
        proc = subprocess.Popen(path, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out1, out2 = proc.communicate(
            input=b"0\n(\d{3}[#-]){2}\n123-123#\n312-321-\n21-123-\n\n\n\l{3,5}\nqwe\nqwert\nqwerty\nQWE\n\n\n"
        )
        tester.outputs[test_name] = (out1, out2, proc.returncode)
        tester._test_validate(test_name)

        # Shorter session for each remaining syntax (2..5); all overwrite
        # the same "_extra" slot, so only the last one is compared.
        for i in xrange(2, 6):
            proc = subprocess.Popen(path, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            inp = str(i) + b"\n...\nqwe\nqwerty"
            out1, out2 = proc.communicate(input=inp)
            tester.outputs[test_name + "_extra"] = (out1, out2, proc.returncode)
            tester._test_validate(test_name + "_extra")

        tester._test_regex_bad(test_name, path)

    @staticmethod
    def _test_regex_replace(test_name, path):
        # Replacement session with Perl syntax (menu item 0).
        proc = subprocess.Popen(path, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out1, out2 = proc.communicate(
            input=b"0\n(\d)(\d)\n\\1#\\2\n42\n###\\1-\\1-\\2-\\1-\\1###"
        )
        tester.outputs[test_name] = (out1, out2, proc.returncode)
        tester._test_validate(test_name)

        # POSIX basic syntaxes (i >= 4) need escaped grouping parentheses.
        for i in xrange(6):
            proc = subprocess.Popen(path, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            inp = str(i)
            if i >= 4:
                inp += b"\n\\(.\\)\\(.\\)\\(.\\)\nqwe\n\\3\\2\\1"
            else:
                inp += b"\n(.)(.)(.)\nqwe\n\\3\\2\\1"
            out1, out2 = proc.communicate(input=inp)
            tester.outputs[test_name + "_extra"] = (out1, out2, proc.returncode)
            tester._test_validate(test_name + "_extra")

        tester._test_regex_bad(test_name, path)
    @staticmethod
    def _test_export_import(test_name, path):
        # On Windows the DLL may live in a debug/ or release/ subfolder;
        # try both locations and silently ignore the missing one.
        try:
            copyfile(
                "Chapter10/06_A_my_library/debug/06_A_my_library.dll",
                "./06_A_my_library.dll"
            )
        except:
            pass
        try:
            copyfile(
                "Chapter10/06_A_my_library/release/06_A_my_library.dll",
                "./06_A_my_library.dll"
            )
        except:
            pass
        tester._test(path, test_name)

    @staticmethod
    def _test_hash(test_name, path):
        # Run without args, then with each mode flag ('x' is invalid input).
        tester._test(path, test_name)
        tester._test([path, 'h'], test_name + '_h')
        tester._test([path, 's'], test_name + '_s')
        tester._test([path, 'x'], test_name + '_x')

    @staticmethod
    def _test_slist_and_pool(test_name, path):
        # Run without args, then with each mode flag ('x' prints usage).
        tester._test(path, test_name)
        tester._test([path, 's'], test_name + '_s')
        tester._test([path, 'l'], test_name + '_l')
        tester._test([path, 'x'], test_name + '_x')

    @staticmethod
    def _test_erasing_files(test_name, path):
        # Second run must fail to re-create the already existing symlink.
        tester._test(path, test_name)
        tester._test(path, test_name + '_second_run')
@staticmethod
def _test_plugins(test_name, path):
plugins = []
for folder, _, files in os.walk('Chapter11'):
for f in files:
if 'plugin' not in f:
continue
plugin_path = os.path.join(folder, f)
if plugin_path.endswith('.so') or plugin_path.endswith('.dll'):
plugins.append(plugin_path)
for p in plugins:
new_test_name = test_name
if 'hello' in p:
tester._test([path, p], test_name + '_hello')
else:
tester._test([path, p], test_name + '_do_not')
    @staticmethod
    def _test_interprocess_basic(test_name, path):
        # Start five instances concurrently; each must claim a unique index
        # from shared memory. Outputs are concatenated before validation.
        procs = []
        for x in xrange(5):
            procs.append( subprocess.Popen(path, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) )
        sleep(0.5) # Giving time for processes to start

        out1 = ""
        out2 = ""
        retcode = 0
        for p in procs:
            out1_tmp, out2_tmp = p.communicate(input='any_key')
            out1 += out1_tmp
            out2 += out2_tmp
            retcode += p.returncode
        tester.outputs[test_name] = (out1, out2, retcode)
        tester._test_validate(test_name)

    @staticmethod
    def _test_reading_files(test_name, path):
        # Create the fixture file first, then try every reading strategy,
        # and finally the error path ('e' mode, expected return code 42).
        tester._test([path, 'c'], test_name + '_create_file')
        tester._test([path, 'm'], test_name + '_mmap')
        tester._test([path, 'r'], test_name + '_ifstream')
        tester._test([path, 'a'], test_name + '_c_files')
        tester._test([path, 'e'], test_name + '_error')

    @staticmethod
    def _test_interprocess_run_two_concurrently(test_name, path):
        # Two cooperating instances (one fills, one reads shared state);
        # sum of return codes and concatenated outputs are validated.
        procs = []
        for x in xrange(2):
            procs.append( subprocess.Popen(path, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) )
        sleep(0.5) # Giving time for processes to start

        out1 = ""
        out2 = ""
        retcode = 0
        for p in procs:
            out1_tmp, out2_tmp = tester.safe_wait(p)
            out1 += out1_tmp
            out2 += out2_tmp
            retcode += p.returncode
        tester.outputs[test_name] = (out1, out2, retcode)
        tester._test_validate(test_name)

    @staticmethod
    def _test_gil(test_name, path):
        tester._test(path, test_name)

    @staticmethod
    def _test_but_ignore_output_diff(test_name, path):
        # For recipes whose output differs between platforms/compilers:
        # require *some* output and return code 0, but skip the diff.
        proc = subprocess.Popen(path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        out1, out2 = tester.safe_wait(proc)
        if out1 == '' and out2 == '':
            print 'No output in "{}" test\n'.format(test_name)
            tester.was_error = True

        tester.outputs[test_name] = (out1, out2, proc.returncode)
        if tester.canonize_output:
            return

        if proc.returncode != 0:
            print 'Return code in "{}" test is {}, {} expected. Info:'.format(test_name, tester.outputs[test_name][2], tester.expected[test_name][2])
            tester._print_test_output(test_name)
            tester.was_error = True

    @staticmethod
    def _ignore(test_name, path):
        # Placeholder for recipes that are deliberately not tested.
        pass
    ''' ****************************************** Private functions ********************************************** '''

    @staticmethod
    def _test_recipe(path):
        # Map recipe name -> dedicated test routine; anything not listed is
        # run with the plain expected-output comparison.
        special_cases = {
            "Chapter01/01_A_program_options_base": tester._test_program_options_base,
            "Chapter01/01_B_program_options_short": tester._test_program_options_short,
            "Chapter01/09_type_index": tester._test_but_ignore_output_diff,  # Different demangled representation of a type
            "Chapter01/12_A_noncopyable_movable": tester._test_but_ignore_output_diff,  # Different C++11 support
            "Chapter05/02_mutex": tester._test_but_ignore_output_diff,  # Intentionally has data race
            "Chapter06/08_exception_ptr": tester._test_but_ignore_output_diff,  # Different demangled exception name
            "Chapter06/09_tasks_processor_signals": tester._test_tasks_processor_signals,
            "Chapter07/02_regex_match": tester._test_regex_match,
            "Chapter07/03_regex_replace": tester._test_regex_replace,
            'Chapter08/01_vector_of_types': tester._test_but_ignore_output_diff,  # Different manglings
            'Chapter08/02_manipulating_vector_of_types': tester._test_but_ignore_output_diff,  # Different manglings
            'Chapter09/03_hash': tester._test_hash,
            'Chapter09/04_unordered': tester._test_but_ignore_output_diff,
            'Chapter09/07_slist_and_pool': tester._test_slist_and_pool,
            "Chapter10/03_no_rtti": tester._test_but_ignore_output_diff,  # Different demangled representation of a type
            "Chapter10/06_B_export_import": tester._test_export_import,
            "Chapter11/01_listing_files": tester._test_but_ignore_output_diff,
            "Chapter11/02_erasing_files": tester._test_erasing_files,
            "Chapter11/03_C_dll_usage": tester._test_plugins,
            "Chapter11/04_stacktrace": tester._test_but_ignore_output_diff,
            "Chapter11/05_interprocess_basics": tester._test_interprocess_basic,
            "Chapter11/06_interprocess_queue": tester._test_interprocess_run_two_concurrently,
            "Chapter11/07_interprocess_pointers": tester._test_interprocess_run_two_concurrently,
            "Chapter11/08_reading_files": tester._test_reading_files,
            "Chapter11/09_coroutines": tester._test_but_ignore_output_diff,  # Sanitizers do not like coroutines and add some warnings
            "Chapter12/03_random": tester._test_but_ignore_output_diff,
            # TODO:
            "Chapter12/07_gil": tester._ignore, #tester._test_gil,
        }

        # Derive the recipe name from the executable path, stripping
        # Windows build subfolders and normalizing separators.
        test_name = os.path.dirname(os.path.relpath(path)).replace('\\release', '').replace('\\debug', '').replace('\\', '/')
        print "* {}".format(test_name)
        test_name = test_name.replace('/flat', '').replace('\\flat', '')
        if test_name in special_cases:
            f = special_cases[test_name]
            f(test_name, path)
        else:
            tester._test(path, test_name)

    @staticmethod
    def _print_outputs():
        # Dump every recorded output in `expected`-table format.
        print "\n\nOutput"
        for o in sorted(tester.outputs):
            print "    '{}': {},".format(o, tester.outputs[o])

    @staticmethod
    def _print_outputs_short():
        # Same as _print_outputs, but skips silent successes.
        print "\n\nOutput"
        for o in sorted(tester.outputs):
            if tester.outputs[o][0] != '' or tester.outputs[o][1] != '' or tester.outputs[o][2] != 0:
                print "    '{}': {},".format(o, tester.outputs[o])

    @staticmethod
    def _is_exe(path):
        # Executable detection: on Windows require '.exe'; everywhere
        # exclude git internals, shell scripts, and shared libraries.
        return os.path.isfile(path) and os.access(path, os.X_OK) and (os.name != 'nt' or '.exe' in path) and '/.git/' not in path and '.sh' not in path and '.so' not in path
''' ****************************************** Public functions *********************************************** '''
@staticmethod
def run_tests(root_directory='.', verbose=False):
print "Searching for executables..."
executables = []
for folder, _, files in os.walk(root_directory):
for f in files:
path = os.path.join(folder, f)
if tester._is_exe(path):
executables.append(path)
executables.sort()
print "\nStarting tests..."
for e in executables:
tester._test_recipe(e)
print "... tests finished"
if tester.canonize_output:
tester._print_outputs_short()
sys.exit(-3)
if verbose or tester.was_error:
tester._print_outputs()
if tester.was_error:
print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! FAILURE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
sys.exit(-1)
print "\n*** SUCESS ***"
if __name__ == "__main__":
print "Initializing"
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--dir', default='.', help='Directory (chapter) to run tests for')
parser.add_argument('--build', '-b', action='store_true', help='Build the recipes for Linux platform')
parser.add_argument('--verbose', '-v', action='store_true', help='Output all the results')
args = parser.parse_args()
if args.build:
old_path = os.getcwd()
os.chdir(args.dir)
subprocess.check_call(['qmake', '.'])
subprocess.check_call(['make', '-j4'])
os.chdir(old_path)
tester.run_tests(args.dir, args.verbose)
if args.build:
old_path = os.getcwd()
os.chdir(args.dir)
subprocess.check_call(['make', 'distclean'])
os.chdir(old_path)
| 55.022945
| 527
| 0.6255
|
4a0fb6d362d9fd599683512bfcfe77a5b4de66eb
| 13,518
|
py
|
Python
|
sins/database/database.py
|
fgnt/sins
|
80517a51dd6cedb3df98d61d9d0586a1f96d9e28
|
[
"MIT"
] | 7
|
2020-04-09T07:43:52.000Z
|
2022-03-10T02:09:28.000Z
|
sins/database/database.py
|
fgnt/sins
|
80517a51dd6cedb3df98d61d9d0586a1f96d9e28
|
[
"MIT"
] | null | null | null |
sins/database/database.py
|
fgnt/sins
|
80517a51dd6cedb3df98d61d9d0586a1f96d9e28
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
from copy import deepcopy
import lazy_dataset
import numpy as np
import samplerate
import soundfile
from cached_property import cached_property
from lazy_dataset.database import JsonDatabase
from sins.database.utils import annotate, add_audio_paths
from sins.paths import jsons_dir
class SINS(JsonDatabase):
    """
    SINS Database class providing utility functions to read and preprocess
    the SINS data.
    """
    def __init__(self, json_path=jsons_dir / 'sins.json'):
        """
        Args:
            json_path: path to json that was created using
                `sins.database.create_json`
        """
        super().__init__(json_path)

    @property
    def node_to_room(self):
        """
        Returns: dict with nodes as keys and corresponding room as value.
        """
        return self.data["node_to_room"]

    @cached_property
    def room_to_nodes(self):
        """
        Returns: dict with rooms as keys and sorted list of corresponding
            nodes as values (inverse of `node_to_room`).
        """
        # Invert the node -> room mapping; sort node lists for determinism.
        room_to_nodes_ = defaultdict(list)
        for node, room in self.node_to_room.items():
            room_to_nodes_[room].append(node)
        return {room: sorted(nodes) for room, nodes in room_to_nodes_.items()}
    @property
    def sessions(self):
        """
        Returns: list of session tuples (<label>, <onset>, <offset>) sorted by
            onset.
        """
        # deepcopy so callers cannot mutate the underlying database dict.
        return deepcopy(self.data["annotations"])

    @cached_property
    def day_ranges(self):
        """
        Returns: a list of (<onset>, <offset>) for each day.
            Day boundaries are in the middle of the sleeping session resulting
            in 8 days with the first and the last being half days.
        """
        ranges = []
        sessions = sorted(self.sessions, key=(lambda x: x[1]))
        cur_offset = sessions[0][1]
        for i, session in enumerate(sessions):
            if session[0] == "sleeping:bedroom":
                # Split each day in the middle of its sleeping session.
                split_time = (session[1] + session[2]) / 2
                ranges.append((cur_offset, split_time))
                cur_offset = split_time
        # Final (half) day runs up to the offset of the last session.
        ranges.append((cur_offset, sessions[-1][2]))
        return ranges

    @property
    def train_ranges(self):
        """
        Returns: list of the suggested training ranges (days)
        """
        return [self.day_ranges[i] for i in [0, 2, 3, 6, 7]]

    @property
    def validate_ranges(self):
        """
        Returns: list of the suggested validation ranges (days)
        """
        return [self.day_ranges[5]]

    @property
    def eval_ranges(self):
        """
        Returns: list of the suggested evaluation ranges (days)
        """
        return [self.day_ranges[i] for i in [1, 4]]
    def get_segments(
            self, dataset_name, max_segment_length, min_segment_length=.1,
            time_ranges=None, sessions=None, session_key="scene",
            annotations=None
    ):
        """prepare dataset(s) providing time segments within certain
        time ranges with configurable segment length.

        Segments are dictionaries providing the paths to the audio data
        together with additional information such as timestamp, audio_length
        and labels. The structure of a segment is

        {
            "example_id": str
            "timestamp": float,
            "audio_length": float,
            "audio_path": list of str,
            "audio_start_samples": list of int,
            "audio_stop_samples": list of int,
            "dataset": str,
            "node_id": int,
            # optional:
            <label_name>: str or list of str,
            <label_name>_start_times: str or list of str,
            <label_name>_stop_times: str or list of str,
        }

        The timestamp states the starting point of the segment in seconds,
        counted from the beginning of recording.
        Do note that the timing information provided by the file names was
        found to be inaccurate, which is why we used the reference clock
        signal to refine the timestamps. This is done when writing the
        database json.

        If you request segments from multiple nodes they will be in parallel,
        i.e. the n-th segment from Node1 and the n-th segment of Node2 have
        the same on- & offsets (timestamp & audio_length).

        Note that a segment does not start at the beginning of a certain
        audio, it rather starts somewhere within an audio file, then may span
        over a couple of complete audio files, and then stop within an audio
        file again. The exact position of the start and stop points is given
        by audio_start_samples and audio_stop_samples, which is given for
        each audio file in the segment.

        Args:
            dataset_name: str or list of dataset name(s) where dataset names
                are of the form Node<idx>
            max_segment_length: maximal length of segment in seconds. This
                will be the length of the returned segment unless the segment
                is the last within a certain range.
            min_segment_length: If a segment at the end of a time range is
                shorter, it will be discarded. If you want to have
                fixed-length segments choose
                min_segment_length=max_segment_length.
            time_ranges: time ranges from which to read segments
            sessions: optional list of tuples
                (<session>, <start_time>, <stop_time>). If given each segment
                will be from a single session, i.e. no segment will span over
                a session change.
            session_key: If not None and sessions not None session labels
                will be added to the segment dict under this key.
            annotations: None or dict of lists of tuples
                (<label>, <start_time>, <stop_time>). If not None, for each
                key (label_name) there will be entries <label_name>
                <label_name>_start_times <label_name>_stop_times with values
                stating the labels, start_times and stop_times within the
                segment.

        Returns: (list of) lazy dataset(s) providing (parallel) segments.

        """
        if isinstance(dataset_name, (tuple, list)):
            # Recurse per node; identical range arguments yield parallel
            # segments across the returned datasets.
            return [
                self.get_segments(
                    name,
                    max_segment_length=max_segment_length,
                    min_segment_length=min_segment_length,
                    time_ranges=time_ranges,
                    sessions=sessions, session_key=session_key,
                    annotations=annotations
                )
                for name in dataset_name
            ]
        if time_ranges is None:
            # Default to the full annotated recording span.
            time_ranges = [(
                self.data["annotations"][0][1], self.data["annotations"][-1][2]
            )]
        else:
            time_ranges = sorted(time_ranges, key=lambda x: x[0])
            # Requested ranges must be non-overlapping.
            assert all([
                time_ranges[i][0] >= time_ranges[i - 1][1]
                for i in range(1, len(time_ranges))]
            )
        if sessions is None:
            segments = get_segments(
                time_ranges,
                max_segment_length=max_segment_length,
                min_segment_length=min_segment_length,
                segment_key_prefix=dataset_name + '_',
                dataset=self.get_examples(dataset_name)
            )
        else:
            # Clip sessions to the requested ranges; sessions that do not
            # intersect any range are dropped.
            sessions = [
                (session[0], max(session[1], start), min(session[2], stop))
                for start, stop in time_ranges
                for session in sessions
                if session[1] < stop and session[2] > start
            ]
            segments = get_session_segments(
                sessions=sessions,
                max_segment_length=max_segment_length,
                min_segment_length=min_segment_length,
                session_key=session_key,
                segment_key_prefix=dataset_name + '_',
                dataset=self.get_examples(dataset_name)
            )
        if annotations is not None:
            for key, annotation in annotations.items():
                if isinstance(annotation, dict):
                    annotation = annotation.values()
                annotate(
                    segments,
                    annotation=sorted(annotation, key=lambda x: x[1]),
                    label_key=key
                )
        return self._lazy_dataset_from_dict(segments, dataset_name)
@staticmethod
def _lazy_dataset_from_dict(examples, dataset_name):
for example_id in examples.keys():
examples[example_id] = {
**examples[example_id],
'example_id': example_id,
'dataset': dataset_name,
}
return lazy_dataset.from_dict(examples)
def get_segments(
        time_ranges, max_segment_length=60 * 60, min_segment_length=.1,
        segment_key_prefix='', dataset=None
):
    """Prepare segment dicts from time ranges and optionally add audio paths.

    Args:
        time_ranges: list of (start_time, stop_time) tuples to be segmented.
        max_segment_length: maximal length of a single segment; longer
            ranges are chopped into consecutive segments of this length.
        min_segment_length: segments shorter than this are discarded.
        segment_key_prefix: prefix in example_id of a segment.
            example_id will be <prefix><segment_onset>_<segment_offset>
        dataset: if not None, audio paths from this dataset are added to
            the segment dicts.

    Returns:
        dict mapping segment keys to dicts with 'timestamp' and
        'audio_length' entries.
    """
    segments = {}
    for range_start, range_stop in time_ranges:
        for seg_onset in np.arange(range_start, range_stop, max_segment_length):
            seg_length = min(max_segment_length, range_stop - seg_onset)
            # Drop trailing slivers shorter than the allowed minimum.
            if seg_length < min_segment_length:
                continue
            seg_offset = min(seg_onset + max_segment_length, range_stop)
            key = '{}{:.0f}_{:.0f}'.format(
                segment_key_prefix, seg_onset, seg_offset
            )
            segments[key] = {
                'timestamp': seg_onset,
                'audio_length': seg_length,
            }
    if dataset is not None:
        add_audio_paths(segments, dataset)
    return segments
def get_session_segments(
        sessions: (list, tuple), max_segment_length=1e6, min_segment_length=.1,
        session_key="scene", segment_key_prefix='', dataset=None
):
    """Prepare segment dicts from single sessions, i.e. segments never span
    a session change, and optionally add audio paths to the dicts.

    Args:
        sessions: list of (label, start_time, stop_time) tuples.
        max_segment_length: maximal length of a single segment; longer
            sessions are chopped into consecutive segments of this length.
        min_segment_length: segments shorter than this are discarded.
        session_key: if not None, the session label is stored under this
            key in each segment dict.
        segment_key_prefix: prefix in example_id of a segment.
            example_id will be <prefix><segment_onset>_<segment_offset>
        dataset: if not None, audio paths from this dataset are added to
            the segment dicts.

    Returns:
        dict mapping segment keys to segment dicts.
    """
    segments = {}
    # Process sessions in chronological order of their start times.
    for label, session_onset, session_offset in sorted(
            sessions, key=lambda s: s[1]
    ):
        for seg_onset in np.arange(
                session_onset, session_offset, max_segment_length
        ):
            if session_offset - seg_onset < min_segment_length:
                continue
            seg_offset = min(seg_onset + max_segment_length, session_offset)
            entry = {
                'timestamp': seg_onset,
                'audio_length': min(
                    max_segment_length, session_offset - seg_onset
                ),
            }
            if session_key is not None:
                entry[session_key] = label
            segments['{}{:.0f}_{:.0f}'.format(
                segment_key_prefix, seg_onset, seg_offset
            )] = entry
    if dataset is not None:
        add_audio_paths(segments, dataset)
    return segments
class AudioReader:
    """
    Takes an example (or segment), reads the associated audio file(s) and
    stores the (possibly resampled) signal under ``example['audio_data']``.
    """
    def __init__(self, source_sample_rate=16000, target_sample_rate=16000):
        """
        Args:
            source_sample_rate: sample rate of the stored audio file
            target_sample_rate: target sample rate. If != source_sample_rate,
                the audio will be resampled.
        """
        self.source_sample_rate = source_sample_rate
        self.target_sample_rate = target_sample_rate

    def read_file(self, filepath, start_sample=0, stop_sample=None):
        """Read one audio file, or concatenate several along the last axis.

        Args:
            filepath: a single path or a list/tuple of paths.
            start_sample: first sample to read (scalar, or list matching
                filepath).
            stop_sample: sample to stop reading at (scalar or list).

        Returns: signal array of shape (channels, samples).
        """
        if isinstance(filepath, (list, tuple)):
            n_files = len(filepath)
            # Broadcast scalar start/stop values over all files.
            if not isinstance(start_sample, (list, tuple)):
                start_sample = n_files * [start_sample]
            if not isinstance(stop_sample, (list, tuple)):
                stop_sample = n_files * [stop_sample]
            parts = [
                self.read_file(path, onset, offset)
                for path, onset, offset in zip(
                    filepath, start_sample, stop_sample
                )
            ]
            return np.concatenate(parts, axis=-1)
        signal, sample_rate = soundfile.read(
            str(filepath), start=start_sample, stop=stop_sample, always_2d=True
        )
        assert sample_rate == self.source_sample_rate
        if self.target_sample_rate != sample_rate:
            signal = samplerate.resample(
                signal, self.target_sample_rate / sample_rate, "sinc_fastest"
            )
        return signal.T

    def __call__(self, example):
        """Read the example's audio and attach it as 'audio_data'."""
        start = example.get("audio_start_samples", 0)
        stop = example.get("audio_stop_samples", None)
        example["audio_data"] = self.read_file(
            example["audio_path"], start, stop
        )
        return example
| 35.856764
| 79
| 0.592247
|
4a0fb6e8a0fd311558af9b18c6ede101c674b8e3
| 3,761
|
py
|
Python
|
tsrc/test/test_test_helpers.py
|
pdecat/tsrc
|
472778a473a31a1dc1093f9a5487facbd0bd8643
|
[
"BSD-3-Clause"
] | null | null | null |
tsrc/test/test_test_helpers.py
|
pdecat/tsrc
|
472778a473a31a1dc1093f9a5487facbd0bd8643
|
[
"BSD-3-Clause"
] | null | null | null |
tsrc/test/test_test_helpers.py
|
pdecat/tsrc
|
472778a473a31a1dc1093f9a5487facbd0bd8643
|
[
"BSD-3-Clause"
] | 1
|
2020-09-03T13:34:42.000Z
|
2020-09-03T13:34:42.000Z
|
from path import Path
import tsrc
import tsrc.git
from tsrc.test.helpers.git_server import GitServer
def read_remote_manifest(workspace_path: Path, git_server: GitServer) -> tsrc.Manifest:
    """Clone the manifest repository from the git server and parse its manifest.yml."""
    tsrc.git.run(workspace_path, "clone", git_server.manifest_url)
    return tsrc.manifest.load(workspace_path / "manifest/manifest.yml")
def test_git_server_add_repo_can_clone(
    workspace_path: Path, git_server: GitServer
) -> None:
    """Adding a repo to the test git server yields a clonable URL."""
    clone_url = git_server.add_repo("foo/bar")
    tsrc.git.run(workspace_path, "clone", clone_url)
    clone_path = workspace_path / "bar"
    assert clone_path.exists()
def test_git_server_can_add_copies(workspace_path: Path, git_server: GitServer) -> None:
    """File copies configured on the server show up in the remote manifest."""
    git_server.add_repo("foo")
    copies = [("foo.txt", "top.txt")]
    git_server.manifest.set_repo_file_copies("foo", copies)
    manifest = read_remote_manifest(workspace_path, git_server)
    assert manifest.copyfiles == [("foo/foo.txt", "top.txt")]
def test_can_configure_gitlab(tmp_path: Path, git_server: GitServer) -> None:
    """GitLab configuration set on the server is readable from the manifest."""
    gitlab_url = "http://gitlab.example.org"
    git_server.manifest.configure_gitlab(url=gitlab_url)
    manifest = read_remote_manifest(tmp_path, git_server)
    assert manifest.gitlab
    assert manifest.gitlab["url"] == gitlab_url
def test_can_configure_github_enterprise(tmp_path: Path, git_server: GitServer) -> None:
    """GitHub Enterprise configuration is readable from the manifest."""
    ghe_url = "http://github.example.com"
    git_server.manifest.configure_github_enterprise(url=ghe_url)
    manifest = read_remote_manifest(tmp_path, git_server)
    assert manifest.github_enterprise
    assert manifest.github_enterprise["url"] == ghe_url
def test_git_server_add_repo_updates_manifest(
    workspace_path: Path, git_server: GitServer
) -> None:
    """Every added repo appears in the manifest and is reachable over git."""
    git_server.add_repo("foo/bar")
    git_server.add_repo("spam/eggs")
    manifest = read_remote_manifest(workspace_path, git_server)
    repos = manifest.get_repos()
    assert len(repos) == 2
    for repo in repos:
        _, out = tsrc.git.run_captured(
            workspace_path, "ls-remote", repo.clone_url
        )
        assert "refs/heads/master" in out
def test_git_server_change_manifest_branch(
    workspace_path: Path, git_server: GitServer
) -> None:
    """Repos added after a manifest branch switch land on the new branch."""
    git_server.add_repo("foo")
    git_server.manifest.change_branch("devel")
    git_server.add_repo("bar")
    tsrc.git.run(
        workspace_path, "clone", git_server.manifest_url, "--branch", "devel"
    )
    manifest = tsrc.manifest.load(workspace_path / "manifest/manifest.yml")
    assert len(manifest.get_repos()) == 2
def test_git_server_change_repo_branch(
    workspace_path: Path, git_server: GitServer
) -> None:
    """Cloning a specific repo branch returns the files pushed to it."""
    foo_url = git_server.add_repo("foo")
    git_server.change_repo_branch("foo", "devel")
    git_server.push_file("foo", "devel.txt", contents="this is devel\n")
    tsrc.git.run(workspace_path, "clone", foo_url, "--branch", "devel")
    devel_file = workspace_path / "foo" / "devel.txt"
    assert devel_file.text() == "this is devel\n"
def test_git_server_tag(workspace_path: Path, git_server: GitServer) -> None:
    """Tags created on the server are visible via ls-remote."""
    foo_url = git_server.add_repo("foo")
    git_server.tag("foo", "v0.1")
    _, ls_remote_output = tsrc.git.run_captured(workspace_path, "ls-remote", foo_url)
    assert "refs/tags/v0.1" in ls_remote_output
def test_git_server_default_branch_devel(
    workspace_path: Path, git_server: GitServer
) -> None:
    """A repo created with a default branch is cloned and recorded on it."""
    foo_url = git_server.add_repo("foo", default_branch="devel")
    tsrc.git.run(workspace_path, "clone", foo_url)
    cloned_branch = tsrc.git.get_current_branch(workspace_path / "foo")
    assert cloned_branch == "devel"
    manifest = read_remote_manifest(workspace_path, git_server)
    assert manifest.get_repo("foo").branch == "devel"
| 35.819048
| 88
| 0.731986
|
4a0fb75923a82d2c47e7a7e97f30ef275bae1b3d
| 18,100
|
py
|
Python
|
asciimatics/parsers.py
|
ekapujiw2002/asciimatics
|
68ecfb9d9769989565f4d669c824acde90bc746c
|
[
"Apache-2.0"
] | null | null | null |
asciimatics/parsers.py
|
ekapujiw2002/asciimatics
|
68ecfb9d9769989565f4d669c824acde90bc746c
|
[
"Apache-2.0"
] | null | null | null |
asciimatics/parsers.py
|
ekapujiw2002/asciimatics
|
68ecfb9d9769989565f4d669c824acde90bc746c
|
[
"Apache-2.0"
] | null | null | null |
"""
This module provides parsers to create ColouredText objects from embedded control strings.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import re
from builtins import str
from future.utils import with_metaclass
from abc import ABCMeta, abstractmethod
from logging import getLogger
import asciimatics.constants as constants
from asciimatics.utilities import _DotDict
# Diagnostic logging
logger = getLogger(__name__)
class Parser(with_metaclass(ABCMeta, object)):
    """
    Abstract base class for text parsers.

    A parser extracts colour control codes from raw text and converts them
    to displayable text plus an associated stream of colour/cursor commands.
    """

    #: Command to display some text. Parameter is the text to display
    DISPLAY_TEXT = 0
    #: Command to change active colour tuple. Parameters are the 3-tuple of (fg, attr, bg)
    CHANGE_COLOURS = 1
    #: Command to move cursor to abs position. Parameters are (x, y) where each are absolute positions.
    MOVE_ABSOLUTE = 2
    #: Command to move cursor to relative position. Parameters are (x, y) where each are relative positions.
    MOVE_RELATIVE = 3
    #: Command to delete part of the current line. Params are 0, 1 and 2 for end, start, all.
    DELETE_LINE = 4
    #: Command to delete next N characters from this line.
    DELETE_CHARS = 5
    #: Next tab stop
    NEXT_TAB = 6
    #: Set cursor visibility. Param is boolean setting True=visible
    SHOW_CURSOR = 7
    #: Clear the screen. No parameters.
    CLEAR_SCREEN = 8
    #: Save the cursor position. No parameters.
    SAVE_CURSOR = 9
    #: Restore the cursor position. No parameters.
    RESTORE_CURSOR = 10

    def __init__(self):
        """
        Initialize the parser with no active state.
        """
        self._state = None

    def reset(self, text, colours):
        """
        Reset the parser to analyze the supplied raw text.

        :param text: raw text to process.
        :param colours: colour tuple to initialise the colour map.
        """
        self._state = _DotDict()
        self._state.text = text
        # Force colours to be mutable (in case a tuple was passed in).
        self._state.attributes = list(colours) if colours else None

    @abstractmethod
    def parse(self):
        """
        Generator to return coloured text from raw text.

        Generally returns a stream of text/color tuple/offset tuples. If there is
        a colour update with no visible text, the first element of the tuple may
        be None.

        :returns: a 3-tuple of (start offset in raw text, command to execute, parameters)
        """

    def append(self, text):
        """
        Append more text to the current text being processed.

        :param text: raw text to process.
        """
        self._state.text = self._state.text + text
class ControlCodeParser(Parser):
    """
    Parser to replace all control codes with a readable version - e.g. "^M"
    for carriage return.
    """

    def parse(self):
        """
        Generator to return coloured text from raw text.

        :returns: a 3-tuple of (start offset in raw text, command to execute, parameters)
        """
        if self._state.attributes:
            # BUG FIX: this used to read ``self._attributes``, which never
            # exists - the attribute list lives on ``self._state`` (see
            # Parser.reset) - so passing initial colours raised
            # AttributeError. Compare AsciimaticsParser.parse, which
            # correctly uses ``self._state.attributes``.
            yield (0, Parser.CHANGE_COLOURS, tuple(self._state.attributes))
        offset = 0
        while len(self._state.text) > 0:
            letter = self._state.text[0]
            if ord(letter) < 32:
                # Render control characters in caret notation, e.g. chr(13) -> "^M".
                yield (offset, Parser.DISPLAY_TEXT, "^" + chr(ord("@") + ord(letter)))
            else:
                yield (offset, Parser.DISPLAY_TEXT, letter)
            offset += 1
            self._state.text = self._state.text[1:]
class AsciimaticsParser(Parser):
    """
    Parser to handle Asciimatics rendering escape strings.
    """

    # Regular expression for use to find colour sequences in multi-colour text.
    # It should match ${n}, ${m,n} or ${m,n,o}
    _colour_sequence = re.compile(constants.COLOUR_REGEX)

    def parse(self):
        """
        Generator to return coloured text from raw text.

        :returns: a 3-tuple of (start offset in raw text, command to execute, parameters)
        """
        if self._state.attributes:
            yield (0, Parser.CHANGE_COLOURS, tuple(self._state.attributes))
        offset = last_offset = 0
        while len(self._state.text) > 0:
            match = self._colour_sequence.match(str(self._state.text))
            if match:
                # The regexp matches one of three forms:
                # - groups 2,3,4 for ${c,a,b}
                # - groups 5,6 for ${c,a}
                # - group 7 for ${c}
                if match.group(2) is not None:
                    colours = (
                        int(match.group(2)),
                        constants.MAPPING_ATTRIBUTES[match.group(3)],
                        int(match.group(4)),
                    )
                elif match.group(5) is not None:
                    colours = (
                        int(match.group(5)),
                        constants.MAPPING_ATTRIBUTES[match.group(6)],
                        None,
                    )
                else:
                    colours = (int(match.group(7)), 0, None)
                yield (last_offset, Parser.CHANGE_COLOURS, colours)
                # Skip over "${", "}" and the sequence body; note that
                # last_offset is deliberately NOT advanced - the colour
                # change is attributed to the next visible character.
                offset += 3 + len(match.group(1))
                self._state.text = match.group(8)
            else:
                yield (last_offset, Parser.DISPLAY_TEXT, self._state.text[0])
                self._state.text = self._state.text[1:]
                offset += 1
                last_offset = offset
class AnsiTerminalParser(Parser):
    """
    Parser to handle ANSI terminal escape codes.
    """
    # Regular expression for use to find colour sequences in multi-colour text.
    _colour_sequence = re.compile(r"^(\x1B\[([^@-~]*)([@-~]))(.*)")
    # Operating system commands (ESC ] ... BEL) - matched only to be swallowed.
    _os_cmd = re.compile(r"^(\x1B].*\x07)(.*)")
    def reset(self, text, colours):
        """
        Reset the parser to analyze the supplied raw text.
        :param text: raw text to process.
        :param colours: colour tuple to initialise the colour map.
        """
        super(AnsiTerminalParser, self).reset(text, colours)
        # init_colours records whether an initial CHANGE_COLOURS command must
        # be emitted before any other output from parse().
        if self._state.attributes is None:
            self._state.init_colours = False
            self._state.attributes = [None, None, None]
        else:
            self._state.init_colours = True
        # offset: current parse position in the raw text.
        # last_offset: raw-text offset reported with the next command.
        self._state.offset = 0
        self._state.last_offset = 0
        self._state.cursor = 0
    def parse(self):
        """
        Generator to return coloured text from raw text containing ANSI
        terminal escape sequences.
        :returns: a 3-tuple of (start offset in raw text, command to execute, parameters)
        """
        def _handle_escape(st):
            # Process the escape sequence at the head of st.text. Returns a
            # 2-tuple of (characters consumed, list of command tuples or None).
            # A consumed count of -1 means the sequence is incomplete and more
            # text must be appended before it can be parsed.
            match = self._colour_sequence.match(str(st.text))
            if match is None:
                # Not a CSI sequence... Check for some other options.
                match = self._os_cmd.match(str(st.text))
                if match:
                    # OS command - just swallow it.
                    return len(match.group(1)), None
                elif len(st.text) > 1 and st.text[1] == "M":
                    # Reverse Index - i.e. move up/scroll
                    return 2, [(st.last_offset, Parser.MOVE_RELATIVE, (0, -1))]
                # Unknown escape - guess how many characters to ignore - most likely just the next char
                # unless we can see the start of a new sequence.
                logger.debug("Ignoring: %s", st.text[0:2])
                if len(st.text) < 2:
                    return -1, None
                if st.text[1] in ("[", "]"):
                    return -1, None
                return (2, None) if st.text[1] != "(" else (3, None)
            else:
                # CSI sequence - look for the various options...
                results = []
                if match.group(3) == "m":
                    # We have found a SGR escape sequence ( CSI ... m ).  These have zero or more
                    # embedded arguments, so create a simple FSM to process the parameter stream.
                    in_set_mode = False
                    in_index_mode = False
                    in_rgb_mode = False
                    skip_size = 0
                    attribute_index = 0
                    last_attributes = tuple(st.attributes)
                    for parameter in match.group(2).split(";"):
                        try:
                            parameter = int(parameter)
                        except ValueError:
                            # Empty/garbage parameters are treated as 0 (reset).
                            parameter = 0
                        if in_set_mode:
                            # We are processing a set fore/background colour code
                            if parameter == 5:
                                in_index_mode = True
                            elif parameter == 2:
                                in_rgb_mode = True
                                skip_size = 3
                            else:
                                logger.info(("Unexpected colour setting", parameter))
                                break
                            in_set_mode = False
                        elif in_index_mode:
                            # We are processing a 5;n sequence for colour indeces
                            st.attributes[attribute_index] = parameter
                            in_index_mode = False
                        elif in_rgb_mode:
                            # We are processing a 2;r;g;b sequence for RGB colours - just ignore.
                            skip_size -= 1
                            if skip_size <= 0:
                                in_rgb_mode = False
                        else:
                            # top-level stream processing
                            if parameter == 0:
                                # Reset
                                st.attributes = [constants.COLOUR_WHITE,
                                                 constants.A_NORMAL,
                                                 constants.COLOUR_BLACK]
                            elif parameter == 1:
                                # Bold
                                st.attributes[1] = constants.A_BOLD
                            elif parameter in (2, 22):
                                # Faint/normal - faint not supported so treat as normal
                                st.attributes[1] = constants.A_NORMAL
                            elif parameter == 7:
                                # Inverse
                                st.attributes[1] = constants.A_REVERSE
                            elif parameter == 27:
                                # Inverse off - assume that means normal
                                st.attributes[1] = constants.A_NORMAL
                            elif parameter in range(30, 38):
                                # Standard foreground colours
                                st.attributes[0] = parameter - 30
                            elif parameter in range(40, 48):
                                # Standard background colours
                                st.attributes[2] = parameter - 40
                            elif parameter == 38:
                                # Set foreground colour - next parameter is either 5 (index) or 2 (RGB color)
                                in_set_mode = True
                                attribute_index = 0
                            elif parameter == 48:
                                # Set background colour - next parameter is either 5 (index) or 2 (RGB color)
                                in_set_mode = True
                                attribute_index = 2
                            elif parameter in range(90, 98):
                                # Bright foreground colours
                                st.attributes[0] = parameter - 82
                            elif parameter in range(100, 108):
                                # Bright background colours
                                st.attributes[2] = parameter - 92
                            else:
                                logger.debug("Ignoring parameter: %s", parameter)
                    # Only emit a colour change if the SGR codes actually changed something.
                    new_attributes = tuple(st.attributes)
                    if last_attributes != new_attributes:
                        results.append((st.last_offset, Parser.CHANGE_COLOURS, new_attributes))
                elif match.group(3) == "K":
                    # This is a line delete sequence.  Parameter defines which parts to delete.
                    param = match.group(2)
                    if param in ("", "0"):
                        # Delete to end of line
                        results.append((self._state.last_offset, Parser.DELETE_LINE, 0))
                    elif param == "1":
                        # Delete from start of line
                        results.append((self._state.last_offset, Parser.DELETE_LINE, 1))
                    elif param == "2":
                        # Delete whole line
                        results.append((self._state.last_offset, Parser.DELETE_LINE, 2))
                elif match.group(3) == "P":
                    # This is a character delete sequence.  Parameter defines how many to delete.
                    param = 1 if match.group(2) == "" else int(match.group(2))
                    results.append((self._state.last_offset, Parser.DELETE_CHARS, param))
                elif match.group(3) == "A":
                    # Move cursor up.  Parameter defines how far to move..
                    param = 1 if match.group(2) == "" else int(match.group(2))
                    results.append((self._state.last_offset, Parser.MOVE_RELATIVE, (0, -param)))
                elif match.group(3) == "B":
                    # Move cursor down.  Parameter defines how far to move..
                    param = 1 if match.group(2) == "" else int(match.group(2))
                    results.append((self._state.last_offset, Parser.MOVE_RELATIVE, (0, param)))
                elif match.group(3) == "C":
                    # Move cursor forwards.  Parameter defines how far to move..
                    param = 1 if match.group(2) == "" else int(match.group(2))
                    results.append((self._state.last_offset, Parser.MOVE_RELATIVE, (param, 0)))
                elif match.group(3) == "D":
                    # Move cursor backwards.  Parameter defines how far to move..
                    param = 1 if match.group(2) == "" else int(match.group(2))
                    results.append((self._state.last_offset, Parser.MOVE_RELATIVE, (-param, 0)))
                elif match.group(3) == "H":
                    # Move cursor to specified position.
                    x, y = 0, 0
                    params = match.group(2).split(";")
                    # Note: ANSI coordinates are 1-based; commands use 0-based.
                    y = int(params[0]) - 1 if params[0] != "" else 0
                    if len(params) > 1:
                        x = int(params[1]) - 1 if params[1] != "" else 0
                    results.append((self._state.last_offset, Parser.MOVE_ABSOLUTE, (x, y)))
                elif match.group(3) == "h" and match.group(2) == "?25":
                    # Various DEC private mode commands - look for cursor visibility, ignore others.
                    results.append((self._state.last_offset, Parser.SHOW_CURSOR, True))
                elif match.group(3) == "l" and match.group(2) == "?25":
                    # Various DEC private mode commands - look for cursor visibility, ignore others.
                    results.append((self._state.last_offset, Parser.SHOW_CURSOR, False))
                elif match.group(3) == "h" and match.group(2) == "?1049":
                    # This should really create an alternate screen, but clearing is a close
                    # approximation
                    results.append((self._state.last_offset, Parser.CLEAR_SCREEN, None))
                elif match.group(3) == "l" and match.group(2) == "?1049":
                    # This should really return to the normal screen, but clearing is a close
                    # approximation
                    results.append((self._state.last_offset, Parser.CLEAR_SCREEN, None))
                elif match.group(3) == "J" and match.group(2) == "2":
                    # Clear the screen.
                    results.append((self._state.last_offset, Parser.CLEAR_SCREEN, None))
                elif match.group(3) == "s":
                    # Save cursor pos
                    results.append((self._state.last_offset, Parser.SAVE_CURSOR, None))
                elif match.group(3) == "u":
                    # Restore cursor pos
                    results.append((self._state.last_offset, Parser.RESTORE_CURSOR, None))
                else:
                    logger.debug("Ignoring control: %s", match.group(1))
                return len(match.group(1)), results
        # Emit the initial colour tuple (if one was supplied to reset) exactly once.
        if self._state.init_colours:
            self._state.init_colours = False
            yield (0, Parser.CHANGE_COLOURS, self._state.attributes)
        while len(self._state.text) > 0:
            char = ord(self._state.text[0])
            new_offset = 1
            if char > 31:
                # Printable character.
                yield (self._state.last_offset, Parser.DISPLAY_TEXT, self._state.text[0])
                self._state.last_offset = self._state.offset + 1
            elif char == 8:
                # Back space
                yield (self._state.last_offset, Parser.MOVE_RELATIVE, (-1, 0))
            elif char == 9:
                # Tab
                yield (self._state.last_offset, Parser.NEXT_TAB, None)
            elif char == 13:
                # Carriage return
                yield (self._state.last_offset, Parser.MOVE_ABSOLUTE, (0, None))
            elif char == 27:
                # Escape - may consume a variable number of characters.
                new_offset, results = _handle_escape(self._state)
                if new_offset == -1:
                    # Incomplete escape sequence - wait for more text (see append()).
                    break
                if results is not None:
                    for result in results:
                        yield result
            else:
                # Unsupported control character - render as a blank.
                logger.debug("Ignoring character: %d", char)
                yield (self._state.last_offset, Parser.DISPLAY_TEXT, " ")
                self._state.last_offset = self._state.offset + 1
            self._state.offset += new_offset
            self._state.text = self._state.text[new_offset:]
| 47.135417
| 109
| 0.511823
|
4a0fb7c246728fb3ab75da26ad7694c85ff15c03
| 14,556
|
py
|
Python
|
tests/test_providers.py
|
OnroerendErfgoed/skosprovider_getty
|
06df6cc97ca8202f3335a5d040ce8a50afd6c067
|
[
"MIT"
] | 2
|
2018-03-02T20:14:56.000Z
|
2018-05-09T13:31:13.000Z
|
tests/test_providers.py
|
OnroerendErfgoed/skosprovider_getty
|
06df6cc97ca8202f3335a5d040ce8a50afd6c067
|
[
"MIT"
] | 70
|
2015-01-29T14:11:41.000Z
|
2022-01-18T06:51:28.000Z
|
tests/test_providers.py
|
OnroerendErfgoed/skosprovider_getty
|
06df6cc97ca8202f3335a5d040ce8a50afd6c067
|
[
"MIT"
] | 1
|
2016-10-12T16:58:10.000Z
|
2016-10-12T16:58:10.000Z
|
#!/usr/bin/python
import unittest
import pytest
import requests
from skosprovider.exceptions import ProviderUnavailableException
from skosprovider_getty.providers import AATProvider
from skosprovider_getty.providers import GettyProvider
from skosprovider_getty.providers import TGNProvider
from skosprovider_getty.providers import ULANProvider
# NOTE(review): a ``global`` statement at module level is a no-op (these names
# are already module globals). The collections also appear unused in this
# module - presumably kept for compatibility with other test modules; confirm
# before removing.
global clazzes, ontologies
clazzes = []
ontologies = {}
class GettyProviderTests(unittest.TestCase):
def test_set_custom_session(self):
sess = requests.Session()
provider = AATProvider({'id': 'AAT'}, session=sess)
self.assertEqual(sess, provider.session)
def test_allowed_instance_scopes(self):
provider = AATProvider({'id': 'AAT'})
assert provider.allowed_instance_scopes == [
'single', 'threaded_thread'
]
def test_override_instance_scopes(self):
provider = AATProvider(
{'id': 'AAT'},
allowed_instance_scopes=['single']
)
assert provider.allowed_instance_scopes == ['single']
def test_concept_scheme_is_cached(self):
provider = AATProvider(
{'id': 'AAT'}
)
assert provider._conceptscheme is None
cs = provider.concept_scheme
assert provider._conceptscheme == cs
def test_get_vocabulary_uri(self):
provider = AATProvider(
{'id': 'AAT'}
)
assert 'http://vocab.getty.edu/aat/' == provider.get_vocabulary_uri()
def test_get_vocabulary_uri_does_not_load_cs(self):
provider = ULANProvider(
{'id': 'ULAN'}
)
assert provider._conceptscheme is None
assert 'http://vocab.getty.edu/ulan/' == provider.get_vocabulary_uri()
assert provider._conceptscheme is None
def test_get_by_id_concept(self):
concept = AATProvider({'id': 'AAT'}).get_by_id('300007466', change_notes=True)
concept = concept.__dict__
self.assertEqual(concept['uri'], 'http://vocab.getty.edu/aat/300007466')
self.assertEqual(concept['type'], 'concept')
self.assertIsInstance(concept['labels'], list)
preflabels = [{'nl': 'kerken'}, {'de': 'Kirche (Gebäude)'}]
preflabels_conc = [{label.language: label.label} for label in concept['labels']
if label.type == 'prefLabel']
self.assertGreater(len(preflabels_conc), 0)
for label in preflabels:
self.assertIn(label, preflabels_conc)
altlabels = [{'nl': 'kerk'}, {'de': 'kirchen (Gebäude)'}]
altlabels_conc = [{label.language: label.label} for label in concept['labels']
if label.type == 'altLabel']
self.assertGreater(len(altlabels_conc), 0)
for label in altlabels:
self.assertIn(label, altlabels_conc)
self.assertGreater(len(concept['notes']), 0)
self.assertEqual(concept['id'], '300007466')
# todo gvp:broader is not a subproperty of skos:broader anymore. This is the
# reason why there are no broader elements anymore belonging to the Concept...
# to be decided what to do...
# self.assertEqual(concept['broader'][0], '300007391')
self.assertIn('300312247', concept['related'])
def test_get_by_id_collection(self):
collection = AATProvider({'id': 'AAT'}).get_by_id('300007473')
assert collection is not False
assert collection.uri == 'http://vocab.getty.edu/aat/300007473'
assert collection.type == 'collection'
assert '<kerken naar vorm>' in [
label.label for label in collection.labels if label.language == 'nl' and label.type == 'prefLabel']
assert len(collection.notes) == 0
def test_get_by_id_invalid(self):
concept = AATProvider({'id': 'AAT'}).get_by_id('123')
self.assertFalse(concept)
def test_get_by_id_superordinates(self):
# Default GettyProvider is an AAT provider
collection = GettyProvider({'id': 'AAT'}).get_by_id('300138225-array')
assert collection.id == '300138225-array'
assert '300138225' in collection.superordinates
def test_get_by_id_subOrdinateArrays(self):
# Default GettyProvider is an AAT provider
concept = GettyProvider({'id': 'AAT'}).get_by_id('300138225')
concept = concept.__dict__
self.assertEqual(concept['id'], '300138225')
self.assertIn('300138225-array', concept['subordinate_arrays'])
# 300126352
def test_get_by_uri(self):
# Default GettyProvider is an AAT provider
concept = GettyProvider({'id': 'AAT'}).get_by_uri('http://vocab.getty.edu/aat/300007466')
concept = concept.__dict__
self.assertEqual(concept['uri'], 'http://vocab.getty.edu/aat/300007466')
self.assertEqual(concept['id'], '300007466')
def test_get_by_uri_invalid(self):
# Default GettyProvider is an AAT provider
concept = GettyProvider({'id': 'AAT'}).get_by_uri('urn:skosprovider:5')
self.assertFalse(concept)
concept = GettyProvider({'id': 'AAT'}).get_by_uri('https://id.erfgoed.net/thesauri/materialen/7')
self.assertFalse(concept)
def test_get_by_id_tgn(self):
concept = TGNProvider({'id': 'TGN'}).get_by_id('1000063')
concept = concept.__dict__
self.assertEqual(concept['uri'], 'http://vocab.getty.edu/tgn/1000063')
self.assertIn('België', [label.label for label in concept['labels']
if label.language == 'nl' and label.type == 'prefLabel'])
def test_get_all(self):
kwargs = {'language': 'nl'}
self.assertFalse(TGNProvider({'id': 'TGN'}).get_all(**kwargs))
def test_get_top_display(self):
kwargs = {'language': 'nl'}
top_TGN_display = TGNProvider({'id': 'TGN', 'default_language': 'en'}).get_top_display(**kwargs)
self.assertIsInstance(top_TGN_display, list)
self.assertGreater(len(top_TGN_display), 0)
keys_first_display = top_TGN_display[0].keys()
for key in ['id', 'type', 'label', 'uri']:
self.assertIn(key, keys_first_display)
self.assertIn('World', [label['label'] for label in top_TGN_display])
top_AAT_display = AATProvider({'id': 'AAT', 'default_language': 'nl'}).get_top_display()
self.assertIsInstance(top_AAT_display, list)
self.assertGreater(len(top_AAT_display), 0)
self.assertIn('Facet Stijlen en perioden', [label['label'] for label in top_AAT_display])
def test_get_top_concepts(self):
kwargs = {'language': 'nl'}
top_TGN_concepts = TGNProvider({'id': 'TGN'}).get_top_concepts(**kwargs)
self.assertIsInstance(top_TGN_concepts, list)
self.assertEqual(len(top_TGN_concepts), 0)
def test_get_childeren_display(self):
kwargs = {'language': 'nl'}
childeren_tgn_belgie = TGNProvider({'id': 'TGN', 'default_language': 'nl'}
).get_children_display('1000063', **kwargs)
self.assertIsInstance(childeren_tgn_belgie, list)
self.assertGreater(len(childeren_tgn_belgie), 0)
keys_first_display = childeren_tgn_belgie[0].keys()
for key in ['id', 'type', 'label', 'uri']:
self.assertIn(key, keys_first_display)
self.assertIn('Brussels Hoofdstedelijk Gewest', [label['label'] for label in childeren_tgn_belgie])
def test_expand(self):
all_childeren_churches = AATProvider({'id': 'AAT'}).expand('300007466')
self.assertIsInstance(all_childeren_churches, list)
self.assertGreater(len(all_childeren_churches), 0)
self.assertIn('300007466', all_childeren_churches)
def test_expand_invalid(self):
all_childeren_invalid = AATProvider({'id': 'AAT'}).expand('invalid')
self.assertFalse(all_childeren_invalid)
def test_expand_collection(self):
all_childeren_churches_by_fuction = AATProvider({'id': 'AAT'}).expand('300007494')
self.assertIsInstance(all_childeren_churches_by_fuction, list)
self.assertGreater(len(all_childeren_churches_by_fuction), 0)
self.assertNotIn('300007494', all_childeren_churches_by_fuction)
def test_find_without_label(self):
r = AATProvider({'id': 'AAT'}).find({'type': 'concept', 'collection': {'id': '300007466', 'depth': 'all'}})
self.assertIsInstance(r, list)
def test_find_wrong_type(self):
self.assertRaises(ValueError, AATProvider({'id': 'AAT'}).find, {
'type': 'collectie', 'collection': {'id': '300007466', 'depth': 'all'}})
def test_find_no_collection_id(self):
self.assertRaises(ValueError, AATProvider({'id': 'AAT'}).find, {
'type': 'collection', 'collection': {'depth': 'all'}})
def test_find_wrong_collection_depth(self):
self.assertRaises(ValueError, AATProvider({'id': 'AAT'}).find, {
'type': 'concept', 'collection': {'id': '300007466', 'depth': 'allemaal'}})
def test_find_concepts_in_collection(self):
r = AATProvider({'id': 'AAT'}).find({'label': 'church', 'type': 'concept',
'collection': {'id': '300007466', 'depth': 'all'}})
self.assertIsInstance(r, list)
self.assertGreater(len(r), 0)
for res in r:
assert res['type'] == 'concept'
def test_find_multiple_keywords(self):
r = AATProvider({'id': 'AAT'}).find({'label': 'church abbey', 'type': 'concept'})
self.assertIsInstance(r, list)
self.assertGreater(len(r), 0)
for res in r:
assert res['type'] == 'concept'
def test_find_member_concepts_in_collection(self):
r = AATProvider({'id': 'AAT'}).find({'label': 'church', 'type': 'concept',
'collection': {'id': '300007494', 'depth': 'members'}})
self.assertIsInstance(r, list)
self.assertGreater(len(r), 0)
for res in r:
assert res['type'] == 'concept'
def test_find_collections_in_collection(self):
r = AATProvider({'id': 'AAT'}).find({'label': 'church', 'type': 'collection',
'collection': {'id': '300007466', 'depth': 'all'}})
assert len(r) > 0
for res in r:
assert res['type'] == 'collection'
def test_find_concepts(self):
r = AATProvider({'id': 'AAT'}).find({'label': 'church', 'type': 'concept'})
self.assertIsInstance(r, list)
self.assertGreater(len(r), 0)
for res in r:
assert res['type'] == 'concept'
def test_find_concepts_kerk_zh(self):
r = AATProvider({'id': 'AAT', 'default_language': 'zh'}).find({'label': 'kerk', 'type': 'concept'})
self.assertIsInstance(r, list)
self.assertGreater(len(r), 0)
for res in r:
assert res['type'] == 'concept'
def test_find_concepts_kerk_language(self):
kwargs = {'language': 'nl'}
result = AATProvider({'id': 'AAT'}).find({'label': 'kerk', 'type': 'concept'}, **kwargs)
assert len(result) > 0
labels = []
for res in result:
assert res['type'] == 'concept'
labels.append(res['label'])
assert "kerken" in labels
def test_find_concepts_kerk(self):
r1 = AATProvider({'id': 'AAT'}).find({'label': 'kerk', 'type': 'concept'})
r2 = AATProvider({'id': 'AAT', 'default_language': 'en'}).find({'label': 'kirche', 'type': 'concept'})
r3 = AATProvider({'id': 'AAT', 'default_language': 'nl'}).find({'label': 'kerk', 'type': 'concept'})
assert len(r1) > 0
assert len(r2) > 0
assert len(r3) > 0
for res in r1:
assert 'church' in res['label'].lower()
assert res['type'] == 'concept'
for res in r2:
assert 'church' in res['label'].lower()
assert res['type'] == 'concept'
for res in r3:
assert 'kerk' in res['label'].lower()
assert res['type'] == 'concept'
def test_find_member_collections_in_collection(self):
r = AATProvider({'id': 'AAT'}).find({'label': 'church', 'type': 'collection',
'collection': {'id': '300007466', 'depth': 'members'}})
assert len(r) > 0
for res in r:
assert res['type'] == 'collection'
def test_find_matches(self):
r = AATProvider({'id': 'AAT'}).find({'matches': {'uri': 'http://id.loc.gov/authorities/subjects/sh85123119'}})
assert len(r) == 1
assert r[0]['type'] == 'concept'
assert r[0]['uri'] == 'http://vocab.getty.edu/aat/300191778'
def test_find_closematches(self):
r = AATProvider({'id': 'AAT'}).find(
{'matches': {'uri': 'http://id.loc.gov/authorities/subjects/sh85123119', 'type': 'close'}})
assert len(r) == 1
assert r[0]['type'] == 'concept'
assert r[0]['uri'] == 'http://vocab.getty.edu/aat/300191778'
    def test_find_matches_no_uri(self):
        # A 'matches' query without a uri is invalid and must raise.
        with pytest.raises(ValueError):
            AATProvider({'id': 'AAT'}).find({'matches': {'type': 'close'}})
    def test_answer_wrong_query(self):
        # A malformed SPARQL query surfaces as ProviderUnavailableException.
        with pytest.raises(ProviderUnavailableException):
            provider = GettyProvider({'id': 'test'}, vocab_id='aat', url='http://vocab.getty.edu/aat')
            provider._get_answer("Wrong SPARQL query")
class ULANProviderTests(unittest.TestCase):
    """Integration tests for the ULAN provider.

    NOTE(review): these hit the live Getty vocabulary services and require
    network access.
    """

    def test_ulan_exists(self):
        ulan = ULANProvider({'id': 'ULAN'})
        assert isinstance(ulan, ULANProvider)

    def test_ulan_get_braem(self):
        # 500082691 identifies a 'Braem' record in ULAN (asserted below).
        ulan = ULANProvider({'id': 'ULAN'})
        braem = ulan.get_by_id(500082691)
        assert braem.id == '500082691'
        assert braem.label is not None
        assert 'Braem' in braem.label('nl').label

    def test_ulan_search_braem(self):
        ulan = ULANProvider({'id': 'ULAN'})
        res = ulan.find({'label': 'braem'})
        assert any([a for a in res if a['id'] == '500082691'])

    def test_ulan_search_braem_custom_sort(self):
        # Results can be ordered by id, label or sortlabel, asc or desc.
        ulan = ULANProvider({'id': 'ULAN'})
        res = ulan.find({'label': 'braem'}, sort='id')
        assert ['500082691', '500331524'] == [a['id'] for a in res]
        res = ulan.find({'label': 'braem'}, sort='label', sort_order='desc')
        assert ['500331524', '500082691'] == [a['id'] for a in res]
        res = ulan.find({'label': 'braem'}, sort='sortlabel', sort_order='desc')
        assert ['500331524', '500082691'] == [a['id'] for a in res]
| 43.711712
| 118
| 0.612256
|
4a0fb85799c12793ec854f33fc8dfaa8d4b07820
| 8,774
|
py
|
Python
|
mars/lib/nvutils.py
|
tomzhang/mars-1
|
6f1d85e37eb1b383251314cb0ba13e06288af03d
|
[
"Apache-2.0"
] | 1
|
2020-06-25T13:51:16.000Z
|
2020-06-25T13:51:16.000Z
|
mars/lib/nvutils.py
|
tomzhang/mars-1
|
6f1d85e37eb1b383251314cb0ba13e06288af03d
|
[
"Apache-2.0"
] | null | null | null |
mars/lib/nvutils.py
|
tomzhang/mars-1
|
6f1d85e37eb1b383251314cb0ba13e06288af03d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import uuid
from collections import namedtuple
from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref,\
create_string_buffer, Structure, POINTER, CDLL
logger = logging.getLogger(__name__)
# Some constants taken from cuda.h
CUDA_SUCCESS = 0
CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16
CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39
CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13
CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33
CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34
CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36
# Status returned by cuInit when no CUDA-capable device is present.
CU_NO_CUDA_CAPABLE_DEVICE_DETECTED = 100

# nvml constants
NVML_SUCCESS = 0
NVML_TEMPERATURE_GPU = 0
# Status returned by nvmlInit when the NVIDIA driver is not loaded.
NVML_DRIVER_NOT_LOADED = 9
class _CUuuid_t(Structure):
    # ctypes mirror of the CUDA driver's CUuuid struct (16 raw bytes).
    _fields_ = [
        ('bytes', c_char * 16)
    ]
class _nvmlUtilization_t(Structure):
    # ctypes mirror of nvmlUtilization_t: GPU and memory utilization (%).
    _fields_ = [
        ('gpu', c_uint),
        ('memory', c_uint),
    ]
class _struct_nvmlDevice_t(Structure):
    pass  # opaque handle

# NVML device handles are passed around as pointers to the opaque struct.
_nvmlDevice_t = POINTER(_struct_nvmlDevice_t)
class _nvmlBAR1Memory_t(Structure):
    # ctypes mirror of nvmlBAR1Memory_t: total/free/used bytes.
    _fields_ = [
        ('total', c_ulonglong),
        ('free', c_ulonglong),
        ('used', c_ulonglong),
    ]
def _load_nv_library(*libnames):
for lib in libnames:
try:
return CDLL(lib)
except OSError:
continue
# Library handles; populated lazily by _init_cp / _init_nvml.
_cuda_lib = _nvml_lib = None

# Static description of a CUDA device.
_cu_device_info = namedtuple('_cu_device_info', 'index uuid name multiprocessors cuda_cores threads')
# Driver / CUDA version pair reported by NVML.
_nvml_driver_info = namedtuple('_nvml_driver_info', 'driver_version cuda_version')
# Point-in-time utilization / memory snapshot of a device.
_nvml_device_status = namedtuple(
    '_nvml_device_status', 'gpu_util mem_util temperature fb_total_mem fb_used_mem fb_free_mem')

# Per-process caches; _init_pid records which process performed the init.
_init_pid = None
_gpu_count = None
_driver_info = None
_device_infos = dict()
_no_device_warned = False
class NVError(Exception):
    """Base exception for CUDA / NVML API failures.

    Carries the native status code alongside the decoded message so
    callers can branch on specific error numbers.
    """

    def __init__(self, msg, *args, errno=None):
        self._code = errno
        super().__init__(msg if msg else 'Unknown error', *args)

    def __str__(self):
        return '({0}) {1}'.format(self._code, super().__str__())

    @property
    def errno(self):
        """Native CUDA / NVML status code (or ``None`` if unknown)."""
        return self._code

    @property
    def message(self):
        """The error message without the status-code prefix."""
        return super().__str__()
class NVDeviceAPIError(NVError):
    # Raised when a CUDA driver API call fails.
    pass
class NVMLAPIError(NVError):
    # Raised when an NVML API call fails.
    pass
def _cu_check_error(result):
    # Raise NVDeviceAPIError with the driver-decoded message when a CUDA
    # driver call did not return CUDA_SUCCESS.
    if result != CUDA_SUCCESS:
        _error_str = c_char_p()
        _cuda_lib.cuGetErrorString(result, byref(_error_str))
        raise NVDeviceAPIError(_error_str.value.decode(), errno=result)
# Lazily-resolved handle to nvmlErrorString (restype patched once below).
_nvmlErrorString = None


def _nvml_check_error(result):
    """Raise NVMLAPIError with the decoded message for non-success codes."""
    global _nvmlErrorString
    if _nvmlErrorString is None:
        # Resolve once and fix the return type so ctypes yields a C string.
        _nvmlErrorString = _nvml_lib.nvmlErrorString
        _nvmlErrorString.restype = c_char_p
    if result != NVML_SUCCESS:
        _error_str = _nvmlErrorString(result)
        raise NVMLAPIError(_error_str.decode(), errno=result)
_cu_process_var_to_cores = {
(1, 0): 8,
(1, 1): 8,
(1, 2): 8,
(1, 3): 8,
(2, 0): 32,
(2, 1): 48,
}
def _cu_get_processor_cores(major, minor):
return _cu_process_var_to_cores.get((major, minor), 192)
def _init_cp():
    """Load and initialize the CUDA driver library, once per process.

    Sets the module-global ``_cuda_lib``; leaves it ``None`` when the
    library is missing or no CUDA-capable device is present.
    """
    global _cuda_lib, _no_device_warned
    if _init_pid == os.getpid():
        # Already initialized in this process.
        return

    _cuda_lib = _load_nv_library('libcuda.so', 'libcuda.dylib', 'cuda.dll')

    if _cuda_lib is None:
        return
    try:
        _cu_check_error(_cuda_lib.cuInit(0))
    except NVDeviceAPIError as ex:
        if ex.errno == CU_NO_CUDA_CAPABLE_DEVICE_DETECTED:
            # Running without a GPU is not fatal: disable CUDA and warn once.
            _cuda_lib = None
            if not _no_device_warned:
                logger.warning('No CUDA device detected')
                _no_device_warned = True
        else:
            logger.exception('Failed to initialize libcuda.')
        return
def _init_nvml():
    """Load and initialize the NVML library, once per process.

    Sets the module-global ``_nvml_lib``; leaves it ``None`` when the
    library is missing or the NVIDIA driver is not loaded.
    """
    global _nvml_lib, _no_device_warned
    if _init_pid == os.getpid():
        # Already initialized in this process.
        return

    _nvml_lib = _load_nv_library('libnvidia-ml.so', 'libnvidia-ml.dylib', 'nvml.dll')

    if _nvml_lib is None:
        return
    try:
        _nvml_check_error(_nvml_lib.nvmlInit_v2())
    except NVMLAPIError as ex:
        if ex.errno == NVML_DRIVER_NOT_LOADED:
            # No driver means no GPUs: disable NVML and warn once.
            _nvml_lib = None
            if not _no_device_warned:
                logger.warning('Failed to load libnvidia-ml: %s, no CUDA device will be enabled', ex.message)
                _no_device_warned = True
        else:
            logger.exception('Failed to initialize libnvidia-ml.')
        return
def _init():
    """Initialize both CUDA and NVML; record this process as initialized
    only when both libraries loaded successfully."""
    global _init_pid
    _init_cp()
    _init_nvml()
    if _nvml_lib is not None and _cuda_lib is not None:
        _init_pid = os.getpid()
def get_device_count():
    """Return the number of visible CUDA devices, or ``None`` when NVML
    cannot be loaded.

    ``CUDA_VISIBLE_DEVICES`` takes precedence over the NVML device count
    so the result matches what CUDA itself will expose. The result is
    cached for subsequent calls.
    """
    global _gpu_count
    if _gpu_count is not None:
        return _gpu_count

    _init_nvml()
    if _nvml_lib is None:
        return None

    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        devices = os.environ['CUDA_VISIBLE_DEVICES'].strip()
        if not devices:
            _gpu_count = 0
        else:
            _gpu_count = len(devices.split(','))
    else:
        n_gpus = c_uint()
        # Bug fix: nvmlDeviceGetCount is an NVML call, so its status code
        # must be decoded by _nvml_check_error, not the CUDA-driver
        # checker (_cu_check_error), which would call cuGetErrorString
        # with an NVML status and produce a bogus message on failure.
        _nvml_check_error(_nvml_lib.nvmlDeviceGetCount(byref(n_gpus)))
        _gpu_count = n_gpus.value
    return _gpu_count
def get_driver_info():
    """Return the driver / CUDA versions reported by NVML (cached), or
    ``None`` when NVML is unavailable."""
    global _driver_info
    _init_nvml()
    if _nvml_lib is None:
        return None
    if _driver_info is not None:
        return _driver_info

    version_buf = create_string_buffer(100)
    cuda_version = c_uint()

    _nvml_check_error(_nvml_lib.nvmlSystemGetDriverVersion(version_buf, len(version_buf)))
    _nvml_check_error(_nvml_lib.nvmlSystemGetCudaDriverVersion(byref(cuda_version)))

    _driver_info = _nvml_driver_info(
        driver_version=version_buf.value.decode(),
        # Split NVML's single integer version into major/minor parts.
        cuda_version='%d.%d' % (cuda_version.value // 1000, cuda_version.value % 1000)
    )
    return _driver_info
def get_device_info(dev_index):
    """Return cached static info for the ``dev_index``-th visible device.

    Returns ``None`` when CUDA/NVML could not be initialized. The index is
    the logical (CUDA_VISIBLE_DEVICES-relative) index; the returned
    ``index`` field holds the physical device index.
    """
    try:
        return _device_infos[dev_index]
    except KeyError:
        pass

    _init()
    if _init_pid is None:
        # Initialization failed (no driver or no device).
        return None

    device = c_int()
    name_buf = create_string_buffer(100)
    uuid_t = _CUuuid_t()
    cc_major = c_int()
    cc_minor = c_int()
    cores = c_int()
    threads_per_core = c_int()

    _cu_check_error(_cuda_lib.cuDeviceGet(byref(device), c_int(dev_index)))
    _cu_check_error(_cuda_lib.cuDeviceGetName(name_buf, len(name_buf), device))
    _cu_check_error(_cuda_lib.cuDeviceGetUuid(byref(uuid_t), device))
    _cu_check_error(_cuda_lib.cuDeviceComputeCapability(
        byref(cc_major), byref(cc_minor), device))
    _cu_check_error(_cuda_lib.cuDeviceGetAttribute(
        byref(cores), CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device))
    _cu_check_error(_cuda_lib.cuDeviceGetAttribute(
        byref(threads_per_core), CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR, device))

    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        # Map the logical index back to the physical device index.
        real_dev_index = [int(s) for s in os.environ['CUDA_VISIBLE_DEVICES'].split(',')][dev_index]
    else:
        real_dev_index = dev_index

    info = _device_infos[dev_index] = _cu_device_info(
        index=real_dev_index,
        uuid=uuid.UUID(bytes=uuid_t.bytes),
        name=name_buf.value.decode(),
        multiprocessors=cores.value,
        # Cores per SM depends on the compute capability.
        cuda_cores=cores.value * _cu_get_processor_cores(cc_major.value, cc_minor.value),
        threads=cores.value * threads_per_core.value,
    )
    return info
def get_device_status(dev_index):
    """Return a utilization / temperature / BAR1-memory snapshot for the
    ``dev_index``-th visible device, or ``None`` when init failed."""
    _init()
    if _init_pid is None:
        return None

    device = _nvmlDevice_t()
    utils = _nvmlUtilization_t()
    temperature = c_uint()
    memory_info = _nvmlBAR1Memory_t()

    # NVML looks the device up by its "GPU-<uuid>" string form.
    dev_uuid = get_device_info(dev_index).uuid
    uuid_str = ('GPU-' + str(dev_uuid)).encode()

    _nvml_check_error(_nvml_lib.nvmlDeviceGetHandleByUUID(uuid_str, byref(device)))
    _nvml_check_error(_nvml_lib.nvmlDeviceGetUtilizationRates(device, byref(utils)))
    _nvml_check_error(_nvml_lib.nvmlDeviceGetTemperature(
        device, NVML_TEMPERATURE_GPU, byref(temperature)))
    _nvml_check_error(_nvml_lib.nvmlDeviceGetBAR1MemoryInfo(device, byref(memory_info)))

    return _nvml_device_status(
        gpu_util=utils.gpu,
        mem_util=utils.memory,
        temperature=temperature.value,
        fb_total_mem=memory_info.total,
        fb_free_mem=memory_info.free,
        fb_used_mem=memory_info.used,
    )
| 27.504702
| 109
| 0.691589
|
4a0fb893dbd9f9e913daeb32df4e03d8048c4b46
| 5,205
|
py
|
Python
|
dallinger/notifications.py
|
mensch72/Dallinger
|
56899d231e2f2fc9f84879f100bf998797249d3e
|
[
"MIT"
] | 1
|
2020-11-29T02:25:13.000Z
|
2020-11-29T02:25:13.000Z
|
dallinger/notifications.py
|
mensch72/Dallinger
|
56899d231e2f2fc9f84879f100bf998797249d3e
|
[
"MIT"
] | null | null | null |
dallinger/notifications.py
|
mensch72/Dallinger
|
56899d231e2f2fc9f84879f100bf998797249d3e
|
[
"MIT"
] | 1
|
2020-11-28T16:22:48.000Z
|
2020-11-28T16:22:48.000Z
|
import logging
import six
import smtplib
from cached_property import cached_property
from email.mime.text import MIMEText
logger = logging.getLogger(__file__)

# Sentinel used in config files for values the user still needs to fill in.
CONFIG_PLACEHOLDER = u"???"


class InvalidEmailConfig(ValueError):
    """The configuration contained missing or invalid email-related values.
    """
class SMTPMailer(object):
    """Sends email through an SMTP server using STARTTLS and login auth."""

    def __init__(self, host, username, password):
        self.host = host
        self.username = username
        self.password = password
        # Successfully sent messages, kept for inspection (e.g. in tests).
        self._sent = []

    @cached_property
    def server(self):
        # Connect lazily on first use; cached for the mailer's lifetime.
        return get_email_server(self.host)

    def send(self, subject, sender, recipients, body):
        """Send a plain-text email.

        Raises MessengerError (chained to the underlying exception) when
        the SMTP conversation fails for any reason.
        """
        msg = self._make_email(subject, sender, recipients, body)
        try:
            self.server.starttls()
            self.server.login(self.username, self.password)
            self.server.sendmail(sender, recipients, msg.as_string())
            self.server.quit()
        except smtplib.SMTPException as ex:
            six.raise_from(MessengerError("SMTP error sending HIT error email."), ex)
        except Exception as ex:
            six.raise_from(MessengerError("Unknown error sending HIT error email."), ex)
        self._sent.append(msg)

    def _make_email(self, subject, sender, recipients, body):
        # Build a MIMEText message; recipients are comma-joined into "To".
        msg = MIMEText(body)
        msg["Subject"] = subject
        msg["To"] = ",".join(recipients)
        msg["From"] = sender
        return msg
class LoggingMailer(object):
    """Debug stand-in for SMTPMailer: logs messages instead of sending."""

    def __init__(self):
        # Messages "sent" so far, kept for inspection (e.g. in tests).
        self._sent = []

    def send(self, subject, sender, recipients, body):
        """Format the would-be email and log it at INFO level."""
        lines = [
            "{}:".format(self.__class__.__name__),
            "Subject: {}".format(subject),
            "Sender: {}".format(sender),
            "Recipients: {}".format(", ".join(recipients)),
            "Body:",
            body,
        ]
        msg = "\n".join(lines)
        logger.info(msg)
        self._sent.append(msg)
def get_mailer(config, strict=False):
    """Return an appropriate Messenger.

    If we're in debug mode, or email settings aren't set, return a debug
    version which logs the message instead of attempting to send a real
    email.

    With ``strict=True`` an incomplete email configuration raises
    InvalidEmailConfig instead of silently falling back to logging.
    """
    settings = EmailConfig(config)
    if config.get("mode") == "debug":
        return LoggingMailer()
    problems = settings.validate()
    if problems:
        if strict:
            raise InvalidEmailConfig(problems)
        logger.info(problems + " Will log errors instead of emailing them.")
        return LoggingMailer()
    return SMTPMailer(
        settings.smtp_host, settings.smtp_username, settings.smtp_password
    )
def get_email_server(host):
    """Return an SMTP server using the specified host.

    Abandon attempts to connect after 8 seconds.
    """
    return smtplib.SMTP(host, timeout=8)
class MessengerError(Exception):
    """A message could not be relayed."""
class EmailConfig(object):
    """Extracts and validates email-related values from a Configuration
    """

    # Keys that must all be present (and not placeholders) to send email.
    mail_config_keys = {
        "smtp_host",
        "smtp_username",
        "smtp_password",
        "contact_email_on_error",
        "dallinger_email_address",
    }

    def __init__(self, config):
        self.smtp_host = config.get("smtp_host")
        self.smtp_username = config.get("smtp_username", None)
        self.contact_email_on_error = config.get("contact_email_on_error")
        self.smtp_password = config.get("smtp_password", None)
        self.dallinger_email_address = config.get("dallinger_email_address")

    def as_dict(self):
        """Return the settings as a dict, with the password mostly masked."""
        data = self.__dict__.copy()
        secret = self.smtp_password
        if secret and secret != CONFIG_PLACEHOLDER:
            data["smtp_password"] = secret[:3] + "......" + secret[-1]
        return data

    def validate(self):
        """Could this config be used to send a real email?"""
        bad = [
            key
            for key in self.mail_config_keys
            if not getattr(self, key, False)
            or getattr(self, key, False) == CONFIG_PLACEHOLDER
        ]
        if bad:
            return "Missing or invalid config values: {}".format(
                ", ".join(sorted(bad))
            )
class NotifiesAdmin(object):
    """Quickly email the experiment admin/author with to/from addresses
    taken from configuration.
    """

    def __init__(self, email_settings, mailer):
        # Sender and recipient come straight from the email configuration.
        self.fromaddr = email_settings.dallinger_email_address
        self.toaddr = email_settings.contact_email_on_error
        self.mailer = mailer

    def send(self, subject, body):
        """Relay *subject*/*body* to the admin via the configured mailer."""
        recipients = [self.toaddr]
        self.mailer.send(subject, self.fromaddr, recipients, body)
def admin_notifier(config):
    """Return an appropriate NotifiesAdmin implementation.

    If we're in debug mode, or email settings aren't set, return a debug
    version which logs the message instead of attempting to send a real
    email.
    """
    settings = EmailConfig(config)
    if config.get("mode") == "debug":
        return NotifiesAdmin(settings, LoggingMailer())
    problems = settings.validate()
    if problems:
        # Incomplete config: fall back to logging rather than failing.
        logger.info(problems + " Will log errors instead of emailing them.")
        return NotifiesAdmin(settings, LoggingMailer())
    return NotifiesAdmin(
        settings,
        SMTPMailer(settings.smtp_host, settings.smtp_username, settings.smtp_password),
    )
| 30.438596
| 88
| 0.648991
|
4a0fb8f81ed56486ee94d175044fb8ea872e2c56
| 9,337
|
py
|
Python
|
venv/Lib/site-packages/pandas/tests/extension/test_categorical.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 603
|
2020-12-23T13:49:32.000Z
|
2022-03-31T23:38:03.000Z
|
venv/Lib/site-packages/pandas/tests/extension/test_categorical.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 387
|
2020-12-15T14:54:04.000Z
|
2022-03-31T07:00:21.000Z
|
venv/Lib/site-packages/pandas/tests/extension/test_categorical.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 35
|
2021-03-26T03:12:04.000Z
|
2022-03-23T10:15:10.000Z
|
"""
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import string
import numpy as np
import pytest
import pandas as pd
from pandas import Categorical, CategoricalIndex, Timestamp
import pandas._testing as tm
from pandas.api.types import CategoricalDtype
from pandas.tests.extension import base
def make_data():
    """Return 100 random ascii letters whose first two entries differ.

    The extension-array suite requires data[0] and data[1] to be
    non-missing and distinct; letters are never missing, so only the
    distinctness has to be enforced by resampling.
    """
    letters = list(string.ascii_letters)
    values = np.random.choice(letters, size=100)
    while values[0] == values[1]:
        values = np.random.choice(letters, size=100)
    return values
@pytest.fixture
def dtype():
    # Default (unordered, no explicit categories) categorical dtype.
    return CategoricalDtype()
@pytest.fixture
def data():
    """Length-100 array for this type.

    * data[0] and data[1] should both be non missing
    * data[0] and data[1] should not be equal
    """
    return Categorical(make_data())
@pytest.fixture
def data_missing():
    """Length 2 array with [NA, Valid]"""
    return Categorical([np.nan, "A"])
@pytest.fixture
def data_for_sorting():
    # Ordered categorical whose category order differs from value order.
    return Categorical(["A", "B", "C"], categories=["C", "A", "B"], ordered=True)
@pytest.fixture
def data_missing_for_sorting():
    # Ordered categorical containing one missing value.
    return Categorical(["A", None, "B"], categories=["B", "A"], ordered=True)
@pytest.fixture
def na_value():
    # Categorical uses NaN as its missing-value sentinel.
    return np.nan
@pytest.fixture
def data_for_grouping():
    # Repeated groups plus missing values for groupby tests.
    return Categorical(["a", "a", None, None, "b", "b", "a", "c"])
class TestDtype(base.BaseDtypeTests):
    # Inherit the standard dtype-conformance tests unchanged.
    pass
class TestInterface(base.BaseInterfaceTests):
    """Interface conformance, with Categorical-specific overrides."""

    @pytest.mark.skip(reason="Memory usage doesn't match")
    def test_memory_usage(self, data):
        # Is this deliberate?
        super().test_memory_usage(data)

    def test_contains(self, data, data_missing):
        # GH-37867
        # na value handling in Categorical.__contains__ is deprecated.
        # See base.BaseInterFaceTests.test_contains for more details.
        na_value = data.dtype.na_value
        # ensure data without missing values
        data = data[~data.isna()]

        # first elements are non-missing
        assert data[0] in data
        assert data_missing[0] in data_missing

        # check the presence of na_value
        assert na_value in data_missing
        assert na_value not in data

        # Categoricals can contain other nan-likes than na_value
        for na_value_obj in tm.NULL_OBJECTS:
            if na_value_obj is na_value:
                continue
            assert na_value_obj not in data
            assert na_value_obj in data_missing  # this line differs from super method
class TestConstructors(base.BaseConstructorsTests):
    # Inherit the standard constructor tests unchanged.
    pass
class TestReshaping(base.BaseReshapingTests):
    def test_concat_with_reindex(self, data):
        # Concat of categoricals with differing indexes upcasts to object;
        # marked xfail pending an upstream decision.
        pytest.xfail(reason="Deliberately upcast to object?")
class TestGetitem(base.BaseGetitemTests):
    @pytest.mark.skip(reason="Backwards compatibility")
    def test_getitem_scalar(self, data):
        # CategoricalDtype.type isn't "correct" since it should
        # be a parent of the elements (object). But don't want
        # to break things by changing.
        super().test_getitem_scalar(data)
class TestSetitem(base.BaseSetitemTests):
    # Inherit the standard setitem tests unchanged.
    pass
class TestMissing(base.BaseMissingTests):
    # limit-aware fillna is not implemented for Categorical.
    @pytest.mark.skip(reason="Not implemented")
    def test_fillna_limit_pad(self, data_missing):
        super().test_fillna_limit_pad(data_missing)

    @pytest.mark.skip(reason="Not implemented")
    def test_fillna_limit_backfill(self, data_missing):
        super().test_fillna_limit_backfill(data_missing)
class TestReduce(base.BaseNoReduceTests):
    # Categorical supports no numeric/boolean reductions.
    pass
class TestMethods(base.BaseMethodsTests):
    """Method conformance, with Categorical-specific overrides."""

    @pytest.mark.skip(reason="Unobserved categories included")
    def test_value_counts(self, all_data, dropna):
        return super().test_value_counts(all_data, dropna)

    def test_combine_add(self, data_repeated):
        # GH 20825
        # When adding categoricals in combine, result is a string
        orig_data1, orig_data2 = data_repeated(2)
        s1 = pd.Series(orig_data1)
        s2 = pd.Series(orig_data2)
        result = s1.combine(s2, lambda x1, x2: x1 + x2)
        expected = pd.Series(
            [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]
        )
        self.assert_series_equal(result, expected)

        # Combining with a scalar follows the same string semantics.
        val = s1.iloc[0]
        result = s1.combine(val, lambda x1, x2: x1 + x2)
        expected = pd.Series([a + val for a in list(orig_data1)])
        self.assert_series_equal(result, expected)

    @pytest.mark.skip(reason="Not Applicable")
    def test_fillna_length_mismatch(self, data_missing):
        super().test_fillna_length_mismatch(data_missing)

    def test_searchsorted(self, data_for_sorting):
        # searchsorted is only defined for ordered categoricals.
        if not data_for_sorting.ordered:
            raise pytest.skip(reason="searchsorted requires ordered data.")
class TestCasting(base.BaseCastingTests):
    """Casting conformance plus Categorical-specific regression tests."""

    @pytest.mark.parametrize("cls", [Categorical, CategoricalIndex])
    @pytest.mark.parametrize("values", [[1, np.nan], [Timestamp("2000"), pd.NaT]])
    def test_cast_nan_to_int(self, cls, values):
        # GH 28406: casting categorical data containing NA to int must raise.
        s = cls(values)
        msg = "Cannot (cast|convert)"
        with pytest.raises((ValueError, TypeError), match=msg):
            s.astype(int)

    @pytest.mark.parametrize(
        "expected",
        [
            pd.Series(["2019", "2020"], dtype="datetime64[ns, UTC]"),
            pd.Series([0, 0], dtype="timedelta64[ns]"),
            pd.Series([pd.Period("2019"), pd.Period("2020")], dtype="period[A-DEC]"),
            pd.Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval"),
            pd.Series([1, np.nan], dtype="Int64"),
        ],
    )
    def test_cast_category_to_extension_dtype(self, expected):
        # GH 28668: round-tripping through category preserves the dtype.
        result = expected.astype("category").astype(expected.dtype)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "dtype, expected",
        [
            (
                "datetime64[ns]",
                np.array(["2015-01-01T00:00:00.000000000"], dtype="datetime64[ns]"),
            ),
            (
                "datetime64[ns, MET]",
                pd.DatetimeIndex(
                    [Timestamp("2015-01-01 00:00:00+0100", tz="MET")]
                ).array,
            ),
        ],
    )
    def test_consistent_casting(self, dtype, expected):
        # GH 28448
        result = Categorical("2015-01-01").astype(dtype)
        assert result == expected
class TestArithmeticOps(base.BaseArithmeticOpsTests):
    """Arithmetic conformance; Categorical rejects most arithmetic."""

    def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
        # frame & scalar
        op_name = all_arithmetic_operators
        if op_name != "__rmod__":
            super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
        else:
            pytest.skip("rmod never called when string is first argument")

    def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
        op_name = all_arithmetic_operators
        if op_name != "__rmod__":
            super().test_arith_series_with_scalar(data, op_name)
        else:
            pytest.skip("rmod never called when string is first argument")

    def test_add_series_with_extension_array(self, data):
        # Adding a Categorical to a Series must raise.
        ser = pd.Series(data)
        with pytest.raises(TypeError, match="cannot perform|unsupported operand"):
            ser + data

    def test_divmod_series_array(self):
        # GH 23287
        # skipping because it is not implemented
        pass

    def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
        # Categorical raises TypeError rather than NotImplementedError.
        return super()._check_divmod_op(s, op, other, exc=TypeError)
class TestComparisonOps(base.BaseComparisonOpsTests):
    """Comparison conformance; unordered categoricals only support ==/!=."""

    def _compare_other(self, s, data, op_name, other):
        op = self.get_op_from_name(op_name)
        if op_name == "__eq__":
            result = op(s, other)
            expected = s.combine(other, lambda x, y: x == y)
            assert (result == expected).all()

        elif op_name == "__ne__":
            result = op(s, other)
            expected = s.combine(other, lambda x, y: x != y)
            assert (result == expected).all()

        else:
            msg = "Unordered Categoricals can only compare equality or not"
            with pytest.raises(TypeError, match=msg):
                op(data, other)

    @pytest.mark.parametrize(
        "categories",
        [["a", "b"], [0, 1], [Timestamp("2019"), Timestamp("2020")]],
    )
    def test_not_equal_with_na(self, categories):
        # https://github.com/pandas-dev/pandas/issues/32276
        c1 = Categorical.from_codes([-1, 0], categories=categories)
        c2 = Categorical.from_codes([0, 1], categories=categories)

        result = c1 != c2

        assert result.all()
class TestParsing(base.BaseParsingTests):
    # Inherit the standard CSV round-trip parsing tests unchanged.
    pass
| 31.650847
| 86
| 0.651066
|
4a0fb9dc55c57e8833859e2c48fa636409940f81
| 1,185
|
py
|
Python
|
formfactory/fields.py
|
AltusBarry/django-formfactory
|
8da378d0952bfd0eb9a08d49b17b5b95ee7e607d
|
[
"BSD-3-Clause"
] | null | null | null |
formfactory/fields.py
|
AltusBarry/django-formfactory
|
8da378d0952bfd0eb9a08d49b17b5b95ee7e607d
|
[
"BSD-3-Clause"
] | null | null | null |
formfactory/fields.py
|
AltusBarry/django-formfactory
|
8da378d0952bfd0eb9a08d49b17b5b95ee7e607d
|
[
"BSD-3-Clause"
] | null | null | null |
import markdown
from django.forms.fields import Field
from django.utils.text import mark_safe
from django.utils.translation import ugettext as _
from formfactory import widgets
class ParagraphField(Field):
    """A non-input form field that renders a block of markdown text.

    The paragraph is converted to HTML here and handed to the widget via
    its attrs, so the widget itself stays a dumb renderer.
    """

    widget = widgets.ParagraphWidget

    def __init__(self, paragraph="", *args, **kwargs):
        super(ParagraphField, self).__init__(*args, **kwargs)

        # Always empty out label for a paragraph field.
        self.label = ""

        # No matter what is set, this field should never be required.
        self.required = False
        self.widget.is_required = False

        # Fields should handle their own args not being set.
        if paragraph == "":
            paragraph = _("Please set a value for this field.")

        # Pass the paragraph text to the widget without needing to override
        # widget __init__. Process markdown here, its up to custom fields to
        # worry about what they are trying to do, not factory.py
        data = {
            "base_attrs": self.widget.attrs,
            "extra_attrs": {"paragraph": markdown.markdown(paragraph)}
        }
        attrs = self.widget.build_attrs(**data)
        self.widget.attrs = attrs
| 32.916667
| 76
| 0.657384
|
4a0fbbf30ee1dbc6699712442fab3ea251ddc82f
| 2,358
|
py
|
Python
|
main.py
|
lavron/neopixel-uanimate
|
7a05b56c8f2f2a7f6600650646782d6dc5fc307b
|
[
"MIT"
] | 1
|
2019-11-25T02:31:01.000Z
|
2019-11-25T02:31:01.000Z
|
main.py
|
lavron/neopixel-uanimate
|
7a05b56c8f2f2a7f6600650646782d6dc5fc307b
|
[
"MIT"
] | null | null | null |
main.py
|
lavron/neopixel-uanimate
|
7a05b56c8f2f2a7f6600650646782d6dc5fc307b
|
[
"MIT"
] | 1
|
2019-10-08T06:30:32.000Z
|
2019-10-08T06:30:32.000Z
|
# https://www.youtube.com/watch?v=a1_O9AnuGB0
import neopixel
from machine import Pin
from neopixel_animate import NeopixelAnimate, mix, wave, random_color
import uasyncio as asyncio
import utime
import random
# 25-pixel NeoPixel strip driven from GPIO pin 25.
strip_len = 25
strip = neopixel.NeoPixel(Pin(25), strip_len)

# Color constants; channel values run 0-255, 128 is half intensity.
GREEN = (0, 128, 0)
RED = (128, 0, 0)
BLUE = (0, 0, 128)
BLACK = (0, 0, 0)

direction = 1
class SoftMixAnimation(NeopixelAnimate):
    """Animation that sweeps a new random color over the previous one,
    reversing direction and re-randomizing on every cycle."""

    def __init__(self, strip_len, duration_ms=0, **params):
        super().__init__(strip_len, duration_ms=duration_ms, **params)
        # Start with two random colors; direction toggles each cycle.
        self.params['bg_color'] = random_color()
        self.params['active_color'] = random_color()
        self.params['direction'] = 1

    def new_color(self):
        # The color that just finished sweeping becomes the background and
        # the next sweep runs in the opposite direction.
        self.params['bg_color'] = self.params['active_color']
        self.params['active_color'] = random_color()
        self.params['direction'] = 1 - self.params['direction']

    def frame(self, offset):
        """Render one frame.

        NOTE(review): assumes *offset* is the cycle progress in [0, 1] —
        confirm against NeopixelAnimate.
        """
        edge_px = 8  # width of the blurred leading edge, in pixels
        bg_color = self.params['bg_color']
        active_color = self.params['active_color']
        active_px = self.len * offset
        offset = active_px % 1
        filled_px = int(active_px) - edge_px
        result = [0] * self.len
        for i in range(self.len):
            if i < filled_px:
                # Fully swept region shows the incoming color.
                result[i] = active_color
            elif i <= active_px:
                # Blend across the edge between the two colors.
                j = i - filled_px
                blur_offset = max((offset - 1 - j) /
                                  edge_px + 1, 0)  # magic formula
                result[i] = mix(active_color, bg_color, blur_offset)
            else:
                result[i] = bg_color
        # result = result[init_px:] + result[:init_px]
        if self.params['direction'] == 1:
            result.reverse()
        for i in range(self.len):
            self.leds[i] = result[i]
def new_color():
    # Cycle-complete callback: ask the animation to pick fresh colors.
    print("new_soft_mix callback")
    soft_mix.new_color()


# 5-second animation cycle, invoking new_color at the end of each cycle.
soft_mix = SoftMixAnimation(strip_len, 5000, callback=new_color)
async def process_animation():
    """Animation coroutine: push each ready frame to the strip, polling
    roughly every 50 ms."""
    frame = False
    soft_mix.start()
    while True:
        frame = soft_mix.get_frame()
        if frame:
            # print("frame:", frame)
            for i in range(strip_len):
                strip[i] = frame[i][:]
            strip.write()
        await asyncio.sleep_ms(50)


# Schedule the animation on the uasyncio loop and run forever.
loop = asyncio.get_event_loop()
loop.create_task(process_animation())
loop.run_forever()
| 24.821053
| 70
| 0.592027
|
4a0fbca92f86584bd74a744433b3008877b341f3
| 724
|
py
|
Python
|
picasso/picasso/context_processor.py
|
TejasM/picasso
|
8c6ff1b0f955810f8e70112a0401b981afb2c0ec
|
[
"MIT"
] | null | null | null |
picasso/picasso/context_processor.py
|
TejasM/picasso
|
8c6ff1b0f955810f8e70112a0401b981afb2c0ec
|
[
"MIT"
] | null | null | null |
picasso/picasso/context_processor.py
|
TejasM/picasso
|
8c6ff1b0f955810f8e70112a0401b981afb2c0ec
|
[
"MIT"
] | null | null | null |
from django.db.models import Q, Count
from picasso.index.models import Tag, Listing
__author__ = 'tmehta'
def get_current_tags(request):
    """Context processor exposing tag data to every template.

    Provides: top-level visible tags as querysets ('full_tags') and name
    lists ('tags'), all visible tag names ('all_tags'), and the total
    listing count — tags ordered by how many listings they carry.
    """
    annotated = Tag.objects.annotate(num_listings=Count('listings'))
    full_tags = annotated.filter(parent_tag=None, visible=True).order_by('-num_listings')
    visible_tags = annotated.filter(visible=True).order_by('-num_listings')

    def names(queryset):
        # Drop placeholder/empty tag names and coerce to plain str.
        return [str(t.tag_name) for t in queryset
                if t.tag_name != 'Blank' and t.tag_name != '']

    return {
        'tags': names(full_tags),
        'full_tags': full_tags,
        'all_tags': names(visible_tags),
        'number_of_listings': Listing.objects.count(),
    }
| 51.714286
| 116
| 0.714088
|
4a0fbde0ab1334cc3f044ec2a15e7a81cbb1e2b7
| 2,191
|
py
|
Python
|
awacs/cloudsearch.py
|
alanjjenkins/awacs
|
0065e1833eae6a6070edb4ab4f180fd10b26c19a
|
[
"BSD-2-Clause"
] | 358
|
2015-01-01T05:11:05.000Z
|
2022-03-20T14:11:39.000Z
|
awacs/cloudsearch.py
|
alanjjenkins/awacs
|
0065e1833eae6a6070edb4ab4f180fd10b26c19a
|
[
"BSD-2-Clause"
] | 171
|
2015-01-17T00:32:48.000Z
|
2022-03-28T02:02:57.000Z
|
awacs/cloudsearch.py
|
michael-k/awacs
|
ed3dc822d268f10b0cd83feb90fd279277e54ed4
|
[
"BSD-2-Clause"
] | 100
|
2015-01-04T16:34:34.000Z
|
2022-02-21T06:17:17.000Z
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon CloudSearch"
prefix = "cloudsearch"


class Action(BaseAction):
    """An IAM policy action in the ``cloudsearch`` namespace."""

    def __init__(self, action: str = None) -> None:
        super().__init__(prefix, action)
class ARN(BaseARN):
    """An ARN for an Amazon CloudSearch resource."""

    def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
        super().__init__(
            service=prefix, resource=resource, region=region, account=account
        )
# One module-level constant per CloudSearch IAM action.
AddTags = Action("AddTags")
BuildSuggesters = Action("BuildSuggesters")
CreateDomain = Action("CreateDomain")
DefineAnalysisScheme = Action("DefineAnalysisScheme")
DefineExpression = Action("DefineExpression")
DefineIndexField = Action("DefineIndexField")
DefineIndexFields = Action("DefineIndexFields")
DefineSuggester = Action("DefineSuggester")
DeleteAnalysisScheme = Action("DeleteAnalysisScheme")
DeleteDomain = Action("DeleteDomain")
DeleteExpression = Action("DeleteExpression")
DeleteIndexField = Action("DeleteIndexField")
DeleteSuggester = Action("DeleteSuggester")
DescribeAnalysisSchemes = Action("DescribeAnalysisSchemes")
DescribeAvailabilityOptions = Action("DescribeAvailabilityOptions")
DescribeDomainEndpointOptions = Action("DescribeDomainEndpointOptions")
DescribeDomains = Action("DescribeDomains")
DescribeExpressions = Action("DescribeExpressions")
DescribeIndexFields = Action("DescribeIndexFields")
DescribeScalingParameters = Action("DescribeScalingParameters")
DescribeServiceAccessPolicies = Action("DescribeServiceAccessPolicies")
DescribeSuggesters = Action("DescribeSuggesters")
IndexDocuments = Action("IndexDocuments")
ListDomainNames = Action("ListDomainNames")
ListTags = Action("ListTags")
RemoveTags = Action("RemoveTags")
UpdateAvailabilityOptions = Action("UpdateAvailabilityOptions")
UpdateDomainEndpointOptions = Action("UpdateDomainEndpointOptions")
UpdateScalingParameters = Action("UpdateScalingParameters")
UpdateServiceAccessPolicies = Action("UpdateServiceAccessPolicies")
# Lower-case data-plane actions (document upload, search, suggestions).
document = Action("document")
search = Action("search")
suggest = Action("suggest")
| 37.775862
| 88
| 0.789137
|
4a0fbe2719131c405f418b5e80fde321064661b9
| 1,872
|
py
|
Python
|
Support_Vector_Machine(SVM)/SVM_Example.py
|
LeoZ123/Machine-Learning-Practice
|
dae55f52bb31f428526d6d60229bd1827c4e0af0
|
[
"MIT"
] | null | null | null |
Support_Vector_Machine(SVM)/SVM_Example.py
|
LeoZ123/Machine-Learning-Practice
|
dae55f52bb31f428526d6d60229bd1827c4e0af0
|
[
"MIT"
] | null | null | null |
Support_Vector_Machine(SVM)/SVM_Example.py
|
LeoZ123/Machine-Learning-Practice
|
dae55f52bb31f428526d6d60229bd1827c4e0af0
|
[
"MIT"
] | 1
|
2018-04-28T01:27:23.000Z
|
2018-04-28T01:27:23.000Z
|
'''
Created on Mar 16, 2017

@author: Leo Zhong
'''
# Demo: fit a linear SVM to two separable Gaussian blobs and plot the
# maximum-margin hyperplane with its margins and support vectors.
print(__doc__)

import numpy as np  #for calculation
import pylab as pl  #for plot
from sklearn import svm

# create 40 separable points
np.random.seed(0)  #generate the same random num
# Two clusters of 20 points shifted to (-2,-2) and (+2,+2).
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20

# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)

# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]  # slope of the decision line in y = a*x + b form
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]

# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])

print ("w: ", w)
print ("a: ", a)
# print (" xx: ", xx)
# print (" yy: ", yy)
print ("support_vectors_: ", clf.support_vectors_)
print ("clf.coef_: ", clf.coef_)

# In scikit-learn coef_ attribute holds the vectors of the separating hyperplanes for linear models.
# It has shape (n_classes, n_features) if n_classes > 1 (multi-class one-vs-all) and (1, n_features) for binary classification.
#
# In this toy binary classification example, n_features == 2,
# Hence w = coef_[0] is the vector orthogonal to the hyperplane (the hyperplane is fully defined by it + the intercept).
#
# To plot this hyperplane in the 2D case (any hyperplane of a 2D plane is a 1D line), find a f as in y = f(x) = a.x + b.
# In this case a is the slope of the line and can be computed by a = -w[0] / w[1].

# plot the line, the points, and the nearest vectors to the plane
pl.plot(xx, yy, 'k-')
pl.plot(xx, yy_down, 'k--')
pl.plot(xx, yy_up, 'k--')
pl.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
           s=80, facecolors='none')
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)

pl.axis('tight')
pl.show()
| 27.940299
| 127
| 0.655449
|
4a0fbe501fb97ee62f57f2fb04977e2c089e24ef
| 3,610
|
py
|
Python
|
server.py
|
eric2007/Python-game-of-UNO
|
d6e86a114139a6b84e9597109d8249eec4752187
|
[
"MIT"
] | null | null | null |
server.py
|
eric2007/Python-game-of-UNO
|
d6e86a114139a6b84e9597109d8249eec4752187
|
[
"MIT"
] | null | null | null |
server.py
|
eric2007/Python-game-of-UNO
|
d6e86a114139a6b84e9597109d8249eec4752187
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import socket
import threading
import time
import random
import json
# Non-blocking TCP listener on localhost; accept()/recv() raise
# BlockingIOError instead of waiting.
mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysocket.bind(('127.0.0.1', 1026))
mysocket.listen(10)
mysocket.setblocking(False)
# mysocket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, 2)
# One-shot 20-character session password, printed for the operator.
# NOTE(review): generated with `random`, not `secrets` -- fine for a
# game, not for anything security-sensitive.
password = ''
for _ in range(20):
    password += random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-=')
print(password)
# Five tables with two seats each; a seat holds a player's address token.
players = [[None, None],[None, None],[None, None],[None, None],[None, None]]
# Card colour codes and the 108-card deck (IDs 1..108; see getRandomCard).
colorList = ['r','b','y','g']
cardLeft = list(range(1,109))
def getRandomCard():
    """Draw one card at random from the shared ``cardLeft`` deck.

    Returns a card code such as ``'wild'``, ``'wild4'``, ``'r0'``,
    ``'b7'``, ``'greverse'``, ``'yskip'`` or ``'gdraw2'``.

    Raises SystemExit when the deck is exhausted.
    """
    if not cardLeft:
        raise SystemExit("Out of cards!")
    # Remove a uniformly random card ID (1..108) from the deck.
    drawn = cardLeft.pop(random.randint(0, len(cardLeft) - 1))
    if drawn > 104:          # IDs 105-108: the four plain wilds
        return 'wild'
    if drawn > 100:          # IDs 101-104: the four wild-draw-4s
        return 'wild4'
    color = colorList[drawn % 4]
    face = drawn % 25
    if face == 0:
        return color + '0'
    face = face % 12
    special_faces = {9: 'reverse', 10: 'skip', 11: 'draw2'}
    if face in special_faces:
        return color + special_faces[face]
    return color + str(face + 1)
def cardFun(num):
    """Deal ``num`` random cards and return them as a list."""
    return [getRandomCard() for _ in range(num)]
def tcplink(sock, addr):
    """Per-connection worker thread: read JSON commands and serve them."""
    print("Accept connection from %s:%s" % addr)
    # Disabled password handshake, kept for reference:
    # sock.send(b'Your password')
    # data = sock.recv(20)
    # if data.decode('utf-8').encode('utf-8') == password:
    # sock.send(b'You\'re welcome!')
    # else:
    # print('Connection from %s:%s password wrong.' % addr)
    # sock.send(b'Password wrong!')
    # sock.close()
    # return
    # data = sock.recv(4)
    # Player token: digits of the peer IP concatenated with the port.
    ip = addr[0].split('.')
    myaddr = ip[0]+ip[1]+ip[2]+ip[3]+str(addr[1])
    # pid = addr.split(4)
    # sock.send(myaddr.encode('utf-8'))
    while True:
        # p:play e:exit r:replay a:add game
        dataRecv = ''
        # Drain currently-available bytes one at a time.
        # NOTE(review): the accepted socket appears to be non-blocking
        # (inherited from the listener on some platforms), so recv()
        # raising BlockingIOError is what ends this inner loop -- the
        # thread busy-polls and may read a *partial* JSON message.
        while True:
            try:
                server_replay = sock.recv(1).decode('utf-8')
                dataRecv+=server_replay
                print("debug#1")
            except (BlockingIOError, socket.timeout):
                print("debug#2")
                break
        try:
            print("debug#3")
            data = json.loads(dataRecv)
        except json.decoder.JSONDecodeError:
            # NOTE(review): an empty or partial read lands here and
            # `break`s, closing the whole connection -- presumably a
            # retry/continue was intended; verify.
            print(dataRecv)
            break
        if not data or data['cmd'] == 'e':
            break
        if data['cmd'] == 'a':
            # f: fail s: suscess  (sic)
            print('a')
            # Seat the player at the first free slot of the requested table.
            if not players[int(data['table'])-1][0]:
                players[int(data['table'])-1][0] = myaddr
                sock.send(b'{"cmd":"s"}')
                print(players)
            elif not players[int(data['table'])-1][1]:
                players[int(data['table'])-1][1] = myaddr
                sock.send(b'{"cmd":"s"}')
                print(players)
            else:
                sock.send(b'{"cmd":"f","reason":"Table Full"}')
        if data['cmd'] == 'r':
            # Deal a starting hand of 7 cards, sent as a JSON list.
            sock.send(json.dumps(cardFun(7)).encode('utf-8'))
        if data['cmd'] == 'p':
            pass
        dataRecv = ''
    sock.close()
    print('Connection from %s:%s closed.' % addr)
# Main accept loop: hand each incoming connection to its own thread.
# NOTE(review): because the listening socket is non-blocking this loop
# spins at 100% CPU on BlockingIOError; a blocking accept() or the
# `selectors` module would avoid that.
while True:
    # Accept a new connection.
    try:
        sock, addr = mysocket.accept()
        # Spawn a new thread to handle this TCP connection.
        t = threading.Thread(target=tcplink, args=(sock, addr))
        t.start()
    except BlockingIOError:
        pass
| 32.522523
| 98
| 0.528255
|
4a0fbfa607944aa43ab8aedf2528c79cc74f0d6f
| 990
|
py
|
Python
|
sanic/models/handler_types.py
|
Lin0818/sanic
|
0cb342aef4c8cfd8a7287f800dc9a3487b1360ca
|
[
"MIT"
] | null | null | null |
sanic/models/handler_types.py
|
Lin0818/sanic
|
0cb342aef4c8cfd8a7287f800dc9a3487b1360ca
|
[
"MIT"
] | null | null | null |
sanic/models/handler_types.py
|
Lin0818/sanic
|
0cb342aef4c8cfd8a7287f800dc9a3487b1360ca
|
[
"MIT"
] | null | null | null |
from asyncio.events import AbstractEventLoop
from typing import Any, Callable, Coroutine, Optional, TypeVar, Union
import sanic
from sanic.request import Request
from sanic.response import BaseHTTPResponse, HTTPResponse
# App type variable; the string bound avoids importing the class eagerly
# (the `sanic` module itself is imported above).
Sanic = TypeVar("Sanic", bound="sanic.Sanic")
# A middleware may return its result directly or as an awaitable;
# a None result lets normal request processing continue.
MiddlewareResponse = Union[
    Optional[HTTPResponse], Coroutine[Any, Any, Optional[HTTPResponse]]
]
# Request middleware sees only the request.
RequestMiddlewareType = Callable[[Request], MiddlewareResponse]
# Response middleware sees the request and the response being returned.
ResponseMiddlewareType = Callable[
    [Request, BaseHTTPResponse], MiddlewareResponse
]
# Error middleware sees the request and the raised exception.
ErrorMiddlewareType = Callable[
    [Request, BaseException], Optional[Coroutine[Any, Any, None]]
]
MiddlewareType = Union[RequestMiddlewareType, ResponseMiddlewareType]
# Listeners take the app, optionally with the event loop as 2nd argument.
ListenerType = Union[
    Callable[[Sanic], Optional[Coroutine[Any, Any, None]]],
    Callable[[Sanic, AbstractEventLoop], Optional[Coroutine[Any, Any, None]]],
]
# Route handlers are async and may return None (e.g. websocket routes).
RouteHandler = Callable[..., Coroutine[Any, Any, Optional[HTTPResponse]]]
SignalHandler = Callable[..., Coroutine[Any, Any, None]]
| 34.137931
| 78
| 0.774747
|
4a0fbfb4603136011c913aee1f9c74dd60f4e81f
| 5,077
|
py
|
Python
|
modules/database/create_table.py
|
elliotwutingfeng/Google-Safe-Browsing-DNSBL-Generator
|
1ed8d49047081dd4f6d929f3f9d4d97d21c366e4
|
[
"BSD-3-Clause"
] | null | null | null |
modules/database/create_table.py
|
elliotwutingfeng/Google-Safe-Browsing-DNSBL-Generator
|
1ed8d49047081dd4f6d929f3f9d4d97d21c366e4
|
[
"BSD-3-Clause"
] | null | null | null |
modules/database/create_table.py
|
elliotwutingfeng/Google-Safe-Browsing-DNSBL-Generator
|
1ed8d49047081dd4f6d929f3f9d4d97d21c366e4
|
[
"BSD-3-Clause"
] | null | null | null |
"""
SQLite utilities for making CREATE TABLE queries
"""
from typing import Optional
from apsw import Error
from modules.database.connect import create_connection
from modules.utils.log import init_logger
from modules.utils.parallel_compute import execute_with_ray
from modules.utils.types import DatabaseTableModes
logger = init_logger()
async def _create_ips_table(db_filename: str) -> None:
    """Create SQLite table for storing ipv4 addresses
    at `db_filename`.db database.

    Args:
        db_filename (str): SQLite database filename
    """
    # NOTE(review): declared async but contains no awaits; it runs
    # synchronously when awaited (kept async to match the call sites).
    conn = create_connection(db_filename)
    if conn is not None:
        try:
            cur = conn.cursor()
            # `with conn` wraps the DDL in a transaction.
            with conn:
                cur.execute(
                    """CREATE TABLE IF NOT EXISTS urls (
                url text,
                lastGoogleMalicious integer,
                lastYandexMalicious integer,
                hash text
                )"""
                )
            # To avoid writing redundant SQL queries,
            # we shall refer to ipv4 addresses as urls in SQL
        except Error as error:
            logger.error("filename:%s %s", db_filename, error, exc_info=True)
        conn.close()
async def _create_urls_table(db_filename: str) -> None:
    """Create SQLite table for storing URLs (that are not ipv4 addresses)
    at `db_filename`.db database.

    Args:
        db_filename (str): SQLite database filename
    """
    # NOTE(review): async with no awaits -- see _create_ips_table.
    conn = create_connection(db_filename)
    if conn is not None:
        try:
            cur = conn.cursor()
            # `with conn` wraps the DDL in a transaction; URL rows are
            # deduplicated via the UNIQUE constraint on `url`.
            with conn:
                cur.execute(
                    """CREATE TABLE IF NOT EXISTS urls (
                url text UNIQUE,
                lastListed integer,
                lastGoogleMalicious integer,
                lastYandexMalicious integer,
                hash text
                )"""
                )
        except Error as error:
            logger.error("filename:%s %s", db_filename, error, exc_info=True)
        conn.close()
def _create_malicious_url_hashes_tables(
    db_filename: str = "malicious",
) -> None:
    """Create SQLite tables for storing malicious URL hash prefixes and full hashes
    at `malicious`.db database.

    Args:
        db_filename (str): SQLite database filename. Defaults to "malicious".
    """
    conn = create_connection(db_filename)
    if conn is not None:
        try:
            cur = conn.cursor()
            # Both tables are created in one transaction.
            with conn:
                # Vendor-supplied hash prefixes (variable prefixSize).
                cur.execute(
                    """CREATE TABLE IF NOT EXISTS maliciousHashPrefixes (
                hashPrefix text,
                prefixSize integer,
                vendor text
                )"""
                )
                # Confirmed full hashes, deduplicated per (hash, vendor).
                cur.execute(
                    """CREATE TABLE IF NOT EXISTS maliciousFullHashes (
                fullHash text,
                vendor text,
                UNIQUE (fullHash,vendor)
                )"""
                )
        except Error as error:
            logger.error("%s", error, exc_info=True)
        conn.close()
def initialise_databases(
    db_filenames: Optional[list[str]] = None,
    mode: DatabaseTableModes = "hashes",
) -> None:
    """Create the SQLite databases needed for the requested table mode.

    "hashes" creates the single `malicious` database (hash prefixes and
    full hashes) and ignores `db_filenames`; "domains" creates one URL
    table per filename; "ips" creates one ipv4 table per filename.
    Table creation is idempotent (CREATE TABLE IF NOT EXISTS).

    Args:
        db_filenames (Optional[list[str]]): SQLite database filenames.
            Defaults to None.
        mode (DatabaseTableModes): "hashes", "domains" or "ips".
            Defaults to "hashes".

    Raises:
        ValueError: `mode` must be "hashes" or "domains" or "ips"
    """
    if mode not in ("hashes", "domains", "ips"):
        raise ValueError('mode must be "hashes" or "domains" or "ips"')

    if mode == "hashes":
        # Hash storage always lives in the single shared database.
        db_filenames = ["malicious"]
    elif not db_filenames:
        # Nothing to create for the per-filename modes.
        return

    logger.info(
        "Initialising %d %s .db %s",
        len(db_filenames),
        mode,
        "files" if len(db_filenames) > 1 else "file",
    )

    if mode == "hashes":
        _create_malicious_url_hashes_tables(db_filenames[0])
        return

    # Fan per-file table creation out in parallel.
    table_creator = _create_urls_table if mode == "domains" else _create_ips_table
    execute_with_ray(
        table_creator,
        [(filename,) for filename in db_filenames],
    )
| 34.073826
| 83
| 0.534568
|
4a0fbfd825e3bb2406c0f7850e3d427bf0c52b3b
| 8,875
|
py
|
Python
|
mc-package/mcsim/monte_carlo.py
|
msse-2021-bootcamp/team2-project
|
3915fd811be09e79d7ea5c9a368d7849ef5b629b
|
[
"BSD-3-Clause"
] | null | null | null |
mc-package/mcsim/monte_carlo.py
|
msse-2021-bootcamp/team2-project
|
3915fd811be09e79d7ea5c9a368d7849ef5b629b
|
[
"BSD-3-Clause"
] | 22
|
2021-08-10T20:36:55.000Z
|
2021-08-20T02:35:02.000Z
|
mc-package/mcsim/monte_carlo.py
|
msse-2021-bootcamp/team2-project
|
3915fd811be09e79d7ea5c9a368d7849ef5b629b
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Functions for running a Monte Carlo simulation of Lennard Jones
particles in reduced units.
"""
import math
import random
def calculate_total_energy(coordinates, box_length, cutoff):
    """
    Calculate the total Lennard Jones energy of a configuration.

    Parameters
    ----------
    coordinates : list
        A nested list of [x, y, z] positions, one entry per particle.
    box_length : float
        The length of the (cubic) periodic box.
    cutoff : float
        Pairs separated by more than this distance are ignored.

    Returns
    -------
    float
        The sum of pairwise LJ energies over all unique pairs
        within the cutoff.
    """
    energy = 0
    n_particles = len(coordinates)
    # Visit each unordered pair (i, j) exactly once.
    for i in range(n_particles):
        for j in range(i + 1, n_particles):
            separation = calculate_distance(
                coordinates[i], coordinates[j], box_length)
            if separation < cutoff:
                energy += calculate_LJ(separation)
    return energy
def read_xyz(filepath):
    """
    Read a simulation snapshot from a box-annotated xyz file.

    Expected layout: line 1 holds the box length as its first token,
    line 2 holds the atom count, and every following line is
    ``<label> <x> <y> <z>``.

    Parameters
    ----------
    filepath : str
        The path to the xyz file to be processed.

    Returns
    -------
    atomic_coordinates : list
        A two dimensional list of [x, y, z] floats, one row per atom.
    box_length : float
        The box edge length read from the file header.
    """
    with open(filepath) as f:
        box_length = float(f.readline().split()[0])
        # Skip the atom-count header line. The original converted it with
        # float() into an unused variable, which could crash on headers
        # that carry extra text; the coordinates are simply read to EOF,
        # so the count is not needed.
        f.readline()
        coordinates = f.readlines()

    atomic_coordinates = []
    for atom in coordinates:
        # Drop the leading atom label; keep the numeric fields.
        atomic_coordinates.append([float(coord) for coord in atom.split()[1:]])

    return atomic_coordinates, box_length
def calculate_LJ(r_ij):
    """
    Pairwise Lennard Jones interaction energy, reduced units.

    Evaluates U(r) = 4 * (r^-12 - r^-6) for a single pair.

    Parameters
    ----------
    r_ij : float
        The separation distance between the particles in reduced units.

    Returns
    -------
    float
        The pairwise interaction energy in reduced units.

    Examples
    --------
    >>> calculate_LJ(1)
    0.0
    """
    inv_r = 1 / r_ij
    attractive = math.pow(inv_r, 6)       # the r^-6 term
    repulsive = math.pow(attractive, 2)   # the r^-12 term
    return 4 * (repulsive - attractive)
def calculate_distance(coord1, coord2, box_length=None):
    """
    Euclidean distance between two 3D points, optionally applying the
    minimum image convention.

    Parameters
    ----------
    coord1, coord2 : list
        The [x, y, z] coordinates of the two points.
    box_length : float, optional
        The periodic box length; when given (and nonzero), separations
        larger than half a box are folded back into the primary image.

    Returns
    -------
    float
        The (possibly minimum-image) distance between the points.
    """
    squared_sum = 0
    for axis in (0, 1, 2):
        delta = abs(coord2[axis] - coord1[axis])
        # Minimum image: fold long separations back into the box.
        if box_length and delta > box_length / 2:
            delta = delta - (box_length * round(delta / box_length))
        squared_sum += math.pow(delta, 2)
    return math.sqrt(squared_sum)
def calculate_tail_correction(num_particles, box_length, cutoff):
    """
    Long-range (tail) correction for a truncated LJ potential.

    Compensates, in reduced units, for the energy neglected beyond the
    cutoff radius used in the pairwise energy calculation.

    Parameters
    ----------
    num_particles : int
        The number of particles in the system.
    box_length : int
        Box edge length; the cubic volume is derived from it.
    cutoff : int
        The cutoff distance used when truncating the potential.

    Returns
    -------
    float
        The tail correction associated with using the cutoff.
    """
    inv_rc3 = math.pow(1 / cutoff, 3)
    inv_rc9 = math.pow(1 / cutoff, 9)
    bracket_term = 1 / 3 * inv_rc9 - inv_rc3
    # Prefactor: 8 * pi * N^2 / (3 * V)
    prefactor = (8 * math.pi * (num_particles ** 2)) / (3 * box_length ** 3)
    return prefactor * bracket_term
def accept_or_reject(delta_U, beta):
    """
    Metropolis acceptance test for a proposed Monte Carlo move.

    Parameters
    ----------
    delta_U : float
        The change in energy for moving the system from state m to n.
    beta : float
        Inverse temperature (1 / T in reduced units).

    Returns
    -------
    bool
        True if the move is accepted.
    """
    # Downhill (or equal-energy) moves are always accepted.
    if delta_U <= 0.0:
        return True
    # Uphill moves are accepted with probability exp(-beta * delta_U);
    # exactly one uniform draw is consumed, as in the standard algorithm.
    draw = random.random()
    return draw < math.exp(-beta * delta_U)
def calculate_pair_energy(coordinates, i_particle, box_length, cutoff):
    """
    Interaction energy of one particle with every other particle.

    Parameters
    ----------
    coordinates : list
        The [x, y, z] coordinates of all particles in the system.
    i_particle : int
        Index of the particle whose environment energy is computed.
    box_length : float
        The box length for periodic boundary conditions.
    cutoff : float
        Interactions beyond this separation are not calculated.

    Returns
    -------
    float
        Sum of LJ energies between particle ``i_particle`` and all
        other particles within the cutoff.
    """
    i_position = coordinates[i_particle]
    e_total = 0.0
    for j_particle, j_position in enumerate(coordinates):
        # Skip the self-interaction.
        if j_particle == i_particle:
            continue
        rij = calculate_distance(i_position, j_position, box_length)
        if rij < cutoff:
            e_total += calculate_LJ(rij)
    return e_total
def run_simulation(coordinates, box_length, cutoff, reduced_temperature, num_steps, max_displacement=0.1, freq=1000):
    """
    Run a Metropolis Monte Carlo simulation of LJ particles.

    Parameters
    ----------
    coordinates : list
        Starting [x, y, z] positions; MUTATED IN PLACE as moves are
        accepted.
    box_length : float
        Periodic (cubic) box length.
    cutoff : float
        LJ interaction cutoff distance.
    reduced_temperature : float
        Temperature in reduced units (beta = 1 / T).
    num_steps : int
        Number of single-particle trial moves to attempt.
    max_displacement : float
        Maximum per-axis displacement of a trial move. Defaults to 0.1.
    freq : int
        Print the per-particle energy every `freq` steps. Defaults to 1000.

    Returns
    -------
    list
        The (same, mutated) coordinates list after the final step.
    """
    # Calculated quantities
    beta = 1 / reduced_temperature
    num_particles = len(coordinates)
    # Energy calculations
    total_energy = calculate_total_energy(coordinates, box_length, cutoff)
    total_correction = calculate_tail_correction(num_particles, box_length, cutoff)
    total_energy += total_correction
    for step in range(num_steps):
        # 1. Randomly pick one of the particles.
        random_particle = random.randrange(num_particles)
        # 2. Calculate the interaction energy of the selected particle with the system.
        current_energy = calculate_pair_energy(coordinates, random_particle, box_length, cutoff)
        # 3. Generate a random x, y, z displacement.
        x_rand = random.uniform(-max_displacement, max_displacement)
        y_rand = random.uniform(-max_displacement, max_displacement)
        z_rand = random.uniform(-max_displacement, max_displacement)
        # 4. Modify the coordinate of Nth particle by generated displacements.
        coordinates[random_particle][0] += x_rand
        coordinates[random_particle][1] += y_rand
        coordinates[random_particle][2] += z_rand
        # 5. Calculate the interaction energy of the moved particle with the system and store this value.
        proposed_energy = calculate_pair_energy(coordinates, random_particle, box_length, cutoff)
        # Only the moved particle's pair terms change, so the total is
        # updated with this difference rather than recomputed.
        delta_energy = proposed_energy - current_energy
        # 6. Calculate if we accept the move based on energy difference.
        accept = accept_or_reject(delta_energy, beta)
        # 7. If accepted, move the particle.
        if accept:
            total_energy += delta_energy
        else:
            # Move not accepted, roll back coordinates
            coordinates[random_particle][0] -= x_rand
            coordinates[random_particle][1] -= y_rand
            coordinates[random_particle][2] -= z_rand
        # 8. Print the energy if step is a multiple of freq.
        if step % freq == 0:
            print(step, total_energy/num_particles)
    return coordinates
# NOTE(review): executes at import time -- every import of this module
# prints to stdout; consider removing or guarding under __main__.
print("package imported!")
| 29.098361
| 153
| 0.626592
|
4a0fc0d449f6e59064b7e4c4e395313b9ac90ff2
| 2,071
|
py
|
Python
|
tests/test_storage.py
|
raphaelm/django-pgrowcrypt
|
cf113acf19e1656fd436bd346cfc0a240a423890
|
[
"Apache-2.0"
] | null | null | null |
tests/test_storage.py
|
raphaelm/django-pgrowcrypt
|
cf113acf19e1656fd436bd346cfc0a240a423890
|
[
"Apache-2.0"
] | 1
|
2018-12-22T23:37:36.000Z
|
2018-12-22T23:37:36.000Z
|
tests/test_storage.py
|
raphaelm/django-pgrowcrypt
|
cf113acf19e1656fd436bd346cfc0a240a423890
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from django.db import InternalError
from .testapp.models import Author, Book
@pytest.mark.django_db
def test_save_and_retrieve_cycles(key):
    """A keyed row survives create/read and update/re-read cycles."""
    Book.objects.create(title='The Lord of the Rings', _key=key)
    b = Book.objects.with_key(key).first()
    assert b.title == 'The Lord of the Rings'
    b.title = 'Harry Potter'
    b.save()
    b = Book.objects.with_key(key).first()
    assert b.title == 'Harry Potter'
@pytest.mark.django_db
def test_create_from_query(key):
    """create() on a keyed queryset picks up that key implicitly."""
    Book.objects.with_key(key).create(title='The Lord of the Rings')
    b = Book.objects.with_key(key).first()
    assert b.title == 'The Lord of the Rings'
@pytest.mark.django_db
def test_delete(key):
    """Deleting a keyed row removes it from keyed queries."""
    b = Book.objects.with_key(key).create(title='The Lord of the Rings')
    b.delete()
    assert Book.objects.with_key(key).count() == 0
@pytest.mark.django_db
def test_refresh_from_db(key):
    """refresh_from_db() re-reads (and re-decrypts) without error."""
    b = Book.objects.with_key(key).create(title='The Lord of the Rings')
    b.refresh_from_db()
    assert b.title == 'The Lord of the Rings'
@pytest.mark.django_db
def test_missing_key():
    """Writes require a key; unkeyed reads fail inside the database."""
    with pytest.raises(TypeError):
        Book.objects.create(title='The Lord of the Rings')
    Book.objects.create(title='The Lord of the Rings', _key='a')
    # Reading without with_key(): the backend raises InternalError.
    with pytest.raises(InternalError):
        Book.objects.first()
@pytest.mark.django_db
def test_bulk_create(key):
    """bulk_create() stores each instance with its own _key."""
    bulk = [
        Book(title='The Lord of the Rings', _key=key),
        Book(title='Harry Potter', _key=key)
    ]
    Book.objects.bulk_create(bulk)
    assert Book.objects.with_key(key).filter(title__icontains='Rings').count() == 1
    assert Book.objects.with_key(key).filter(title__icontains='Potter').count() == 1
@pytest.mark.django_db
def test_store_prefetched(key, django_assert_num_queries):
    """A row obtained through prefetch_related() can be edited and saved."""
    # NOTE(review): the `django_assert_num_queries` fixture is accepted but
    # never used, and the test ends without asserting the save round-trips;
    # as written it only checks that save() does not raise.
    b1 = Book.objects.create(title='Harry Potter', author=Author.objects.create(name='J. K. Rowling', _key=key), _key=key)
    authors = list(Author.objects.with_key(key).prefetch_related('book_set').order_by('name'))
    b1 = authors[0].book_set.all()[0]
    b1.title = 'Harry Potter 2'
    b1.save()
| 31.378788
| 122
| 0.700145
|
4a0fc22a412e1708101cefb57a2862173b838e9d
| 2,040
|
py
|
Python
|
mrl/algorithms/random_ensemble_DPG.py
|
ag8/mrl
|
f05b00347f88020cbeb216c7e4764a4d2523b67e
|
[
"MIT"
] | null | null | null |
mrl/algorithms/random_ensemble_DPG.py
|
ag8/mrl
|
f05b00347f88020cbeb216c7e4764a4d2523b67e
|
[
"MIT"
] | null | null | null |
mrl/algorithms/random_ensemble_DPG.py
|
ag8/mrl
|
f05b00347f88020cbeb216c7e4764a4d2523b67e
|
[
"MIT"
] | 1
|
2021-08-12T23:13:03.000Z
|
2021-08-12T23:13:03.000Z
|
"""
This is a random ensemble hybrid of DDPG/TD3, roughly based on https://arxiv.org/pdf/1907.04543.pdf.
"""
from mrl.algorithms.continuous_off_policy import *
class RandomEnsembleDPG(OffPolicyActorCritic):
  # Random-ensemble variant of DDPG/TD3: instead of TD3's min over two
  # critics, both the TD target and the losses use a randomly sampled
  # Dirichlet-weighted mixture over all critics in self.critics.
  def optimize_from_batch(self, states, actions, rewards, next_states,
                          gammas):
    config = self.config
    # Target policy smoothing (TD3): clipped Gaussian noise on the
    # target action, then clamp back into the valid action range.
    a_next_max = self.actor_target(next_states)
    noise = torch.randn_like(a_next_max) * (
        config.td3_noise * self.action_scale)
    noise = noise.clamp(-config.td3_noise_clip * self.action_scale,
                        config.td3_noise_clip * self.action_scale)
    a_next_max = (a_next_max + noise).clamp(-self.action_scale,
                                            self.action_scale)
    # Target Q values from every critic on the smoothed target action.
    qs = []
    for critic in self.critics:
      qs.append(critic(next_states, a_next_max))
    qs = torch.cat(qs, dim=-1)  # batch x num_qs
    # Random convex combination over critics, redrawn every batch.
    sample = torch.distributions.dirichlet.Dirichlet(torch.ones(1, qs.size(-1))).sample().to(self.config.device)
    target = (rewards + gammas * (qs * sample).sum(-1, keepdim=True))
    target = torch.clamp(target, *self.config.clip_target_range).detach()
    if hasattr(self, 'logger') and self.config.opt_steps % 100 == 0:
      self.logger.add_histogram('Optimize/Target_q', target)
    # Critic loss: the SAME mixture weights are applied to the current
    # Q estimates so predictions and targets are commensurable.
    qs = []
    for critic in self.critics:
      qs.append(critic(states, actions))
    qs = (torch.cat(qs, dim=-1) * sample).sum(-1, keepdim=True)
    critic_loss = F.mse_loss(qs, target)
    self.critic_opt.zero_grad()
    critic_loss.backward()
    self.critic_opt.step()
    # Delayed policy update (TD3); the actor maximizes the unweighted
    # mean over all critics rather than the sampled mixture.
    if config.opt_steps % config.td3_delay == 0:
      a = self.actor(states)
      qs = []
      for critic in self.critics:
        qs.append(critic(states, a))
      qs = torch.cat(qs, dim=-1)
      actor_loss = -qs.mean()
      self.actor_opt.zero_grad()
      actor_loss.backward()
      self.actor_opt.step()
| 35.172414
| 116
| 0.591176
|
4a0fc2c56c246d11e5a7b5fd460e9776dacba435
| 6,362
|
py
|
Python
|
lrutests.py
|
outofmbufs/PythonLRUCache
|
9bfee32770bb485bf94179ffe19988152e8e537d
|
[
"MIT"
] | null | null | null |
lrutests.py
|
outofmbufs/PythonLRUCache
|
9bfee32770bb485bf94179ffe19988152e8e537d
|
[
"MIT"
] | null | null | null |
lrutests.py
|
outofmbufs/PythonLRUCache
|
9bfee32770bb485bf94179ffe19988152e8e537d
|
[
"MIT"
] | null | null | null |
import threading
import queue
import unittest
import random
from lrucache import ManualLRUCache
class TestMethods(unittest.TestCase):
    # Shared fixture: five key/value pairs, inserted oldest-first.
    testvals = (('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5))
    # helper function. Builds the cache.
    # NOTE: Tests rely on knowing this puts the items into the cache
    # in order (i.e., callers know the LRU corresponds to kvs)
    def makecache1(self, kvs):
        c = ManualLRUCache(cachesize=len(kvs))
        for k, v in kvs:
            c.encache(k, v)
        return c
    # same but uses [] notation
    def makecache2(self, kvs):
        c = ManualLRUCache(cachesize=len(kvs))
        for k, v in kvs:
            c[k] = v
        return c
    makecache = makecache2 # default but tests might use both
    def test_CC1(self):
        # Basic membership/value check via both construction styles.
        for maker in (self.makecache1, self.makecache2):
            c = maker(self.testvals)
            # all should be in the cache
            for k, v in self.testvals:
                self.assertTrue(k in c)
                self.assertEqual(c[k], v)
    def test_CC2(self):
        # Inserting one past capacity must evict exactly the oldest entry.
        for maker in (self.makecache1, self.makecache2):
            c = maker(self.testvals)
            # add another kv, kicking out the first (oldest)
            kx, vx = object(), 'whatever'
            c.encache(kx, vx)
            # now the first one should be gone
            k0, v0 = self.testvals[0]
            self.assertFalse(k0 in c)
            # the rest should all be in there
            for k, v in self.testvals[1:]:
                self.assertTrue(k in c)
                self.assertEqual(c[k], v)
            # and the newest one should be in there
            self.assertTrue(kx in c)
            self.assertEqual(c[kx], vx)
    def test_CC3(self):
        # A read refreshes recency, so the *second*-oldest is evicted.
        c = self.makecache(self.testvals)
        # use the first one, so it is no longer the oldest
        k0, v0 = self.testvals[0]
        self.assertEqual(c[k0], v0)
        # add another kv, which will kick out the SECOND one
        kx, vx = object(), 'whatever'
        c.encache(kx, vx)
        self.assertTrue(k0 in c) # first one still in there
        k1, v1 = self.testvals[1]
        self.assertFalse(k1 in c) # second should be gone
        for k, v in self.testvals[2:]:
            self.assertTrue(k in c)
            self.assertEqual(c[k], v)
        # and that new one should still be in there
        self.assertTrue(kx in c)
        self.assertEqual(c[kx], vx)
    def test_CC4(self):
        # Randomized stress test against a manually-tracked LRU model
        # (kvs is kept in LRU order by hand and compared to the cache).
        vsize = 500
        cachesizes = [vsize * 2, vsize + 1, vsize, vsize - 1, vsize - 10,
                      vsize // 2, vsize // 7, vsize // 10]
        for keyfmt in (None, "key:{}", "irrelevantly{}longtestkey"):
            kvs = [(keyfmt.format(i) if keyfmt else i,
                    "value:{}".format(i))
                   for i in range(vsize)]
            for cachesize in cachesizes:
                c = ManualLRUCache(cachesize=cachesize)
                # run all the k,v pairs through the cache, such that
                # the last "cachesize" elements in kvs will be the LRU
                # elements in the cache.
                for k, v in kvs:
                    c.encache(k, v)
                # now randomly access key/value pairs and manually
                # track the LRU behavior and see if it matches
                # XXX: This is really a test of @lru_cache and it might
                # be arguable that strict adherence to cachesize
                # isn't part of the spec (but this tests for it)
                for i in range(cachesize * 10):
                    nth = random.randrange(vsize)
                    k, v = kvs[nth]
                    kvs.remove((k, v))
                    kvs.append((k, v))
                    c.encache(k, v)
                    self.assertEqual(v, c[k])
                # after all that, the last 'cachesize' elements of kvs
                # should all be in the cache - which may be ALL of the kvs
                # if the cache is larger.
                for i, kv in enumerate(kvs[::-1]):
                    k, v = kv
                    if i < cachesize:
                        self.assertEqual(c[k], v)
                    else:
                        # XXX this tests that any beyond the 'cachesize'
                        # are not in the cache, but is that really
                        # part of the @lru_cache interface? Is
                        # cachesize a suggestion or a guarantee???
                        self.assertFalse(k in c)
    def test_readme(self):
        # example taken directly from README.md
        c = ManualLRUCache()
        c.encache('a', 1)
        c['foo'] = 17
        self.assertTrue('a' in c)
        self.assertFalse('b' in c)
        self.assertTrue('foo' in c)
        self.assertEqual(c['a'], 1)
        with self.assertRaises(KeyError):
            _ = c['b']
        self.assertEqual(c['foo'], 17)
    def test_threading(self):
        # an attempt to demonstrate that all this works
        # correctly with multiple threads; obviously, there's
        # no certainty this would expose real race conditions
        failures = queue.Queue() # threads will send bad news here
        def basher(c, k, nx):
            # Worker: hammer the cache with this thread's own (k, i) keys.
            for i in range(nx):
                c.encache((k, i), (k, i))
                # there's no guarantee it stays in there, it could
                # get bounced out from other threads, but the key
                # (no pun intended) point here is that if it IS in there
                # to make sure it has the right value
                try:
                    kx, ix = c[(k, i)]
                except KeyError:
                    pass
                else:
                    if kx != k or ix != i:
                        failures.put((k, i, kx, ix))
                        break
        # numbers determined somewhat arbitrarily
        nthreads = 50
        nx = 20000
        cachesize = 17
        kvs = [(object(), None) for i in range(cachesize)]
        c = self.makecache(kvs)
        threads = [threading.Thread(target=basher, args=(c, i, nx))
                   for i in range(nthreads)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        self.assertTrue(failures.empty())
# CLI entry point: run the unittest test runner over this module.
if __name__ == "__main__":
    unittest.main()
| 34.389189
| 75
| 0.509588
|
4a0fc4032c7e837f21c2820561e648c32c935467
| 8,748
|
py
|
Python
|
clarity-ext-scripts/clarity_ext_scripts/covid/pcr/example_result_file_rt_pcr_qs7.py
|
ctmrbio/claritylims
|
e4f193e9cd3aa54b2a1974a2ce5b573c397a88eb
|
[
"MIT"
] | 4
|
2020-04-13T14:52:33.000Z
|
2022-02-03T14:50:41.000Z
|
clarity-ext-scripts/clarity_ext_scripts/covid/pcr/example_result_file_rt_pcr_qs7.py
|
ctmrbio/claritylims
|
e4f193e9cd3aa54b2a1974a2ce5b573c397a88eb
|
[
"MIT"
] | 104
|
2019-04-01T08:35:59.000Z
|
2021-05-04T11:53:00.000Z
|
clarity-ext-scripts/clarity_ext_scripts/covid/pcr/example_result_file_rt_pcr_qs7.py
|
ctmrbio/claritylims
|
e4f193e9cd3aa54b2a1974a2ce5b573c397a88eb
|
[
"MIT"
] | 4
|
2020-04-02T07:06:41.000Z
|
2021-08-20T06:42:27.000Z
|
import xlwt
import collections
import datetime
import random
from clarity_ext.extensions import GeneralExtension
class Extension(GeneralExtension):
    def execute(self):
        """Generate a fake QuantStudio 7 result .xls for the current step
        and queue it on the 'Result file' placeholder (demo/test data)."""
        file_handle_name = "Result file"
        timestamp = datetime.datetime.now().strftime("%y%m%dT%H%M%S")
        user = self.context.current_user
        file_name = "EXAMPLE-FILE_QS7_{}_{}.xls".format(user.initials, timestamp)
        wb = xlwt.Workbook()
        ws = wb.add_sheet('Results')
        self.create_upper_header_info(ws)
        # The result table starts below the instrument-metadata header.
        table_row_start = 42
        column_index = 0
        # Creating the header of the table
        # (an argument-less create_first_row yields keys only).
        for key in self.create_first_row():
            ws.write(table_row_start, column_index, key)
            column_index += 1
        # Rows are interleaved per well: FAM rows at odd offsets,
        # VIC rows at even offsets; each counter advances by 2.
        counter_first_row = 1
        counter_second_row = 2
        # Creating the content of the table
        for well in self.context.output_container:
            first_row = self.create_first_row(well, well.artifact)
            second_row = self.create_second_row(well, well.artifact)
            column_index = 0
            counter_first_row = self.populate_table(column_index, table_row_start, first_row, counter_first_row, ws)
            counter_second_row = self.populate_table(column_index, table_row_start, second_row, counter_second_row, ws)
        full_path, file_handle = self.context.file_service.pre_queue(file_name, file_handle_name)
        wb.save(full_path)
        self.context.file_service.queue(full_path, file_handle)
def populate_table(self, column_index, table_row_start, row, counter_row, ws):
for key, value in row.items():
ws.write(counter_row + table_row_start, column_index, value)
column_index += 1
counter_row += 2
return counter_row
def create_first_row(self, well=None, artifact=None):
header = collections.OrderedDict()
header["Well"] = well.index_down_first if well else None
header["Well Position"] = well.alpha_num_key if artifact else None
header["Omit"] = "FALSE"
header["Sample Name"] = artifact.name if artifact else None
header["Target Name"] = "NCoV Orf1ab"
header["Task"] = self.task(artifact) if artifact else None if artifact else None
header["Reporter"] = "FAM" if artifact else None
header["Quencher"] = "None"
header["CT"] = self.random_number(artifact)if artifact else None
header["Ct Mean"] = self.random_number(artifact)if artifact else None
header["Ct SD"] = None
header["Quantity"] = None
header["Quantity Mean"] = None
header["Quantity SD"] = None
header["Y-Intercept"] = None
header["R(superscript 2)"] = None
header["Slope"] = None
header["Efficiency"] = None
header["Automatic Ct Threshold"] = "FALSE"
header["Ct Threshold"] = "188 495.138"
header["Automatic Baseline"] = "TRUE"
header["Baseline Start"] = "1"
header["Baseline End"] = random.randint(0, 15)
header["Comments"] = None
return header
def create_second_row(self, well=None, artifact=None):
header = collections.OrderedDict()
header["Well"] = well.index_down_first if well else None
header["Well Position"] = well.alpha_num_key if artifact else None
header["Omit"] = "FALSE"
header["Sample Name"] = artifact.name if artifact else None
header["Target Name"] = "RNaseP"
header["Task"] = self.task(artifact) if artifact else None
header["Reporter"] = "VIC" if artifact else None
header["Quencher"] = "None"
header["CT"] = self.random_number(artifact) if artifact else None
header["Ct Mean"] = self.random_number(artifact)if artifact else None
header["Ct SD"] = None
header["Quantity"] = None
header["Quantity Mean"] = None
header["Quantity SD"] = None
header["Y-Intercept"] = None
header["R(superscript 2)"] = None
header["Slope"] = None
header["Efficiency"] = None
header["Automatic Ct Threshold"] = "FALSE"
header["Ct Threshold"] = "39 589.086"
header["Automatic Baseline"] = "FALSE"
header["Baseline Start"] = "3"
header["Baseline End"] = random.randint(0, 15)
header["Comments"] = None
return header
def task(self, artifact):
    """Classify the artifact's qPCR task: "NTC" for negative controls, else "UNKNOWN"."""
    name = artifact.name.lower()
    return "NTC" if name.startswith("negative") else "UNKNOWN"
def random_number(self, artifact):
    """Return a mock Ct value whose range depends on the control type of the artifact."""
    name = artifact.name.lower()
    if name.startswith("negative"):
        return random.randint(0, 1)
    if name.startswith("positive"):
        return random.randint(30, 40)
    return random.randint(0, 40)
def create_upper_header_info(self, ws):
    """Write the instrument/run metadata header into worksheet `ws`.

    Column 0 holds the labels, column 1 the (mostly fixed) values; rows are
    written label-column first, then value-column, matching the instrument
    export layout. Some labels intentionally carry a trailing space.
    """
    labels = (
        "Block Type",
        "Calibration Background is expired ",
        "Calibration Background performed on",
        "Calibration Normalization FAM-ROX is expired",
        "Calibration Normalization FAM-ROX performed on",
        "Calibration Normalization VIC-ROX is expired",
        "Calibration Normalization VIC-ROX performed on",
        "Calibration Pure Dye CY5 is expired",
        "Calibration Pure Dye CY5 performed on",
        "Calibration Pure Dye FAM is expired",
        "Calibration Pure Dye FAM performed on",
        "Calibration Pure Dye NED is expired",
        "Calibration Pure Dye NED performed on",
        "Calibration Pure Dye ROX is expired",
        "Calibration Pure Dye ROX performed on",
        "Calibration Pure Dye SYBR is expired",
        "Calibration Pure Dye SYBR performed on",
        "Calibration Pure Dye TAMRA is expired",
        "Calibration Pure Dye TAMRA performed on",
        "Calibration Pure Dye VIC is expired",
        "Calibration Pure Dye VIC performed on",
        "Calibration ROI is expired ",
        "Calibration ROI performed on",
        "Calibration Uniformity is expired ",
        "Calibration Uniformity performed on",
        "Chemistry",
        "Date Created",
        "Experiment Barcode",
        "Experiment Comment",
        "Experiment File Name",
        "Experiment Name",
        "Experiment Run End Time",
        "Experiment Type",
        "Instrument Name",
        "Instrument Serial Number",
        "Instrument Type",
        "Passive Reference",
        "Quantification Cycle Method",
        "Signal Smoothing On",
        "Stage/ Cycle where Analysis is performed",
        "User Name",
    )
    values = (
        "96-Well Block (0.2mL)",
        "No",
        "02-18-2020",
        "No",
        "02-18-2020",
        "No",
        "02-18-2020",
        "Yes",
        "01-10-2019",
        "No",
        "02-18-2020",
        "No",
        "02-18-2020",
        "No",
        "02-18-2020",
        "No",
        "02-18-2020",
        "No",
        "02-18-2020",
        "No",
        "02-18-2020",
        "No",
        "02-18-2020",
        "No",
        "02-18-2020",
        "TAQMAN",
        "2020-04-17 17:04:37 PM CEST",
        "",
        "",
        "D:\\file.eds",
        "2020-04-17 140736",
        "2020-04-17 17:38:24 PM CEST",
        "Standard Curve",
        "278870044",
        "278870044",
        "QuantStudio(TM) 7 Flex System",
        "",
        "Ct",
        "true",
        "Stage 2, Step 2",
        "",
    )
    # Keep the original call order: the whole label column, then the value column.
    for row_index, label in enumerate(labels):
        ws.write(row_index, 0, label)
    for row_index, value in enumerate(values):
        ws.write(row_index, 1, value)
def integration_tests(self):
    """Yield the extension's integration-test scenarios (identifier "24-43779", commit disabled)."""
    yield self.test("24-43779", commit=False)
| 41.856459
| 119
| 0.599108
|
4a0fc4e68cca766f8615982bf9af6e6ab61ad00c
| 51,719
|
py
|
Python
|
services/data_generator.py
|
meredithmurfin/DynamicPlacementGenerator
|
60c0e3f1892c702ae46db78a89ff5e64f472a1b7
|
[
"MIT"
] | 2
|
2021-01-14T12:53:13.000Z
|
2021-03-28T19:28:46.000Z
|
services/data_generator.py
|
EccRiley/DynamicPlacementGenerator
|
60c0e3f1892c702ae46db78a89ff5e64f472a1b7
|
[
"MIT"
] | null | null | null |
services/data_generator.py
|
EccRiley/DynamicPlacementGenerator
|
60c0e3f1892c702ae46db78a89ff5e64f472a1b7
|
[
"MIT"
] | 1
|
2020-04-29T19:24:35.000Z
|
2020-04-29T19:24:35.000Z
|
from util import data_util, reader_util, writer_util
import data
from itertools import combinations, combinations_with_replacement, permutations, chain, groupby
from operator import sub
import numpy as np
import pprint, logging, copy
def generate_all_possible_states():
    """Build every placement state for 1-5 engines across the 7 slots and export them.

    For each engine count, every unique partition of that count is expanded by the
    matching fixed-length state builder; the combined lists are handed to
    writer_util for export.
    """
    builders = {
        1: generate_states_for_sum_length_1,
        2: generate_states_for_sum_length_2,
        3: generate_states_for_sum_length_3,
        4: generate_states_for_sum_length_4,
        5: generate_states_for_sum_length_5,
    }
    all_states = {num: [] for num in range(1, 6)}
    for num in range(1, 6):
        logging.info("Generating all possible states for " + str(num) + " engine(s) to use for the MDP...")
        for s in find_all_unique_sums_to_n(num):
            builder = builders.get(len(s))
            if builder is not None:
                all_states[num].extend(builder(s))
        logging.info("States have been generated.")
    writer_util.export_all_possible_states(all_states)
def generate_states_for_sum_length_1(s):
    """Return the 7 states that place the single count s[0] at each slot in turn."""
    return [[s[0] if pos == spot else 0 for pos in range(7)] for spot in range(7)]
def generate_states_for_sum_length_2(s):
    """Return states placing s[0] and s[1] on every ordered pair of distinct slots."""
    states = []
    for i, j in permutations(range(7), 2):
        placement = [0] * 7
        placement[i] = s[0]
        placement[j] = s[1]
        states.append(placement)
    return states
def generate_states_for_sum_length_3(s):
    """Return states placing s[0..2] on every ordered triple of distinct slots."""
    states = []
    for i, j, k in permutations(range(7), 3):
        placement = [0] * 7
        placement[i] = s[0]
        placement[j] = s[1]
        placement[k] = s[2]
        states.append(placement)
    return states
def generate_states_for_sum_length_4(s):
    """Return states placing s[0..3] on every ordered 4-tuple of distinct slots."""
    states = []
    for i, j, k, l in permutations(range(7), 4):
        placement = [0] * 7
        placement[i] = s[0]
        placement[j] = s[1]
        placement[k] = s[2]
        placement[l] = s[3]
        states.append(placement)
    return states
def generate_states_for_sum_length_5(s):
    """Return states placing s[0..4] on every ordered 5-tuple of distinct slots."""
    states = []
    for i, j, k, l, m in permutations(range(7), 5):
        placement = [0] * 7
        placement[i] = s[0]
        placement[j] = s[1]
        placement[k] = s[2]
        placement[l] = s[3]
        placement[m] = s[4]
        states.append(placement)
    return states
def generate_all_possible_actions():
    """Build and export, per engine count 1-5, every movement action matrix.

    Each unique partition of the count is expanded by the matching fixed-length
    action builder. NOTE(review): there is no builder for 5-part partitions, so
    for 5 engines the [1,1,1,1,1] partition is silently skipped — confirm intended.
    """
    builders = {
        1: generate_actions_for_sum_length_1,
        2: generate_actions_for_sum_length_2,
        3: generate_actions_for_sum_length_3,
        4: generate_actions_for_sum_length_4,
    }
    for num in range(1, 6):
        logging.info("Generating all possible actions for " + str(num) + " engine(s) to use for the MDP...")
        all_actions = []
        for s in find_all_unique_sums_to_n(num):
            builder = builders.get(len(s))
            if builder is not None:
                all_actions.extend(builder(s))
        logging.info("Actions have been generated.")
        writer_util.export_all_possible_actions(num, all_actions)
def generate_actions_for_sum_length_1(s):
    """Return every 7x7 action matrix that writes the count s[0] into one cell (src row, dst col)."""
    actions = []
    blank = np.zeros((7, 7))
    for src in range(7):
        for dst in range(7):
            move = blank.copy()
            move[src][dst] = s[0]
            actions.append(move)
    return actions
def generate_actions_for_sum_length_2(s):
    """Return every 7x7 action matrix placing s[0] and s[1] on two distinct cells."""
    actions = []
    blank = np.zeros((7, 7))
    for i in range(7):
        for j in range(7):
            for k in range(7):
                for l in range(7):
                    # The two (row, col) cells must differ.
                    if (i, j) != (k, l):
                        move = blank.copy()
                        move[i][j] = s[0]
                        move[k][l] = s[1]
                        actions.append(move)
    return actions
def generate_actions_for_sum_length_3(s):
    """Return every 7x7 action matrix placing s[0..2] on three pairwise-distinct cells."""
    actions = []
    blank = np.zeros((7, 7))
    for i in range(7):
        for j in range(7):
            for k in range(7):
                for l in range(7):
                    for m in range(7):
                        for n in range(7):
                            if index_pairs_are_not_equal([[i, j], [k, l], [m, n]]):
                                move = blank.copy()
                                move[i][j] = s[0]
                                move[k][l] = s[1]
                                move[m][n] = s[2]
                                actions.append(move)
    return actions
def generate_actions_for_sum_length_4(s):
    """Return every 7x7 action matrix placing s[0..3] on four pairwise-distinct cells.

    Bug fix: the fourth placement previously wrote ``s[2]`` instead of ``s[3]``,
    so the fourth count in the partition was silently replaced by the third.
    """
    actions = []
    blank = np.zeros((7, 7))
    for i in range(7):
        for j in range(7):
            for k in range(7):
                for l in range(7):
                    for m in range(7):
                        for n in range(7):
                            for o in range(7):
                                for p in range(7):
                                    if index_pairs_are_not_equal([[i, j], [k, l], [m, n], [o, p]]):
                                        move = blank.copy()
                                        move[i][j] = s[0]
                                        move[k][l] = s[1]
                                        move[m][n] = s[2]
                                        move[o][p] = s[3]  # was s[2]: copy/paste bug
                                        actions.append(move)
    return actions
def find_all_unique_sums_to_n(n):
    """Return every unique partition of n as an ascending list of parts, in first-seen order.

    Each choice of interior "cut points" in [1, n) yields an ordered composition
    of n (consecutive differences of 0, cuts..., n); sorting each composition and
    de-duplicating leaves the integer partitions of n.
    """
    cut_sets = (c for size in range(n) for c in combinations(range(1, n), size))
    unique = []
    for cuts in cut_sets:
        bounds = [0] + list(cuts) + [n]
        parts = sorted(hi - lo for lo, hi in zip(bounds, bounds[1:]))
        if parts not in unique:
            unique.append(parts)
    return unique
def get_unique_list_of_lists(a_list):
    """Sort each inner list in place and return the distinct ones in first-seen order.

    Works on any iterable of lists (including generators); inner lists are
    mutated by the in-place sort.
    """
    unique = []
    seen = set()
    for inner in a_list:
        inner.sort()
        key = tuple(inner)
        if key not in seen:
            seen.add(key)
            unique.append(inner)
    return unique
def indices_are_not_equal(indices):
    """True when no index in the list repeats."""
    return len(set(indices)) == len(indices)
def index_pairs_are_not_equal(index_pairs):
    """True when no [row, col] pair appears more than once in the list."""
    return all(index_pairs.count(pair) == 1 for pair in index_pairs)
def generate_all_possible_removal_situations(engine_subtype):
    """Enumerate every feasible monthly removal situation for one engine subtype.

    Reads per-location caps from ``data.removals_info``, derives capacity bands
    (``ranges``) over the distinct hub caps, collects — for each possible total
    removal count — every valid multiset of per-location counts, and hands the
    result to RemovalsGenerator for expansion and export.
    """
    logging.info("Generating all possible removal situations for the " + engine_subtype + "...")
    removals_info = data.removals_info[engine_subtype]
    num_different_removals_non_hubs = removals_info['MAX_NUM_REMOVALS_MONTHLY_NON_HUBS']
    # Guard rails: the downstream hand-coded combinatorics only cover small caps.
    assert num_different_removals_non_hubs in [0, 1, 2], "This program cannot handle generating all removal situations for non-hub locations having more than 2 total removals. Make sure MAX_NUM_REMOVALS_MONTHLY_NON_HUBS is set to 0, 1, or 2."
    assert removals_info['MAX_NUM_REMOVALS_MONTHLY_TOTAL'] <= 10, "This program cannot handle generating all removal situations for more than 10 total removals. Make sure MAX_NUM_REMOVALS_MONTHLY_TOTAL is set to a value between 1 and 10."
    # Map each distinct hub cap to how many hubs share it.
    num_allowed_at_hubs = find_num_occurrences_of_max_removals_for_hubs([
        removals_info['MAX_NUM_REMOVALS_MONTHLY_ATL'],
        removals_info['MAX_NUM_REMOVALS_MONTHLY_CVG'],
        removals_info['MAX_NUM_REMOVALS_MONTHLY_DTW'],
        removals_info['MAX_NUM_REMOVALS_MONTHLY_LAX'],
        removals_info['MAX_NUM_REMOVALS_MONTHLY_MSP'],
        removals_info['MAX_NUM_REMOVALS_MONTHLY_SEA'],
        removals_info['MAX_NUM_REMOVALS_MONTHLY_SLC']])
    logging.info(engine_subtype + " monthly removal information:")
    logging.info("Expected AOS cost: " + str(data.aos_cost[engine_subtype]))
    max_num_removals_total = removals_info['MAX_NUM_REMOVALS_MONTHLY_TOTAL']
    logging.info("Maximum total number of removals: " + str(max_num_removals_total))
    max_removals_non_hubs = removals_info['MAX_NUM_REMOVALS_MONTHLY_NON_HUBS']
    logging.info("Maximum number of removals by location: ATL: " + str(removals_info['MAX_NUM_REMOVALS_MONTHLY_ATL'])
        + ", CVG: " + str(removals_info['MAX_NUM_REMOVALS_MONTHLY_CVG'])
        + ", DTW: " + str(removals_info['MAX_NUM_REMOVALS_MONTHLY_DTW'])
        + ", LAX: " + str(removals_info['MAX_NUM_REMOVALS_MONTHLY_LAX'])
        + ", MSP: " + str(removals_info['MAX_NUM_REMOVALS_MONTHLY_MSP'])
        + ", SEA: " + str(removals_info['MAX_NUM_REMOVALS_MONTHLY_SEA'])
        + ", SLC: " + str(removals_info['MAX_NUM_REMOVALS_MONTHLY_SLC'])
        + ", NON-HUBS: " + str(max_removals_non_hubs))
    # Number of hubs that can see at least one removal (hubs capped at 0 excluded).
    num_different_removals_hubs = sum(num_allowed_at_hubs.values()) - (num_allowed_at_hubs[0] if 0 in num_allowed_at_hubs else 0)
    ranges = find_ranges_of_num_removal_values_valid_at_hubs(num_allowed_at_hubs)
    max_allowed = find_max_removals_allowed(num_allowed_at_hubs, max_removals_non_hubs)
    # For each total removal count, keep only the per-location count multisets
    # that fit within the band capacities.
    removal_sums = {}
    for num_removals in range(1, max_num_removals_total + 1):
        removal_sums[num_removals] = find_all_valid_sums_for_current_num_removals(
            num_removals=num_removals,
            num_allowed_at_hubs=num_allowed_at_hubs,
            num_different_removals_hubs=num_different_removals_hubs,
            num_different_removals_non_hubs=num_different_removals_non_hubs,
            max_removals_non_hubs=max_removals_non_hubs,
            ranges=ranges,
            max_allowed=max_allowed)
    logging.info("All combinations of values to generate possible removal situations have been found.")
    removals_generator = RemovalsGenerator(engine_subtype, removals_info, removal_sums, ranges)
    removals_generator.generate_all_removal_situations()
def find_num_occurrences_of_max_removals_for_hubs(max_num_removals_at_hubs):
    """Map each distinct per-hub removal cap to how many hubs share that cap."""
    assert max(max_num_removals_at_hubs) <= 10, "This program cannot handle generating all removal situations for more than 10 removals happening at any hub one location. Make sure MAX_NUM_REMOVALS_MONTHLY for each hub is set to a value between 0 and 10."
    counts = {}
    for cap in max_num_removals_at_hubs:
        counts[cap] = counts.get(cap, 0) + 1
    return counts
def find_ranges_of_num_removal_values_valid_at_hubs(num_allowed_at_hubs):
    """Build descending [min, max] bands of removal counts between the distinct hub caps.

    Caps are walked in ascending order (a cap of 0 is dropped — those hubs never
    see removals); each band runs from one past the previous cap up to the next
    cap, and the band list is returned highest-band first.
    """
    caps = sorted(num_allowed_at_hubs)
    if caps[0] == 0:
        caps = caps[1:]
    bands = []
    lower = 1
    for cap in caps:
        bands.append([lower, cap])
        lower = cap + 1
    bands.reverse()
    return bands
def find_possible_num_removals_at_hubs(num_allowed_at_hubs):
    """Return the distinct per-hub removal caps in ascending order."""
    return sorted(num_allowed_at_hubs)
def at_least_one_hub_never_has_removals(lowest_num_removals_at_hubs):
    """True when the smallest hub cap is zero (some hub never sees a removal)."""
    return lowest_num_removals_at_hubs == 0
def find_max_removals_allowed(num_allowed_at_hubs, max_removals_non_hubs):
    """Largest removal count any single location (hub or non-hub) may see."""
    top = max(num_allowed_at_hubs.keys())
    # Non-hubs may see 2 removals even when every hub is capped at 1.
    if top == 1 and max_removals_non_hubs == 2:
        top = 2
    return top
def only_one_removal_can_happen_at_hubs_but_up_to_two_removals_can_happen_at_non_hubs(max_allowed, max_removals_non_hubs):
    """True when hubs are capped at 1 but non-hubs may see 2 removals."""
    return max_allowed == 1 and max_removals_non_hubs == 2
def find_all_valid_sums_for_current_num_removals(num_removals, num_allowed_at_hubs, num_different_removals_hubs, num_different_removals_non_hubs, ranges, max_removals_non_hubs, max_allowed):
    """Return every partition of ``num_removals`` that can be realized by the locations.

    Candidates are the unique (sorted) partitions of the total; a candidate is
    rejected when it has more parts than there are locations able to take one,
    when any single part exceeds the overall per-location cap, or when the
    parts cannot be packed into the hub capacity bands.
    """
    all_sums = find_all_sums(num_removals)
    unique_sums_not_validated = get_unique_list_of_lists(all_sums)
    sums_validated = []
    for values_to_sum in unique_sums_not_validated:
        if too_many_values_in_this_sum_than_possible_for_a_possible_removal_situation(values_to_sum, num_different_removals_hubs, num_different_removals_non_hubs):
            continue
        elif a_value_in_the_sum_exceeds_the_max_allowed(values_to_sum, num_allowed_at_hubs, max_removals_non_hubs, max_allowed):
            continue
        elif only_one_removal_is_allowed_anywhere(ranges, max_allowed):
            # Everything is capped at 1 and the earlier checks passed, so the
            # partition (all ones) is automatically valid.
            sums_validated.append(values_to_sum)
        elif values_in_sum_invalid_due_to_max_num_removals_possible(values_to_sum, ranges, num_allowed_at_hubs, max_removals_non_hubs, num_different_removals_hubs):
            continue
        else:
            sums_validated.append(values_to_sum)
    return sums_validated
def find_all_sums(n):
    """Yield every ordered composition of n (lists of positive parts summing to n).

    Each choice of interior cut points in [1, n) defines a composition via the
    consecutive differences of 0, cuts..., n.
    """
    cut_sets = (c for size in range(n) for c in combinations(range(1, n), size))
    return ([hi - lo for lo, hi in zip((0,) + cuts, cuts + (n,))] for cuts in cut_sets)
def get_unique_list_of_lists(a_list):
    """Sort each inner list in place and return the distinct ones in first-seen order.

    NOTE(review): this is a byte-for-byte duplicate of the earlier
    ``get_unique_list_of_lists`` definition in this module; at import time this
    later copy is the one that stays bound. Consider removing one of them.
    """
    unique_list_of_lists = []
    for l in a_list:
        l.sort()
        if l not in unique_list_of_lists:
            unique_list_of_lists.append(l)
    return unique_list_of_lists
def too_many_values_in_this_sum_than_possible_for_a_possible_removal_situation(values_to_sum, num_different_removals_hubs, num_different_removals_non_hubs):
    """True when the partition has more parts than there are locations able to take one."""
    capacity = num_different_removals_hubs + num_different_removals_non_hubs
    return len(values_to_sum) > capacity
def a_value_in_the_sum_exceeds_the_max_allowed(values_to_sum, num_allowed_at_hubs, max_removals_non_hubs, max_allowed):
    """True when any part of the partition exceeds the overall per-location cap.

    NOTE(review): ``num_allowed_at_hubs`` and ``max_removals_non_hubs`` are
    accepted but unused; kept for signature compatibility with callers.
    """
    return any(value > max_allowed for value in values_to_sum)
def only_one_removal_is_allowed_anywhere(ranges, max_allowed):
    """True when the single capacity band is [1, 1] and the global cap is 1."""
    return len(ranges) == 1 and ranges[0] == [1, 1] and max_allowed == 1
def only_one_range_of_values_to_search(ranges):
    """True when exactly one capacity band exists."""
    return len(ranges) == 1
def range_to_search_is_for_1_removal(range_to_search):
    """True when the band covers exactly the single value 1."""
    return range_to_search == [1, 1]
def values_in_sum_invalid_due_to_max_num_removals_possible(values_to_sum, ranges, num_allowed_at_hubs, max_removals_non_hubs, num_different_removals_hubs):
    """True when the partition cannot be packed into the available capacity bands.

    Builds fresh per-band capacity and usage tallies, widens them with whatever
    the non-hub locations can absorb for this particular partition, counts how
    many parts land in each band, and finally checks band-by-band feasibility.
    """
    # Per-band capacities (hub caps), excluding the 0-cap band.
    num_allowed_in_each_range = get_num_allowed_in_each_range_to_edit(num_allowed_at_hubs)
    # Matching usage tallies, all starting at zero.
    num_actually_in_each_range = get_num_actually_in_each_range_to_edit(num_allowed_in_each_range)
    # Extend bands/capacities with the slots non-hub locations contribute.
    num_allowed_in_each_range, num_actually_in_each_range, ranges = update_ranges_and_removals_allowed_to_reflect_values_to_sum(
        num_allowed_in_each_range=num_allowed_in_each_range,
        num_actually_in_each_range=num_actually_in_each_range,
        ranges=ranges,
        values_to_sum=values_to_sum,
        max_removals_non_hubs=max_removals_non_hubs,
        num_different_removals_hubs=num_different_removals_hubs)
    # Count how many parts of the partition fall into each band.
    num_actually_in_each_range = update_num_actually_in_each_range_to_reflect_values_to_sum(
        num_actually_in_each_range=num_actually_in_each_range,
        ranges=ranges,
        values_to_sum=values_to_sum)
    if values_in_sum_invalid(num_allowed_in_each_range, num_actually_in_each_range):
        return True
    else:
        return False
def get_num_allowed_in_each_range_to_edit(num_allowed_at_hubs):
    """Return a fresh copy of the cap->hub-count map with the 0-cap entry dropped."""
    return {cap: count for cap, count in num_allowed_at_hubs.items() if cap != 0}
def get_num_actually_in_each_range_to_edit(num_allowed_in_each_range):
    """Return a usage tally with the same keys as the capacity map, all zeroed."""
    return dict.fromkeys(num_allowed_in_each_range, 0)
def update_ranges_and_removals_allowed_to_reflect_values_to_sum(num_allowed_in_each_range, num_actually_in_each_range, ranges, values_to_sum, max_removals_non_hubs, num_different_removals_hubs):
    """Widen the band/capacity maps with the slots non-hub locations can absorb.

    The branch taken depends on the non-hub cap (2 vs otherwise) and on which
    small parts (a 2, one 1, or two 1s) appear in the partition; each branch
    adds the matching band(s) and bumps the corresponding capacities.
    """
    if max_removals_non_hubs == 2:
        if non_hubs_can_have_2_removals_and_values_to_sum_contain_2_1_1(values_to_sum):
            if having_2_removals_at_any_non_hub_is_not_possible(values_to_sum, num_different_removals_hubs):
                # The 2 must land on a hub; non-hubs only absorb the 1s.
                ranges = update_ranges_to_include_new_range(ranges, [1, 1])
            else:
                # A non-hub can take the 2 as well.
                ranges = update_ranges_to_include_new_range(ranges, [2, 2])
                num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_2_removals(num_allowed_in_each_range, num_actually_in_each_range)
            num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_1_removal(num_allowed_in_each_range, num_actually_in_each_range, num_possible=2)
        elif non_hubs_can_have_2_removals_and_values_to_sum_contain_2_1(values_to_sum):
            ranges = update_ranges_to_include_new_range(ranges, [2, 2])
            num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_2_removals(num_allowed_in_each_range, num_actually_in_each_range)
            num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_1_removal(num_allowed_in_each_range, num_actually_in_each_range, num_possible=1)
        elif non_hubs_can_have_2_removals_and_values_to_sum_contain_2(values_to_sum):
            ranges = update_ranges_to_include_new_range(ranges, [2, 2])
            num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_2_removals(num_allowed_in_each_range, num_actually_in_each_range)
        elif non_hubs_can_have_2_removals_and_values_to_sum_contains_1_1(values_to_sum):
            ranges = update_ranges_to_include_new_range(ranges, [1, 1])
            num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_1_removal(num_allowed_in_each_range, num_actually_in_each_range, num_possible=2)
        elif non_hubs_can_have_2_removals_and_values_to_sum_contains_1(values_to_sum):
            ranges = update_ranges_to_include_new_range(ranges, [1, 1])
            num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_1_removal(num_allowed_in_each_range, num_actually_in_each_range, num_possible=1)
    else:
        # NOTE(review): this branch also runs when max_removals_non_hubs == 0;
        # it still grants one 1-removal slot if the partition contains a 1 —
        # confirm that is intended for a zero non-hub cap.
        if non_hubs_can_have_1_removal_and_values_to_sum_conains_1(values_to_sum):
            ranges = update_ranges_to_include_new_range(ranges, [1, 1])
            num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_1_removal(num_allowed_in_each_range, num_actually_in_each_range, num_possible=1)
    return num_allowed_in_each_range, num_actually_in_each_range, ranges
def update_ranges_to_include_new_range(ranges, new_r):
    """Splice the band ``new_r`` into the (descending) band list, splitting overlaps.

    If ``new_r`` is already present the list is returned unchanged (re-sorted
    descending). Otherwise existing bands adjacent to or starting at
    ``new_r``'s minimum are split/shifted so the new band fits without
    overlapping coverage.
    """
    new_ranges = ranges[:]
    if new_r not in ranges:
        new_r_min, new_r_max = new_r[0], new_r[1]
        for r in ranges:
            r_min, r_max = r[0], r[1]
            if r_min == (new_r_min - 1):
                # Existing band ends just below the new band's start.
                if r_max == new_r_max:
                    new_ranges.append(new_r)
                else:
                    # Shift the old band down by one and insert the new band.
                    new_ranges.remove(r)
                    new_ranges.append([new_r_min - 1, new_r_max - 1])
                    new_ranges.append(new_r)
                    if r_max > new_r_max:
                        # NOTE(review): this tail band starts at new_r_min - 1,
                        # overlapping the band appended above — confirm intended.
                        new_ranges.append([new_r_min - 1, r_max])
                        return sort_and_reverse_list(new_ranges)
            elif r_min == new_r_min:
                # Existing band starts where the new band starts: carve new_r out.
                new_ranges.remove(r)
                new_ranges.append(new_r)
                new_ranges.append([new_r_min + 1, r_max])
                return sort_and_reverse_list(new_ranges)
    return sort_and_reverse_list(new_ranges)
def update_data_to_include_2_removals(num_allowed_in_each_range, num_actually_in_each_range):
    """Register one extra slot able to hold 2 removals (creating the 2-band if absent)."""
    if 2 in num_allowed_in_each_range:
        num_allowed_in_each_range[2] += 1
    else:
        num_allowed_in_each_range[2] = 1
        num_actually_in_each_range[2] = 0
    return num_allowed_in_each_range, num_actually_in_each_range
def update_data_to_include_1_removal(num_allowed_in_each_range, num_actually_in_each_range, num_possible):
    """Register ``num_possible`` extra slots able to hold exactly 1 removal each."""
    if 1 in num_allowed_in_each_range:
        num_allowed_in_each_range[1] += num_possible
    else:
        num_allowed_in_each_range[1] = num_possible
        num_actually_in_each_range[1] = 0
    return num_allowed_in_each_range, num_actually_in_each_range
def non_hubs_can_have_2_removals_and_values_to_sum_contain_2_1_1(values_to_sum):
    """True when the partition contains a 2 and at least two 1s."""
    return 2 in values_to_sum and values_to_sum.count(1) >= 2
def non_hubs_can_have_2_removals_and_values_to_sum_contain_2_1(values_to_sum):
    """True when the partition contains a 2 and exactly one 1."""
    return 2 in values_to_sum and values_to_sum.count(1) == 1
def non_hubs_can_have_2_removals_and_values_to_sum_contain_2(values_to_sum):
    """True when the partition contains a 2 and no 1s."""
    return 2 in values_to_sum and 1 not in values_to_sum
def non_hubs_can_have_2_removals_and_values_to_sum_contains_1_1(values_to_sum):
    """True when the partition contains at least two 1s."""
    return values_to_sum.count(1) >= 2
def non_hubs_can_have_2_removals_and_values_to_sum_contains_1(values_to_sum):
    """True when the partition contains exactly one 1."""
    return values_to_sum.count(1) == 1
def non_hubs_can_have_1_removal_and_values_to_sum_conains_1(values_to_sum):
    """True when the partition contains at least one 1.

    NOTE(review): "conains" in the name is a typo; kept for caller compatibility.
    """
    return 1 in values_to_sum
def having_2_removals_at_any_non_hub_is_not_possible(values_to_sum, num_different_removals_hubs):
    """True when, after giving one part to a non-hub, the rest exceed the usable hubs."""
    remaining_parts = len(values_to_sum) - 1
    return remaining_parts > num_different_removals_hubs
def update_num_actually_in_each_range_to_reflect_values_to_sum(num_actually_in_each_range, ranges, values_to_sum):
    """Tally how many partition parts land inside each [min, max] band (keyed by band max)."""
    for band_min, band_max in ranges:
        for value in values_to_sum:
            if band_min <= value <= band_max:
                num_actually_in_each_range[band_max] += 1
    return num_actually_in_each_range
def values_in_sum_invalid(num_allowed_in_each_range, num_actually_in_each_range):
    """True when some band holds more partition parts than its (cascaded) capacity.

    Bands are checked from the highest cap downward; unused capacity in a band
    rolls over to the next lower band before it is checked, since a location
    that can take k removals can also take fewer.
    """
    all_max_num_removals = list(num_allowed_in_each_range.keys())
    all_max_num_removals = sort_and_reverse_list(all_max_num_removals)
    count = 1
    for num_removals in all_max_num_removals:
        if values_in_sum_exceed_removals_allowed_for_that_range(num_allowed_in_each_range, num_actually_in_each_range, num_removals):
            return True
        if all_max_num_removals_have_not_been_iterated_yet(count, all_max_num_removals):
            # Roll leftover capacity into the next (lower) band.
            num_allowed_in_each_range = add_num_not_used_in_range_to_next_range_to_iterate(num_allowed_in_each_range, num_actually_in_each_range, num_removals, all_max_num_removals, count)
        count += 1
    return False
def values_in_sum_exceed_removals_allowed_for_that_range(num_allowed_in_each_range, num_actually_in_each_range, num_removals):
    """True when more parts landed in the ``num_removals`` band than it can hold."""
    allowed = num_allowed_in_each_range[num_removals]
    actual = num_actually_in_each_range[num_removals]
    return actual > allowed
def all_max_num_removals_have_not_been_iterated_yet(count, all_max_num_removals):
    """True while at least one band remains after the current one."""
    return count != len(all_max_num_removals)
def add_num_not_used_in_range_to_next_range_to_iterate(num_allowed_in_each_range, num_actually_in_each_range, num_removals, all_max_num_removals, count):
    """Roll the current band's unused capacity into the next band to be checked."""
    leftover = num_allowed_in_each_range[num_removals] - num_actually_in_each_range[num_removals]
    next_band = all_max_num_removals[count]
    num_allowed_in_each_range[next_band] += leftover
    return num_allowed_in_each_range
def sort_and_reverse_list(a_list):
    """Sort ``a_list`` into descending order in place and return it."""
    a_list.sort(reverse=True)
    return a_list
def value_within_range(value, current_min, current_max):
    """True when ``value`` lies in the closed interval [current_min, current_max]."""
    return current_min <= value <= current_max
class RemovalsGenerator:
    def __init__(self, engine_subtype, removals_info, removal_sums, ranges):
        """Cache per-subtype removal caps and precompute the index bookkeeping used
        when expanding removal-count partitions into full 53-location situations.

        Indices 0-6 are the hubs (ATL..SLC); indices 7-52 are non-hub locations.
        """
        logging.info("Initializing RemovalsGenerator for the " + engine_subtype + " engine...")
        self.engine_subtype = engine_subtype
        # Per-hub monthly removal caps.
        self.max_removals_ATL = removals_info['MAX_NUM_REMOVALS_MONTHLY_ATL']
        self.max_removals_CVG = removals_info['MAX_NUM_REMOVALS_MONTHLY_CVG']
        self.max_removals_DTW = removals_info['MAX_NUM_REMOVALS_MONTHLY_DTW']
        self.max_removals_LAX = removals_info['MAX_NUM_REMOVALS_MONTHLY_LAX']
        self.max_removals_MSP = removals_info['MAX_NUM_REMOVALS_MONTHLY_MSP']
        self.max_removals_SEA = removals_info['MAX_NUM_REMOVALS_MONTHLY_SEA']
        self.max_removals_SLC = removals_info['MAX_NUM_REMOVALS_MONTHLY_SLC']
        self.max_removals_hubs_dict = {'ATL': self.max_removals_ATL, 'CVG': self.max_removals_CVG, 'DTW': self.max_removals_DTW, 'LAX': self.max_removals_LAX, 'MSP': self.max_removals_MSP, 'SEA': self.max_removals_SEA, 'SLC': self.max_removals_SLC}
        self.max_removals_hubs_list = [self.max_removals_ATL, self.max_removals_CVG, self.max_removals_DTW, self.max_removals_LAX, self.max_removals_MSP, self.max_removals_SEA, self.max_removals_SLC]
        self.max_removals_non_hubs = removals_info['MAX_NUM_REMOVALS_MONTHLY_NON_HUBS']
        # Hubs whose cap is 0 can never host a removal.
        self.max_different_removals_hubs = 7 - self.max_removals_hubs_list.count(0)
        self.max_removals_total = removals_info['MAX_NUM_REMOVALS_MONTHLY_TOTAL']
        self.removal_sums = removal_sums
        self.ranges = ranges
        self.indices_where_removals_should_not_occur = []
        self.find_indices_where_removals_should_not_occur()
        # Master index lists; zero-cap hub positions are pruned below.
        self.num_all = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52]
        self.num_hubs = [0, 1, 2, 3, 4, 5, 6]
        self.num_non_hubs = [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52]
        self.remove_indices_from_lists_where_removals_should_not_occur()
        # Band (as str of [min, max]) -> hub indices whose cap reaches the band.
        self.valid_indices_for_ranges = {}
        self.find_indices_for_ranges()
        self.all_perms = []
        # Template situation: one slot per location, all zero.
        self.zero_list_all = [0] * 53
        # Indices where a single location may absorb 2 removals.
        self.num_all_2_removals = []
        self.find_all_values_where_2_removals_can_occur()
        self.indices_to_iterate_for_current_values = []
        self.indices_to_iterate_for_current_values_again = []
        self.will_need_to_iterate_twice = False
        self.more_than_one_value_must_occur_at_non_hubs = False
        # Results accumulate in the shared data module, keyed by subtype.
        data.all_possible_removal_situations[self.engine_subtype] = []
def find_all_values_where_2_removals_can_occur(self):
for r in self.ranges:
current_min = r[0]
current_max = r[1]
if (2 >= current_min) and (2 <= current_max):
self.num_all_2_removals = self.valid_indices_for_ranges[str(r)][:]
self.num_all_2_removals.extend(self.num_non_hubs)
def find_indices_where_removals_should_not_occur(self):
for index in range(7):
if self.max_removals_hubs_list[index] == 0:
self.indices_where_removals_should_not_occur.append(index)
def remove_indices_from_lists_where_removals_should_not_occur(self):
for index in sorted(self.indices_where_removals_should_not_occur, reverse=True):
del self.num_all[index]
del self.num_hubs[index]
def find_indices_for_ranges(self):
for r in self.ranges:
self.valid_indices_for_ranges[str(r)] = []
for i in range(len(self.max_removals_hubs_list)):
hub_max_removals = self.max_removals_hubs_list[i]
for r in self.ranges:
current_min = r[0]
if hub_max_removals >= current_min:
self.valid_indices_for_ranges[str(r)].append(i)
    def generate_all_removal_situations(self):
        """Expand every valid removal-count partition into concrete 53-location
        situations, accumulate them in the shared data module, and export to CSV.

        For each total removal count, each partition's index sets are prepared by
        ``find_indices_to_iterate`` (defined elsewhere in this class) and then
        dispatched to the matching fixed-length expander.
        """
        for num_removals, all_sums in self.removal_sums.items():
            # NOTE(review): current_num_removals is assigned but never read.
            current_num_removals = num_removals
            for values in all_sums:
                self.find_indices_to_iterate(values)
                if len(values) == 1:
                    self.one_value(values)
                elif len(values) == 2:
                    self.two_values(values)
                elif len(values) == 3:
                    self.three_values(values)
                elif len(values) == 4:
                    self.four_values(values)
                elif len(values) == 5:
                    self.five_values(values)
                elif len(values) == 6:
                    self.six_values(values)
                elif len(values) == 7:
                    self.seven_values(values)
                elif len(values) == 8:
                    self.eight_values(values)
                elif len(values) == 9:
                    self.nine_values(values)
        logging.info("All removal situations for " + self.engine_subtype + " have been generated.")
        writer_util.export_all_possible_removal_situations(
            filepath='data_to_read/' + self.engine_subtype + '/' + self.engine_subtype + '_all_possible_removal_situations.csv',
            engine_subtype=self.engine_subtype,
            all_possible_removal_situations=data.all_possible_removal_situations[self.engine_subtype])
def indices_not_equal(self, list_of_indices):
set_of_indices = set(list_of_indices)
list_of_set_of_indices = list(set_of_indices)
if len(list_of_set_of_indices) != len(list_of_indices):
return False
return True
def make_perms_unique_and_add_to_all_perms(self, perms):
perms.sort()
unique_perms_list = list(perm for perm,_ in groupby(perms))
data.all_possible_removal_situations[self.engine_subtype].extend(unique_perms_list)
return unique_perms_list
def one_value(self, values):
perms = []
for i in self.indices_to_iterate_for_current_values[0]:
current_list = self.zero_list_all[:]
current_list[i] = values[0]
perms.append(current_list)
unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms)
num_unique_perms = len(unique_perms_list)
def two_values(self, values):
perms = []
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
if self.indices_not_equal([i, j]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
perms.append(current_list)
unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms)
num_unique_perms = len(unique_perms_list)
def three_values(self, values):
perms = []
if self.will_need_to_iterate_twice:
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
for k in self.indices_to_iterate_for_current_values[2]:
if self.indices_not_equal([i, j, k]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
perms.append(current_list)
for i in self.indices_to_iterate_for_current_values_again[0]:
for j in self.indices_to_iterate_for_current_values_again[1]:
for k in self.indices_to_iterate_for_current_values_again[2]:
if self.indices_not_equal([i, j, k]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
perms.append(current_list)
else:
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
for k in self.indices_to_iterate_for_current_values[2]:
if self.indices_not_equal([i, j, k]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
perms.append(current_list)
unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms)
num_unique_perms = len(unique_perms_list)
def four_values(self, values):
perms = []
if self.will_need_to_iterate_twice:
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
for k in self.indices_to_iterate_for_current_values[2]:
for l in self.indices_to_iterate_for_current_values[3]:
if self.indices_not_equal([i, j, k, l]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
perms.append(current_list)
for i in self.indices_to_iterate_for_current_values_again[0]:
for j in self.indices_to_iterate_for_current_values_again[1]:
for k in self.indices_to_iterate_for_current_values_again[2]:
for l in self.indices_to_iterate_for_current_values_again[3]:
if self.indices_not_equal([i, j, k, l]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
perms.append(current_list)
else:
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
for k in self.indices_to_iterate_for_current_values[2]:
for l in self.indices_to_iterate_for_current_values[3]:
if self.indices_not_equal([i, j, k, l]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
perms.append(current_list)
unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms)
num_unique_perms = len(unique_perms_list)
def five_values(self, values):
perms = []
if self.will_need_to_iterate_twice:
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
for k in self.indices_to_iterate_for_current_values[2]:
for l in self.indices_to_iterate_for_current_values[3]:
for m in self.indices_to_iterate_for_current_values[4]:
if self.indices_not_equal([i, j, k, l, m]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
perms.append(current_list)
for i in self.indices_to_iterate_for_current_values_again[0]:
for j in self.indices_to_iterate_for_current_values_again[1]:
for k in self.indices_to_iterate_for_current_values_again[2]:
for l in self.indices_to_iterate_for_current_values_again[3]:
for m in self.indices_to_iterate_for_current_values_again[4]:
if self.indices_not_equal([i, j, k, l, m]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
perms.append(current_list)
else:
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
for k in self.indices_to_iterate_for_current_values[2]:
for l in self.indices_to_iterate_for_current_values[3]:
for m in self.indices_to_iterate_for_current_values[4]:
if self.indices_not_equal([i, j, k, l, m]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
perms.append(current_list)
unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms)
num_unique_perms = len(unique_perms_list)
def six_values(self, values):
perms = []
if self.will_need_to_iterate_twice:
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
for k in self.indices_to_iterate_for_current_values[2]:
for l in self.indices_to_iterate_for_current_values[3]:
for m in self.indices_to_iterate_for_current_values[4]:
for n in self.indices_to_iterate_for_current_values[5]:
if self.indices_not_equal([i, j, k, l, m, n]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
current_list[n] = values[5]
perms.append(current_list)
for i in self.indices_to_iterate_for_current_values_again[0]:
for j in self.indices_to_iterate_for_current_values_again[1]:
for k in self.indices_to_iterate_for_current_values_again[2]:
for l in self.indices_to_iterate_for_current_values_again[3]:
for m in self.indices_to_iterate_for_current_values_again[4]:
for n in self.indices_to_iterate_for_current_values_again[5]:
if self.indices_not_equal([i, j, k, l, m, n]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
current_list[n] = values[5]
perms.append(current_list)
else:
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
for k in self.indices_to_iterate_for_current_values[2]:
for l in self.indices_to_iterate_for_current_values[3]:
for m in self.indices_to_iterate_for_current_values[4]:
for n in self.indices_to_iterate_for_current_values[5]:
if self.indices_not_equal([i, j, k, l, m, n]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
current_list[n] = values[5]
perms.append(current_list)
unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms)
num_unique_perms = len(unique_perms_list)
def seven_values(self, values):
perms = []
if self.will_need_to_iterate_twice:
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
for k in self.indices_to_iterate_for_current_values[2]:
for l in self.indices_to_iterate_for_current_values[3]:
for m in self.indices_to_iterate_for_current_values[4]:
for n in self.indices_to_iterate_for_current_values[5]:
for o in self.indices_to_iterate_for_current_values[6]:
if self.indices_not_equal([i, j, k, l, m, n, o]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
current_list[n] = values[5]
current_list[o] = values[6]
perms.append(current_list)
for i in self.indices_to_iterate_for_current_values_again[0]:
for j in self.indices_to_iterate_for_current_values_again[1]:
for k in self.indices_to_iterate_for_current_values_again[2]:
for l in self.indices_to_iterate_for_current_values_again[3]:
for m in self.indices_to_iterate_for_current_values_again[4]:
for n in self.indices_to_iterate_for_current_values_again[5]:
for o in self.indices_to_iterate_for_current_values_again[6]:
if self.indices_not_equal([i, j, k, l, m, n, o]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
current_list[n] = values[5]
current_list[o] = values[6]
perms.append(current_list)
else:
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
for k in self.indices_to_iterate_for_current_values[2]:
for l in self.indices_to_iterate_for_current_values[3]:
for m in self.indices_to_iterate_for_current_values[4]:
for n in self.indices_to_iterate_for_current_values[5]:
for o in self.indices_to_iterate_for_current_values[6]:
if self.indices_not_equal([i, j, k, l, m, n, o]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
current_list[n] = values[5]
current_list[o] = values[6]
perms.append(current_list)
unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms)
num_unique_perms = len(unique_perms_list)
def eight_values(self, values):
perms = []
if self.will_need_to_iterate_twice:
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
for k in self.indices_to_iterate_for_current_values[2]:
for l in self.indices_to_iterate_for_current_values[3]:
for m in self.indices_to_iterate_for_current_values[4]:
for n in self.indices_to_iterate_for_current_values[5]:
for o in self.indices_to_iterate_for_current_values[6]:
for p in self.indices_to_iterate_for_current_values[7]:
if self.indices_not_equal([i, j, k, l, m, n, o, p]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
current_list[n] = values[5]
current_list[o] = values[6]
current_list[p] = values[7]
perms.append(current_list)
for i in self.indices_to_iterate_for_current_values_again[0]:
for j in self.indices_to_iterate_for_current_values_again[1]:
for k in self.indices_to_iterate_for_current_values_again[2]:
for l in self.indices_to_iterate_for_current_values_again[3]:
for m in self.indices_to_iterate_for_current_values_again[4]:
for n in self.indices_to_iterate_for_current_values_again[5]:
for o in self.indices_to_iterate_for_current_values_again[6]:
for p in self.indices_to_iterate_for_current_values_again[7]:
if self.indices_not_equal([i, j, k, l, m, n, o, p]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
current_list[n] = values[5]
current_list[o] = values[6]
current_list[p] = values[7]
perms.append(current_list)
else:
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
for k in self.indices_to_iterate_for_current_values[2]:
for l in self.indices_to_iterate_for_current_values[3]:
for m in self.indices_to_iterate_for_current_values[4]:
for n in self.indices_to_iterate_for_current_values[5]:
for o in self.indices_to_iterate_for_current_values[6]:
for p in self.indices_to_iterate_for_current_values[7]:
if self.indices_not_equal([i, j, k, l, m, n, o, p]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
current_list[n] = values[5]
current_list[o] = values[6]
current_list[p] = values[7]
perms.append(current_list)
unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms)
num_unique_perms = len(unique_perms_list)
def nine_values(self, values):
perms = []
if self.will_need_to_iterate_twice:
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
for k in self.indices_to_iterate_for_current_values[2]:
for l in self.indices_to_iterate_for_current_values[3]:
for m in self.indices_to_iterate_for_current_values[4]:
for n in self.indices_to_iterate_for_current_values[5]:
for o in self.indices_to_iterate_for_current_values[6]:
for p in self.indices_to_iterate_for_current_values[7]:
for q in self.indices_to_iterate_for_current_values[8]:
if self.indices_not_equal([i, j, k, l, m, n, o, p, q]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
current_list[n] = values[5]
current_list[o] = values[6]
current_list[p] = values[7]
current_list[q] = values[8]
perms.append(current_list)
for i in self.indices_to_iterate_for_current_values_again[0]:
for j in self.indices_to_iterate_for_current_values_again[1]:
for k in self.indices_to_iterate_for_current_values_again[2]:
for l in self.indices_to_iterate_for_current_values_again[3]:
for m in self.indices_to_iterate_for_current_values_again[4]:
for n in self.indices_to_iterate_for_current_values_again[5]:
for o in self.indices_to_iterate_for_current_values_again[6]:
for p in self.indices_to_iterate_for_current_values_again[7]:
for q in self.indices_to_iterate_for_current_values_again[8]:
if self.indices_not_equal([i, j, k, l, m, n, o, p, q]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
current_list[n] = values[5]
current_list[o] = values[6]
current_list[p] = values[7]
current_list[q] = values[8]
perms.append(current_list)
else:
for i in self.indices_to_iterate_for_current_values[0]:
for j in self.indices_to_iterate_for_current_values[1]:
for k in self.indices_to_iterate_for_current_values[2]:
for l in self.indices_to_iterate_for_current_values[3]:
for m in self.indices_to_iterate_for_current_values[4]:
for n in self.indices_to_iterate_for_current_values[5]:
for o in self.indices_to_iterate_for_current_values[6]:
for p in self.indices_to_iterate_for_current_values[7]:
for q in self.indices_to_iterate_for_current_values[8]:
if self.indices_not_equal([i, j, k, l, m, n, o, p, q]):
current_list = self.zero_list_all[:]
current_list[i] = values[0]
current_list[j] = values[1]
current_list[k] = values[2]
current_list[l] = values[3]
current_list[m] = values[4]
current_list[n] = values[5]
current_list[o] = values[6]
current_list[p] = values[7]
current_list[q] = values[8]
perms.append(current_list)
unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms)
num_unique_perms = len(unique_perms_list)
def reset_values_to_sum_variables(self):
self.indices_to_iterate_for_current_values = []
self.indices_to_iterate_for_current_values_again = []
self.will_need_to_iterate_twice = False
self.more_than_one_value_must_occur_at_non_hubs = False
def more_than_one_value_must_occur_outside_of_hubs(self, values):
return ((len(values) - 1) > self.max_different_removals_hubs)
    def values_to_sum_contain_less_than_two_ones(self, values):
        # NOTE(review): the implementation returns True when *values*
        # contains AT LEAST two 1s, which contradicts the method name
        # ("less than two ones").  find_indices_to_iterate calls a method
        # named values_to_sum_contain_at_least_two_ones (not visible in
        # this file chunk), which matches this implementation -- confirm
        # which name is intended before relying on either.
        return (values.count(1) >= 2)
def append_to_beginning_of_indices_to_iterate_for_current_values(self, list_of_lists_to_append):
for a_list in list_of_lists_to_append:
self.indices_to_iterate_for_current_values.append(a_list)
def append_to_beginning_of_indices_to_iterate_for_current_values_again(self, list_of_lists_to_append):
for a_list in list_of_lists_to_append:
self.indices_to_iterate_for_current_values_again.append(a_list)
def append_to_end_of_indices_to_iterate_for_current_values(self, values_to_edit):
for value in values_to_edit:
for r in self.ranges:
current_min = r[0]
current_max = r[1]
if (value >= current_min) and (value <= current_max):
self.indices_to_iterate_for_current_values.append(self.valid_indices_for_ranges[str(r)])
def append_to_end_of_indices_to_iterate_for_current_values_again(self, values_to_edit):
for value in values_to_edit:
for r in self.ranges:
current_min = r[0]
current_max = r[1]
if (value >= current_min) and (value <= current_max):
self.indices_to_iterate_for_current_values_again.append(self.valid_indices_for_ranges[str(r)])
def remove_values_for_which_index_lists_have_been_found(self, values_to_edit, values_to_remove):
for value in values_to_remove:
values_to_edit.remove(value)
return values_to_edit
    def find_indices_to_iterate(self, values):
        """Populate the index lists that the *_values() generators sweep.

        Based on how many 1s and 2s *values* contains and on the removal
        limits, decides which index lists each value may be placed at and
        stores them in self.indices_to_iterate_for_current_values (and,
        when two separate sweeps are needed, in
        self.indices_to_iterate_for_current_values_again with
        self.will_need_to_iterate_twice set).

        NOTE(review): self.num_hubs / self.num_non_hubs / self.num_all /
        self.num_all_2_removals are appended as whole index lists despite
        the ``num_`` prefix -- presumably each holds valid indices;
        confirm against their definitions (not visible here).
        """
        self.reset_values_to_sum_variables()
        # Work on a copy so the caller's *values* list is never mutated.
        values_to_edit = values[:]
        if self.max_removals_non_hubs == 2:
            if self.more_than_one_value_must_occur_outside_of_hubs(values):
                self.more_than_one_value_must_occur_at_non_hubs = True
                # NOTE(review): values_to_sum_contain_at_least_two_ones is
                # not defined in the visible part of this class; the
                # similarly named values_to_sum_contain_less_than_two_ones
                # implements exactly this "at least two ones" check --
                # confirm the intended method name.  Also note asserts are
                # stripped under ``python -O``.
                assert self.values_to_sum_contain_at_least_two_ones(values), "Two values must occur at non-hubs to generate permutations for this sum."
            else:
                # Pin the two 1s to non-hub positions; remaining values
                # iterate over their range-derived index lists.
                self.append_to_beginning_of_indices_to_iterate_for_current_values([self.num_non_hubs, self.num_non_hubs])
                values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [1, 1])
                self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit)
        else:
            if values.count(2) > 0:
                if values.count(1) >= 2: # 2 occurs at least once, 1 occurs at least twice
                    self.will_need_to_iterate_twice = True
                    # first iteration
                    list_of_lists_to_append = []
                    list_of_ones_and_two = []
                    for i in range(values.count(1)):
                        list_of_ones_and_two.append(1)
                        list_of_lists_to_append.append(self.num_hubs) # all 1s iterate through hubs only
                    list_of_ones_and_two.append(2)
                    list_of_lists_to_append.append(self.num_non_hubs) # 2 iterates through non-hubs only
                    self.append_to_beginning_of_indices_to_iterate_for_current_values(list_of_lists_to_append)
                    values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, list_of_ones_and_two)
                    self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit)
                    # second iteration
                    values_to_edit = values[:]
                    self.append_to_beginning_of_indices_to_iterate_for_current_values_again([self.num_all, self.num_all]) # two 1s iterate through everything
                    values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [1, 1])
                    self.append_to_end_of_indices_to_iterate_for_current_values_again(values_to_edit)
                elif values.count(1) == 1: # 2 occurs at least once, 1 occurs only once
                    self.will_need_to_iterate_twice = True
                    # first iteration
                    self.append_to_beginning_of_indices_to_iterate_for_current_values([self.num_hubs, self.num_non_hubs])
                    values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [1, 2])
                    self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit)
                    # second iteration
                    values_to_edit = values[:]
                    self.append_to_beginning_of_indices_to_iterate_for_current_values_again([self.num_all])
                    values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [1])
                    self.append_to_end_of_indices_to_iterate_for_current_values_again(values_to_edit)
                elif values.count(1) == 0: # 2 occurs at least once, 1 never occurs
                    self.append_to_beginning_of_indices_to_iterate_for_current_values([self.num_all_2_removals])
                    values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [2])
                    self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit)
            elif values.count(2) == 0:
                if values.count(1) >= 2: # 2 never occurs, 1 occurs at least twice
                    self.append_to_beginning_of_indices_to_iterate_for_current_values([self.num_all, self.num_all])
                    values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [1, 1])
                    self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit)
                elif values.count(1) == 1: # 2 never occurs, 1 occurs once
                    self.append_to_beginning_of_indices_to_iterate_for_current_values([self.num_all])
                    values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [1])
                    self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit)
                elif values.count(1) == 0: # 2 never occurs, 1 never occurs
                    self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit)
            else:
                # NOTE(review): list.count() is never negative, so one of
                # the two branches above always runs -- this else looks
                # unreachable; confirm it is intentional dead code.
                if values.count(1) > 0:
                    self.append_to_beginning_of_indices_to_iterate_for_current_values([self.num_all])
                    values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [1])
                    self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit)
                if values.count(1) == 0:
                    self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit)
| 46.426391
| 252
| 0.749222
|
4a0fc56d8df40ffb6976701cfb10344b5102675b
| 16,324
|
py
|
Python
|
scripts/install_third_party_libs_test.py
|
jlau323/oppia
|
37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691
|
[
"Apache-2.0"
] | 2
|
2021-04-08T01:06:08.000Z
|
2021-06-02T08:20:13.000Z
|
scripts/install_third_party_libs_test.py
|
gitter-badger/oppia
|
7d8e659264582d7ce74bc6c139e597b82bca0e04
|
[
"Apache-2.0"
] | 1
|
2020-05-27T06:08:17.000Z
|
2020-05-27T06:08:17.000Z
|
scripts/install_third_party_libs_test.py
|
gitter-badger/oppia
|
7d8e659264582d7ce74bc6c139e597b82bca0e04
|
[
"Apache-2.0"
] | 1
|
2020-12-11T06:56:31.000Z
|
2020-12-11T06:56:31.000Z
|
# coding: utf-8
#
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/install_third_party_libs.py."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import os
import shutil
import subprocess
import tempfile
from core.tests import test_utils
import python_utils
from . import common
from . import install_backend_python_libs
from . import install_third_party
from . import install_third_party_libs
from . import pre_commit_hook
from . import pre_push_hook
from . import setup
from . import setup_gae
RELEASE_TEST_DIR = os.path.join('core', 'tests', 'release_sources', '')
class InstallThirdPartyLibsTests(test_utils.GenericTestBase):
"""Test the methods for installing third party libs."""
    def setUp(self):
        """Install the mocked subprocess/print primitives shared below."""
        super(InstallThirdPartyLibsTests, self).setUp()
        # Records whether the mocked subprocess entry point was invoked.
        self.check_function_calls = {
            'check_call_is_called': False,
        }
        # Collects every message printed through python_utils.PRINT.
        self.print_arr = []
        def mock_check_call(unused_cmd_tokens, *args, **kwargs): # pylint: disable=unused-argument
            self.check_function_calls['check_call_is_called'] = True
            class Ret(python_utils.OBJECT):
                """Return object with required attributes."""
                def __init__(self):
                    self.returncode = 0
                def communicate(self):
                    """Return empty stdout and stderr."""
                    return '', ''
            return Ret()
        def mock_popen_error_call(unused_cmd_tokens, *args, **kwargs): # pylint: disable=unused-argument
            class Ret(python_utils.OBJECT):
                """Return object that gives user-prefix error."""
                def __init__(self):
                    self.returncode = 1
                def communicate(self):
                    """Return user-prefix error as stderr."""
                    return '', 'can\'t combine user with prefix'
            return Ret()
        def mock_print(msg, end=''): # pylint: disable=unused-argument
            self.print_arr.append(msg)
        self.check_call_swap = self.swap(
            subprocess, 'check_call', mock_check_call)
        # Popen is swapped with the same successful mock as check_call.
        self.Popen_swap = self.swap(
            subprocess, 'Popen', mock_check_call)
        self.Popen_error_swap = self.swap(
            subprocess, 'Popen', mock_popen_error_call)
        self.print_swap = self.swap(python_utils, 'PRINT', mock_print)
def test_tweak_yarn_executable(self):
def mock_is_file(unused_filename):
return True
def mock_rename(origin_name, new_name):
self.assertEqual(origin_name + '.sh', new_name)
mock_rename.called = True
mock_rename.called = False
isfile_swap = self.swap(os.path, 'isfile', mock_is_file)
rename_swap = self.swap(os, 'rename', mock_rename)
with isfile_swap, rename_swap:
install_third_party_libs.tweak_yarn_executable()
self.assertTrue(mock_rename.called)
def test_get_yarn_command_on_windows(self):
os_name_swap = self.swap(common, 'OS_NAME', 'Windows')
with os_name_swap:
command = install_third_party_libs.get_yarn_command()
self.assertEqual(command, 'yarn.cmd')
def test_get_yarn_command_on_linux(self):
os_name_swap = self.swap(common, 'OS_NAME', 'Linux')
with os_name_swap:
command = install_third_party_libs.get_yarn_command()
self.assertEqual(command, 'yarn')
def test_get_yarn_command_on_mac(self):
os_name_swap = self.swap(common, 'OS_NAME', 'Darwin')
with os_name_swap:
command = install_third_party_libs.get_yarn_command()
self.assertEqual(command, 'yarn')
def test_ensure_pip_library_is_installed(self):
check_function_calls = {
'pip_install_is_called': False
}
def mock_exists(unused_path):
return False
def mock_pip_install(unused_package, unused_version, unused_path):
check_function_calls['pip_install_is_called'] = True
exists_swap = self.swap(os.path, 'exists', mock_exists)
pip_install_swap = self.swap(
install_backend_python_libs, 'pip_install', mock_pip_install)
with exists_swap, pip_install_swap:
install_third_party_libs.ensure_pip_library_is_installed(
'package', 'version', 'path')
self.assertTrue(check_function_calls['pip_install_is_called'])
    def test_function_calls(self):
        """main() on non-Windows runs each sub-script once, copies the GAE
        google packages, rewrites the configparser files and does not
        tweak the yarn executable.
        """
        # Flags flipped by the mocks below; compared against the expected
        # dict after main() runs.
        check_function_calls = {
            'ensure_pip_library_is_installed_is_called': False,
            'install_third_party_main_is_called': False,
            'setup_main_is_called': False,
            'setup_gae_main_is_called': False,
            'pre_commit_hook_main_is_called': False,
            'pre_push_hook_main_is_called': False,
            'tweak_yarn_executable_is_called': False
        }
        expected_check_function_calls = {
            'ensure_pip_library_is_installed_is_called': True,
            'install_third_party_main_is_called': True,
            'setup_main_is_called': True,
            'setup_gae_main_is_called': True,
            'pre_commit_hook_main_is_called': True,
            'pre_push_hook_main_is_called': True,
            # Yarn tweaking only happens on Windows.
            'tweak_yarn_executable_is_called': False
        }
        def mock_ensure_pip_library_is_installed(
                unused_package, unused_version, unused_path):
            check_function_calls[
                'ensure_pip_library_is_installed_is_called'] = True
        def mock_check_call(unused_cmd_tokens):
            pass
        def mock_main_for_install_third_party(args): # pylint: disable=unused-argument
            check_function_calls['install_third_party_main_is_called'] = True
        def mock_main_for_setup(args): # pylint: disable=unused-argument
            check_function_calls['setup_main_is_called'] = True
        def mock_main_for_setup_gae(args): # pylint: disable=unused-argument
            check_function_calls['setup_gae_main_is_called'] = True
        def mock_main_for_pre_commit_hook(args): # pylint: disable=unused-argument
            check_function_calls['pre_commit_hook_main_is_called'] = True
        def mock_main_for_pre_push_hook(args): # pylint: disable=unused-argument
            check_function_calls['pre_push_hook_main_is_called'] = True
        def mock_tweak_yarn_executable():
            check_function_calls['tweak_yarn_executable_is_called'] = True
        correct_google_path = os.path.join(
            common.THIRD_PARTY_PYTHON_LIBS_DIR, 'google')
        # Pretend the google package directories are missing so that main()
        # creates and populates them.
        def mock_is_dir(path):
            directories_that_do_not_exist = {
                os.path.join(correct_google_path, 'appengine'),
                os.path.join(correct_google_path, 'net'),
                os.path.join(correct_google_path, 'pyglib'),
                correct_google_path
            }
            if path in directories_that_do_not_exist:
                return False
            return True
        initialized_directories = []
        def mock_mk_dir(path):
            initialized_directories.append(path)
        copied_src_dst_tuples = []
        def mock_copy_tree(src, dst):
            copied_src_dst_tuples.append((src, dst))
        correct_copied_src_dst_tuples = [
            (
                os.path.join(
                    common.GOOGLE_APP_ENGINE_SDK_HOME, 'google', 'appengine'),
                os.path.join(correct_google_path, 'appengine')),
            (
                os.path.join(
                    common.GOOGLE_APP_ENGINE_SDK_HOME, 'google', 'net'),
                os.path.join(correct_google_path, 'net')),
            (
                os.path.join(
                    common.GOOGLE_APP_ENGINE_SDK_HOME, 'google', 'pyglib'),
                os.path.join(correct_google_path, 'pyglib'))
        ]
        ensure_pip_install_swap = self.swap(
            install_third_party_libs, 'ensure_pip_library_is_installed',
            mock_ensure_pip_library_is_installed)
        swap_is_dir = self.swap(os.path, 'isdir', mock_is_dir)
        swap_mk_dir = self.swap(os, 'mkdir', mock_mk_dir)
        swap_copy_tree = self.swap(shutil, 'copytree', mock_copy_tree)
        check_call_swap = self.swap(subprocess, 'check_call', mock_check_call)
        install_third_party_main_swap = self.swap(
            install_third_party, 'main', mock_main_for_install_third_party)
        setup_main_swap = self.swap(setup, 'main', mock_main_for_setup)
        setup_gae_main_swap = self.swap(
            setup_gae, 'main', mock_main_for_setup_gae)
        pre_commit_hook_main_swap = self.swap(
            pre_commit_hook, 'main', mock_main_for_pre_commit_hook)
        pre_push_hook_main_swap = self.swap(
            pre_push_hook, 'main', mock_main_for_pre_push_hook)
        tweak_yarn_executable_swap = self.swap(
            install_third_party_libs, 'tweak_yarn_executable',
            mock_tweak_yarn_executable)
        # Seed temporary configparser files; main() is expected to strip
        # the ConverterMapping lines from them.
        py_actual_text = (
            'ConverterMapping,\nLine ending with '
            '"ConverterMapping",\nOther Line\n')
        py_expected_text = ('Line ending with \nOther Line\n')
        temp_py_config_file = tempfile.NamedTemporaryFile(prefix='py').name
        with python_utils.open_file(temp_py_config_file, 'w') as f:
            f.write(py_actual_text)
        pq_actual_text = (
            'ConverterMapping,\n"ConverterMapping",\nOther Line\n')
        pq_expected_text = ('Other Line\n')
        temp_pq_config_file = tempfile.NamedTemporaryFile(prefix='pq').name
        with python_utils.open_file(temp_pq_config_file, 'w') as f:
            f.write(pq_actual_text)
        py_config_swap = self.swap(
            install_third_party_libs, 'PYLINT_CONFIGPARSER_FILEPATH',
            temp_py_config_file)
        pq_config_swap = self.swap(
            install_third_party_libs, 'PQ_CONFIGPARSER_FILEPATH',
            temp_pq_config_file)
        with ensure_pip_install_swap, check_call_swap, self.Popen_swap:
            with install_third_party_main_swap, setup_main_swap:
                with setup_gae_main_swap, pre_commit_hook_main_swap:
                    with pre_push_hook_main_swap, py_config_swap:
                        with pq_config_swap, tweak_yarn_executable_swap:
                            with swap_is_dir, swap_mk_dir, swap_copy_tree:
                                install_third_party_libs.main()
        self.assertEqual(check_function_calls, expected_check_function_calls)
        with python_utils.open_file(temp_py_config_file, 'r') as f:
            self.assertEqual(f.read(), py_expected_text)
        with python_utils.open_file(temp_pq_config_file, 'r') as f:
            self.assertEqual(f.read(), pq_expected_text)
        self.assertEqual(
            copied_src_dst_tuples, correct_copied_src_dst_tuples)
        self.assertEqual(
            initialized_directories,
            [correct_google_path])
    def test_function_calls_on_windows(self):
        """Verify main() on Windows runs every setup step except the
        pre-push hook, and strips 'ConverterMapping' lines from both
        config files.

        All side-effecting collaborators (pip install, subprocess calls,
        the sub-script main()s, yarn tweaking) are swapped out with mocks
        that only record that they were invoked.
        """
        # Flags flipped to True by the mocks below when each step runs.
        check_function_calls = {
            'ensure_pip_library_is_installed_is_called': False,
            'install_third_party_main_is_called': False,
            'setup_main_is_called': False,
            'setup_gae_main_is_called': False,
            'pre_commit_hook_main_is_called': False,
            'pre_push_hook_main_is_called': False,
            'tweak_yarn_executable_is_called': False
        }
        # On Windows the pre-push hook is expected to be skipped, while
        # tweak_yarn_executable is expected to run.
        expected_check_function_calls = {
            'ensure_pip_library_is_installed_is_called': True,
            'install_third_party_main_is_called': True,
            'setup_main_is_called': True,
            'setup_gae_main_is_called': True,
            'pre_commit_hook_main_is_called': True,
            'pre_push_hook_main_is_called': False,
            'tweak_yarn_executable_is_called': True
        }
        def mock_ensure_pip_library_is_installed(
                unused_package, unused_version, unused_path):
            check_function_calls[
                'ensure_pip_library_is_installed_is_called'] = True
        def mock_check_call(unused_cmd_tokens):
            pass
        def mock_main_for_install_third_party(args):  # pylint: disable=unused-argument
            check_function_calls['install_third_party_main_is_called'] = True
        def mock_main_for_setup(args):  # pylint: disable=unused-argument
            check_function_calls['setup_main_is_called'] = True
        def mock_main_for_setup_gae(args):  # pylint: disable=unused-argument
            check_function_calls['setup_gae_main_is_called'] = True
        def mock_main_for_pre_commit_hook(args):  # pylint: disable=unused-argument
            check_function_calls['pre_commit_hook_main_is_called'] = True
        def mock_main_for_pre_push_hook(args):  # pylint: disable=unused-argument
            check_function_calls['pre_push_hook_main_is_called'] = True
        def mock_tweak_yarn_executable():
            check_function_calls['tweak_yarn_executable_is_called'] = True
        ensure_pip_install_swap = self.swap(
            install_third_party_libs, 'ensure_pip_library_is_installed',
            mock_ensure_pip_library_is_installed)
        check_call_swap = self.swap(subprocess, 'check_call', mock_check_call)
        install_third_party_main_swap = self.swap(
            install_third_party, 'main', mock_main_for_install_third_party)
        setup_main_swap = self.swap(setup, 'main', mock_main_for_setup)
        setup_gae_main_swap = self.swap(
            setup_gae, 'main', mock_main_for_setup_gae)
        pre_commit_hook_main_swap = self.swap(
            pre_commit_hook, 'main', mock_main_for_pre_commit_hook)
        pre_push_hook_main_swap = self.swap(
            pre_push_hook, 'main', mock_main_for_pre_push_hook)
        tweak_yarn_executable_swap = self.swap(
            install_third_party_libs, 'tweak_yarn_executable',
            mock_tweak_yarn_executable)
        os_name_swap = self.swap(common, 'OS_NAME', 'Windows')
        # Config fixtures: lines containing 'ConverterMapping' must be
        # removed by main(); the remaining text is the expectation below.
        py_actual_text = (
            'ConverterMapping,\nLine ending with '
            '"ConverterMapping",\nOther Line\n')
        py_expected_text = ('Line ending with \nOther Line\n')
        temp_py_config_file = tempfile.NamedTemporaryFile(prefix='py').name
        with python_utils.open_file(temp_py_config_file, 'w') as f:
            f.write(py_actual_text)
        pq_actual_text = (
            'ConverterMapping,\n"ConverterMapping",\nOther Line\n')
        pq_expected_text = ('Other Line\n')
        temp_pq_config_file = tempfile.NamedTemporaryFile(prefix='pq').name
        with python_utils.open_file(temp_pq_config_file, 'w') as f:
            f.write(pq_actual_text)
        py_config_swap = self.swap(
            install_third_party_libs, 'PYLINT_CONFIGPARSER_FILEPATH',
            temp_py_config_file)
        pq_config_swap = self.swap(
            install_third_party_libs, 'PQ_CONFIGPARSER_FILEPATH',
            temp_pq_config_file)
        with ensure_pip_install_swap, check_call_swap, self.Popen_swap:
            with install_third_party_main_swap, setup_main_swap:
                with setup_gae_main_swap, pre_commit_hook_main_swap:
                    with pre_push_hook_main_swap, py_config_swap:
                        with pq_config_swap, tweak_yarn_executable_swap:
                            with os_name_swap:
                                install_third_party_libs.main()
        self.assertEqual(check_function_calls, expected_check_function_calls)
        with python_utils.open_file(temp_py_config_file, 'r') as f:
            self.assertEqual(f.read(), py_expected_text)
        with python_utils.open_file(temp_pq_config_file, 'r') as f:
            self.assertEqual(f.read(), pq_expected_text)
| 45.344444
| 105
| 0.661419
|
4a0fc69cd484911f615bdecaea1be3ac551172c9
| 609
|
py
|
Python
|
var/spack/repos/builtin/packages/perl-text-format/package.py
|
jameshclrk/spack
|
1f8fcb36091e1d5ae63a2279a958ca3ff57088bf
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2021-03-19T13:12:47.000Z
|
2021-03-19T13:12:47.000Z
|
var/spack/repos/builtin/packages/perl-text-format/package.py
|
jameshclrk/spack
|
1f8fcb36091e1d5ae63a2279a958ca3ff57088bf
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2019-01-11T20:11:52.000Z
|
2019-01-11T20:11:52.000Z
|
var/spack/repos/builtin/packages/perl-text-format/package.py
|
jameshclrk/spack
|
1f8fcb36091e1d5ae63a2279a958ca3ff57088bf
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-10-14T14:20:17.000Z
|
2020-10-14T14:20:17.000Z
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlTextFormat(PerlPackage):
    """Text::Format - Various subroutines to format text"""
    # Canonical package page on MetaCPAN.
    homepage = "https://metacpan.org/pod/Text::Format"
    url = "https://cpan.metacpan.org/authors/id/S/SH/SHLOMIF/Text-Format-0.61.tar.gz"
    # sha256 checksum of the release tarball referenced by `url`.
    version('0.61', sha256='bb8a3b8ff515c85101baf553a769337f944a05cde81f111ae78aff416bf4ae2b')
    # Module::Build is only required at build time.
    depends_on('perl-module-build', type='build')
| 33.833333
| 94
| 0.738916
|
4a0fc718bbfef0cd63b338ddbe4b6c11933535dd
| 194
|
py
|
Python
|
code/rotate.py
|
tumuum/prog-book
|
20798408ed14145f5c5e4abbc7cc720976aec4ec
|
[
"MIT"
] | null | null | null |
code/rotate.py
|
tumuum/prog-book
|
20798408ed14145f5c5e4abbc7cc720976aec4ec
|
[
"MIT"
] | null | null | null |
code/rotate.py
|
tumuum/prog-book
|
20798408ed14145f5c5e4abbc7cc720976aec4ec
|
[
"MIT"
] | null | null | null |
def rotate(s, n):
    """Rotate sequence ``s`` left by ``n`` positions and return the result.

    Fixes over the original loop-based version:
    - empty sequences are returned unchanged instead of raising IndexError;
    - negative ``n`` no longer loops forever (Python modulo maps it to the
      equivalent right rotation);
    - large ``n`` is reduced modulo ``len(s)``, so the rotation is O(len(s))
      instead of O(n).

    Works for any sliceable sequence (list, tuple, str).
    """
    if not s:
        return s
    n %= len(s)
    # A single concatenation of two slices is equivalent to n single-step
    # left rotations.
    return s[n:] + s[:n]
def _rotate(s):
return s[1:] + [s[0]]
def rotate2(s, n):
    """Recursively rotate sequence ``s`` left by ``n`` positions.

    Fixes over the original: ``n`` is reduced modulo ``len(s)`` up front, so
    large or negative ``n`` no longer recurses unboundedly (recursion depth
    is now at most ``len(s)``), and empty sequences are returned unchanged
    instead of raising IndexError.
    """
    if not s:
        return s
    n %= len(s)
    if n == 0:
        return s
    # Move the head element to the tail and recurse with one fewer step.
    return rotate2(s[1:] + s[:1], n - 1)
| 12.933333
| 34
| 0.530928
|
4a0fc746619b8fcfcdc8e78efa7222d2ad62f2fd
| 3,403
|
py
|
Python
|
sktime/forecasting/hcrystalball.py
|
MFaroukB/sktime
|
29932fc071ab04797bc2f5c00cd533726b31eb46
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/forecasting/hcrystalball.py
|
MFaroukB/sktime
|
29932fc071ab04797bc2f5c00cd533726b31eb46
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/forecasting/hcrystalball.py
|
MFaroukB/sktime
|
29932fc071ab04797bc2f5c00cd533726b31eb46
|
[
"BSD-3-Clause"
] | 1
|
2021-04-30T08:12:18.000Z
|
2021-04-30T08:12:18.000Z
|
# -*- coding: utf-8 -*-
import pandas as pd
from sklearn.base import clone
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin
from sktime.forecasting.base._sktime import _SktimeForecaster
from sktime.utils.validation._dependencies import _check_soft_dependencies
_check_soft_dependencies("hcrystalball")
def _check_fh(fh, cutoff):
if fh is not None:
if not fh.is_all_out_of_sample(cutoff):
raise NotImplementedError(
"in-sample prediction are currently not implemented"
)
def _check_index(index):
if not isinstance(index, pd.DatetimeIndex):
raise NotImplementedError(
"`HCrystalBallForecaster` currently only supports `pd.DatetimeIndex`. "
"Please convert the data index to `pd.DatetimeIndex`."
)
return index
def _adapt_y_X(y, X):
"""Adapt fit data to HCB compliant format
Parameters
----------
y : pd.Series
Target variable
X : pd.Series, pd.DataFrame
Exogenous variables
Returns
-------
tuple
y_train - pd.Series with datetime index
X_train - pd.DataFrame with datetime index
and optionally exogenous variables in columns
Raises
------
ValueError
When neither of the argument has Datetime or Period index
"""
index = _check_index(y.index)
X = pd.DataFrame(index=index) if X is None else X
return y, X
def _get_X_pred(X_pred, index):
"""Translate forecast horizon interface to HCB native dataframe
Parameters
----------
X_pred : pd.DataFrame
Exogenous data for predictions
index : pd.DatetimeIndex
Index generated from the forecasting horizon
Returns
-------
pd.DataFrame
index - datetime
columns - exogenous variables (optional)
"""
if X_pred is not None:
_check_index(X_pred.index)
X_pred = pd.DataFrame(index=index) if X_pred is None else X_pred
return X_pred
def _adapt_y_pred(y_pred):
"""Translate wrapper prediction to sktime format
From Dataframe to series
Parameters
----------
y_pred : pd.DataFrame
Returns
-------
pd.Series
Predictions in form of series
"""
return y_pred.iloc[:, 0]
class HCrystalBallForecaster(_OptionalForecastingHorizonMixin, _SktimeForecaster):
    """Adapter exposing an HCrystalBall model through the sktime forecaster API.

    The wrapped ``model`` is cloned before fitting so the user-supplied
    instance is never mutated.
    """
    def __init__(self, model):
        # model: an hcrystalball-compatible estimator with fit/predict.
        self.model = model
        super(HCrystalBallForecaster, self).__init__()
    def fit(self, y, X=None, fh=None):
        """Fit a clone of the wrapped model to y (and optional exogenous X)."""
        self._is_fitted = False
        self._set_y_X(y, X)
        self._set_fh(fh)
        # Only out-of-sample horizons are supported; validate early if given.
        if fh is not None:
            _check_fh(self.fh, self.cutoff)
        y, X = _adapt_y_X(y, X)
        # Clone so repeated fits never mutate the user-supplied model.
        self.model_ = clone(self.model)
        self.model_.fit(X, y)
        self._is_fitted = True
        return self
    def _predict(self, fh=None, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
        """Forecast out-of-sample points; prediction intervals are unsupported."""
        if return_pred_int:
            raise NotImplementedError()
        _check_fh(fh, self.cutoff)
        # Translate the horizon into the datetime-indexed frame HCB expects.
        X_pred = _get_X_pred(X, index=fh.to_absolute(self.cutoff).to_pandas())
        y_pred = self.model_.predict(X=X_pred)
        return _adapt_y_pred(y_pred)
    def get_fitted_params(self):
        """Not implemented for wrapped HCrystalBall models."""
        raise NotImplementedError()
    def _compute_pred_err(self, alphas):
        """Not implemented: no prediction-error estimates are available."""
        raise NotImplementedError()
| 25.977099
| 84
| 0.652659
|
4a0fc7c6177e37c6570aecac3ea2fd168edbac41
| 1,118
|
py
|
Python
|
models/user.py
|
zbuc/imaghost
|
604af32abab3047f78b9b47aa6c6227e2235d9f3
|
[
"BSD-2-Clause"
] | null | null | null |
models/user.py
|
zbuc/imaghost
|
604af32abab3047f78b9b47aa6c6227e2235d9f3
|
[
"BSD-2-Clause"
] | null | null | null |
models/user.py
|
zbuc/imaghost
|
604af32abab3047f78b9b47aa6c6227e2235d9f3
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import (absolute_import, print_function, division)
import bcrypt
from flask import g
from . import Model
from interfaces.db import db_conn
from ghost_exceptions import NotImplementedException
def hash_password(password):
    """Hash *password* with bcrypt using a freshly generated salt."""
    salt = bcrypt.gensalt()
    return bcrypt.hashpw(password, salt)
class User(Model):
    """Persisted user account: id, name, admin flag and bcrypt password hash."""
    def __init__(self, **kwargs):
        # id stays None until the row has been inserted.
        self.id = kwargs.get('id', None)
        self.name = kwargs.get('name', None)
        # admin is stored as an integer flag (0 = regular user).
        self.admin = kwargs.get('admin', 0)
        self.pwhash = kwargs.get('pwhash', None)
        # A plaintext 'password' kwarg is hashed immediately; the plaintext
        # is never stored on the instance.
        if kwargs.get('password'):
            self.set_password(kwargs.get('password'))
    def set_password(self, password):
        """Replace the stored hash with a bcrypt hash of *password*."""
        self.pwhash = hash_password(password)
    def save(self):
        """Insert this user; updating an existing row is not supported yet.

        NOTE(review): the SELECT passes ``self.id`` bare rather than as a
        parameter tuple — confirm db_conn.execute accepts a scalar here.
        """
        # existing user
        if self.id:
            db_conn.execute('''SELECT * FROM users WHERE id = ?''', self.id)
            raise NotImplementedException('''can't modify users yet''')
        db_conn.execute('''INSERT INTO users (name, admin, pwhash) VALUES
            (?, ?, ?)''', (self.name, self.admin, self.pwhash))
        # Flag the request context so the teardown hook commits the change.
        # NOTE(review): assumes g.db_modified is consumed elsewhere — verify.
        g.db_modified = True
| 28.666667
| 76
| 0.634168
|
4a0fc974952d4b41e6b6cdbe72e9907ef0279cca
| 1,032
|
py
|
Python
|
basis_set_exchange/tests/test_duplicate_slow.py
|
atomse/basis_set_exchange
|
7ffd64082c14d2f61eb43f1c2d44792e8b0e394e
|
[
"BSD-3-Clause"
] | 1
|
2020-12-17T09:55:45.000Z
|
2020-12-17T09:55:45.000Z
|
basis_set_exchange/tests/test_duplicate_slow.py
|
mettlyz/basis_set_exchange
|
2d9f096eb2485f9a72d616a80ace17f15dbfb255
|
[
"BSD-3-Clause"
] | null | null | null |
basis_set_exchange/tests/test_duplicate_slow.py
|
mettlyz/basis_set_exchange
|
2d9f096eb2485f9a72d616a80ace17f15dbfb255
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Test for duplicate data in a basis set
'''
import pytest
import basis_set_exchange as bse
from .common_testvars import bs_names_vers, true_false
from .test_duplicate import _test_duplicates
@pytest.mark.slow
@pytest.mark.parametrize('bs_name,bs_ver', bs_names_vers)
@pytest.mark.parametrize('unc_gen', true_false)
@pytest.mark.parametrize('unc_seg', true_false)
@pytest.mark.parametrize('unc_spdf', true_false)
@pytest.mark.parametrize('make_gen', true_false)
@pytest.mark.parametrize('opt_gen', true_false)
def test_duplicate_slow(bs_name, bs_ver, unc_gen, unc_seg, unc_spdf, opt_gen, make_gen):
    '''
    Test for any duplicate data in a basis set
    '''
    # Every combination of the five boolean options (2**5 variants per
    # basis/version pair) is generated by the stacked parametrize marks;
    # the actual duplicate detection lives in the shared helper.
    bs_dict = bse.get_basis(bs_name, version=bs_ver,
                            uncontract_general=unc_gen,
                            uncontract_segmented=unc_seg,
                            uncontract_spdf=unc_spdf,
                            make_general=make_gen,
                            optimize_general=opt_gen)
    _test_duplicates(bs_dict, False)
| 33.290323
| 88
| 0.682171
|
4a0fca9d7f569122556835bd152a735b3c3fbad3
| 17,252
|
py
|
Python
|
census/core.py
|
mr-fuller/census
|
a786073348af809e88699b278ae33f623510f223
|
[
"BSD-3-Clause"
] | null | null | null |
census/core.py
|
mr-fuller/census
|
a786073348af809e88699b278ae33f623510f223
|
[
"BSD-3-Clause"
] | null | null | null |
census/core.py
|
mr-fuller/census
|
a786073348af809e88699b278ae33f623510f223
|
[
"BSD-3-Clause"
] | null | null | null |
import warnings
from functools import wraps
from builtins import str
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
import pkg_resources
__version__ = pkg_resources.require("census")[0].version
ALL = '*'
def new_session(*args, **kwargs):
    """Create a fresh ``requests`` session (imported lazily).

    NOTE(review): modern ``requests.session()`` takes no arguments —
    confirm the *args/**kwargs pass-through is still valid for the pinned
    requests version.
    """
    import requests
    return requests.session(*args, **kwargs)
class APIKeyError(Exception):
    """Raised when the Census API rejects the supplied API key."""
    def __init__(self, value):
        # Keep the offending message available to callers via .value.
        self.value = value
    def __str__(self):
        return "%r" % (self.value,)
def list_or_str(v):
    """Wrap a scalar in a one-element list; pass lists/tuples through unchanged."""
    return v if isinstance(v, (list, tuple)) else [v]
def float_or_str(v):
    """Best-effort float conversion, falling back to the string form."""
    try:
        result = float(v)
    except ValueError:
        result = str(v)
    return result
def supported_years(*years):
    """Decorator factory restricting a geography method to supported years.

    With no arguments, the decorated method allows any year in the client's
    ``self.years``; explicit years override that set. The ``year`` keyword
    defaults to ``self.default_year``.
    """
    def decorate(func):
        @wraps(func)
        def checked(self, *args, **kwargs):
            year = kwargs.get('year', self.default_year)
            allowed = years if years else self.years
            if int(year) in allowed:
                return func(self, *args, **kwargs)
            raise UnsupportedYearException(
                'Geography is not available in {}. Available years include {}'.format(year, allowed))
        return checked
    return decorate
def retry_on_transient_error(func):
    """Retry a client method when the API reports its known transient error.

    Up to ``self.retries - 1`` attempts swallow only the specific transient
    CensusException message; any other error propagates immediately. The
    final attempt is made unguarded so its outcome (result or exception)
    reaches the caller.
    """
    transient_marker = ("There was an error while running your query. "
                        "We've logged the error and we'll correct it ASAP. "
                        "Sorry for the inconvenience.")
    def wrapper(self, *args, **kwargs):
        guarded_attempts = max(self.retries - 1, 0)
        for _ in range(guarded_attempts):
            try:
                return func(self, *args, **kwargs)
            except CensusException as e:
                if transient_marker not in str(e):
                    raise
        return func(self, *args, **kwargs)
    return wrapper
def chunks(l, n):
    """Yield successive n-sized slices of sequence *l*."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def merge(dicts):
    """Merge an iterable of dicts into one; later keys overwrite earlier ones."""
    result = {}
    for d in dicts:
        result.update(d)
    return result
class CensusException(Exception):
    """Base error for unexpected responses from the Census API."""
    pass
class UnsupportedYearException(CensusException):
    """Raised when a geography/dataset is unavailable for the requested year."""
    pass
class Client(object):
    """Base client for one Census API dataset.

    Subclasses define ``dataset`` (URL path segment), ``default_year`` and
    ``years``; this base class supplies URL templates, querying with retry,
    and per-field result type casting.
    """
    # %s placeholders: (year, dataset) — plus a field name for definition_url.
    endpoint_url = 'https://api.census.gov/data/%s/%s'
    definitions_url = 'https://api.census.gov/data/%s/%s/variables.json'
    definition_url = 'https://api.census.gov/data/%s/%s/variables/%s.json'
    groups_url = 'https://api.census.gov/data/%s/%s/groups.json'
    def __init__(self, key, year=None, session=None, retries=3):
        # key: API key appended to every query's parameters.
        self._key = key
        self.session = session or new_session()
        if year:
            self.default_year = year
        # Attempt budget used by the retry_on_transient_error decorator.
        self.retries = retries
    def tables(self, year=None):
        """
        Returns a list of the data tables available from this source.
        """
        # Set the default year if one hasn't been passed
        if year is None:
            year = self.default_year
        # Query the table metadata as raw JSON
        tables_url = self.groups_url % (year, self.dataset)
        resp = self.session.get(tables_url)
        # Pass it out
        return resp.json()['groups']
    @supported_years()
    def fields(self, year=None, flat=False):
        """Return the dataset's variable definitions.

        With ``flat=True``, maps each variable name to a
        "concept: label" string; otherwise returns the raw definitions
        minus the 'for'/'in' pseudo-variables.
        """
        if year is None:
            year = self.default_year
        data = {}
        fields_url = self.definitions_url % (year, self.dataset)
        resp = self.session.get(fields_url)
        obj = resp.json()
        if flat:
            for key, elem in obj['variables'].items():
                if key in ['for', 'in']:
                    continue
                data[key] = "{}: {}".format(elem['concept'], elem['label'])
        else:
            data = obj['variables']
            if 'for' in data:
                data.pop("for", None)
            if 'in' in data:
                data.pop("in", None)
        return data
    def get(self, fields, geo, year=None, **kwargs):
        """Query in batches of 50 fields (the API's limit) and merge rows."""
        all_results = (self.query(fifty_fields, geo, year, **kwargs)
                       for fifty_fields in chunks(fields, 50))
        merged_results = [merge(result) for result in zip(*all_results)]
        return merged_results
    @retry_on_transient_error
    def query(self, fields, geo, year=None, **kwargs):
        """Issue one API request and return rows as typed dicts.

        Returns [] on HTTP 204; raises APIKeyError for an invalid key and
        CensusException for any other non-200 response.
        """
        if year is None:
            year = self.default_year
        fields = list_or_str(fields)
        url = self.endpoint_url % (year, self.dataset)
        params = {
            'get': ",".join(fields),
            'for': geo['for'],
            'key': self._key,
        }
        if 'in' in geo:
            params['in'] = geo['in']
        resp = self.session.get(url, params=params)
        if resp.status_code == 200:
            try:
                data = resp.json()
            except ValueError as ex:
                # An invalid key yields an HTML error page, not JSON.
                if '<title>Invalid Key</title>' in resp.text:
                    raise APIKeyError(' '.join(resp.text.splitlines()))
                else:
                    raise ex
            # First row is the header; remaining rows are values cast per
            # the field's declared predicate type.
            headers = data.pop(0)
            types = [self._field_type(header, year) for header in headers]
            results = [{header : (cast(item) if item is not None else None)
                        for header, cast, item
                        in zip(headers, types, d)}
                       for d in data]
            return results
        elif resp.status_code == 204:
            return []
        else:
            raise CensusException(resp.text)
    # NOTE(review): lru_cache on a bound method keys on `self` and keeps
    # client instances alive for the lifetime of the cache.
    @lru_cache(maxsize=1024)
    def _field_type(self, field, year):
        """Map a field's declared predicateType to a Python cast callable."""
        url = self.definition_url % (year, self.dataset, field)
        resp = self.session.get(url)
        types = {"fips-for" : str,
                 "fips-in" : str,
                 "int" : float_or_str,
                 "float": float,
                 "string": str}
        if resp.status_code == 200:
            predicate_type = resp.json().get("predicateType", "string")
            return types[predicate_type]
        else:
            return str
    # --- Geography convenience wrappers: each builds a 'for'/'in' geo
    # --- clause and delegates to get().
    @supported_years()
    def us(self, fields, **kwargs):
        return self.get(fields, geo={'for': 'us:1'}, **kwargs)
    @supported_years()
    def state(self, fields, state_fips, **kwargs):
        return self.get(fields, geo={
            'for': 'state:{}'.format(state_fips),
        }, **kwargs)
    @supported_years()
    def state_county(self, fields, state_fips, county_fips, **kwargs):
        return self.get(fields, geo={
            'for': 'county:{}'.format(county_fips),
            'in': 'state:{}'.format(state_fips),
        }, **kwargs)
    @supported_years()
    def state_place(self, fields, state_fips, place, **kwargs):
        return self.get(fields, geo={
            'for': 'place:{}'.format(place),
            'in': 'state:{}'.format(state_fips),
        }, **kwargs)
    @supported_years()
    def state_district(self, fields, state_fips, district, **kwargs):
        """Deprecated alias for state_congressional_district."""
        warnings.warn(
            "state_district refers to congressional districts; use state_congressional_district instead",
            DeprecationWarning
        )
        # throwaway, but we can't pass it in twice.
        congressional_district = kwargs.pop('congressional_district', None)
        return self.state_congressional_district(fields, state_fips, district, **kwargs)
    @supported_years()
    def state_congressional_district(self, fields, state_fips, congressional_district, **kwargs):
        return self.get(fields, geo={
            'for': 'congressional district:{}'.format(congressional_district),
            'in': 'state:{}'.format(state_fips),
        }, **kwargs)
    @supported_years()
    def state_legislative_district_upper(self, fields, state_fips, legislative_district, **kwargs):
        # District codes are zero-padded to three digits per API convention.
        return self.get(fields, geo={
            'for': 'state legislative district (upper chamber):{}'.format(str(legislative_district).zfill(3)),
            'in': 'state:{}'.format(state_fips),
        }, **kwargs)
    @supported_years()
    def state_legislative_district_lower(self, fields, state_fips, legislative_district, **kwargs):
        return self.get(fields, geo={
            'for': 'state legislative district (lower chamber):{}'.format(str(legislative_district).zfill(3)),
            'in': 'state:{}'.format(state_fips),
        }, **kwargs)
class ACSClient(Client):
    """Shared plumbing for the ACS datasets.

    ACS endpoints moved under an ``/acs/`` path segment for years after
    2009, so the URL templates are switched per request based on the year.
    """
    def _switch_endpoints(self, year):
        # Years after 2009 live under /data/<year>/acs/<dataset>; earlier
        # years fall back to the generic templates on the Client base class.
        if year > 2009:
            self.endpoint_url = 'https://api.census.gov/data/%s/acs/%s'
            self.definitions_url = 'https://api.census.gov/data/%s/acs/%s/variables.json'
            self.definition_url = 'https://api.census.gov/data/%s/acs/%s/variables/%s.json'
            self.groups_url = 'https://api.census.gov/data/%s/acs/%s/groups.json'
        else:
            self.endpoint_url = super(ACSClient, self).endpoint_url
            self.definitions_url = super(ACSClient, self).definitions_url
            self.definition_url = super(ACSClient, self).definition_url
            self.groups_url = super(ACSClient, self).groups_url
    def tables(self, *args, **kwargs):
        self._switch_endpoints(kwargs.get('year', self.default_year))
        return super(ACSClient, self).tables(*args, **kwargs)
    def get(self, *args, **kwargs):
        self._switch_endpoints(kwargs.get('year', self.default_year))
        return super(ACSClient, self).get(*args, **kwargs)
class ACS5Client(ACSClient):
    """American Community Survey 5-year estimates client."""
    default_year = 2018
    dataset = 'acs5'
    years = tuple(range(2018,2008,-1))
    @supported_years()
    def state_county_subdivision(self, fields, state_fips,
                                 county_fips, subdiv_fips, **kwargs):
        return self.get(fields, geo={
            'for': 'county subdivision:{}'.format(subdiv_fips),
            'in': 'state:{} county:{}'.format(state_fips, county_fips),
        }, **kwargs)
    @supported_years()
    def state_county_tract(self, fields, state_fips,
                           county_fips, tract, **kwargs):
        return self.get(fields, geo={
            'for': 'tract:{}'.format(tract),
            'in': 'state:{} county:{}'.format(state_fips, county_fips),
        }, **kwargs)
    @supported_years()
    def state_county_blockgroup(self, fields, state_fips, county_fips,
                                blockgroup, tract=None, **kwargs):
        # tract is optional: omit it to query block groups county-wide.
        geo = {
            'for': 'block group:{}'.format(blockgroup),
            'in': 'state:{} county:{}'.format(state_fips, county_fips),
        }
        if tract:
            geo['in'] += ' tract:{}'.format(tract)
        return self.get(fields, geo=geo, **kwargs)
    # ZCTA queries are only supported for 2011 onward.
    @supported_years(2018,2017, 2016, 2015, 2014, 2013, 2012, 2011)
    def zipcode(self, fields, zcta, **kwargs):
        return self.get(fields, geo={
            'for': 'zip code tabulation area:{}'.format(zcta),
        }, **kwargs)
class ACS5DpClient(ACS5Client):
    """ACS 5-year data-profile tables (acs5/profile)."""
    dataset = 'acs5/profile'
    years = tuple(range(2018,2011,-1))
class ACS3Client(ACSClient):
    """American Community Survey 3-year estimates client."""
    default_year = 2013
    dataset = 'acs3'
    years = (2013, 2012)
    @supported_years()
    def state_county_subdivision(self, fields, state_fips,
                                 county_fips, subdiv_fips, **kwargs):
        return self.get(fields, geo={
            'for': 'county subdivision:{}'.format(subdiv_fips),
            'in': 'state:{} county:{}'.format(state_fips, county_fips),
        }, **kwargs)
class ACS3DpClient(ACS3Client):
    """ACS 3-year data-profile tables (acs3/profile)."""
    dataset = 'acs3/profile'
class ACS1Client(ACSClient):
    """American Community Survey 1-year estimates client."""
    # NOTE(review): default_year is 2017 while `years` includes 2018 —
    # confirm whether the default was intentionally left behind.
    default_year = 2017
    dataset = 'acs1'
    years = tuple(range(2018,2010,-1))
    @supported_years()
    def state_county_subdivision(self, fields, state_fips,
                                 county_fips, subdiv_fips, **kwargs):
        return self.get(fields, geo={
            'for': 'county subdivision:{}'.format(subdiv_fips),
            'in': 'state:{} county:{}'.format(state_fips, county_fips),
        }, **kwargs)
class ACS1DpClient(ACS1Client):
    """ACS 1-year data-profile tables (acs1/profile)."""
    dataset = 'acs1/profile'
    years = tuple(range(2018,2011,-1))
class SF1Client(Client):
    """Decennial Census Summary File 1 client (2010, 2000, 1990)."""
    default_year = 2010
    dataset = 'sf1'
    years = (2010, 2000, 1990)
    def _switch_endpoints(self, year):
        # Post-2000 decennial endpoints live under a /dec/ path segment;
        # older years use the generic templates from the Client base class.
        if year > 2000:
            self.endpoint_url = 'https://api.census.gov/data/%s/dec/%s'
            self.definitions_url = 'https://api.census.gov/data/%s/dec/%s/variables.json'
            self.definition_url = 'https://api.census.gov/data/%s/dec/%s/variables/%s.json'
            self.groups_url = 'https://api.census.gov/data/%s/dec/%s/groups.json'
        else:
            self.endpoint_url = super(SF1Client, self).endpoint_url
            self.definitions_url = super(SF1Client, self).definitions_url
            self.definition_url = super(SF1Client, self).definition_url
            self.groups_url = super(SF1Client, self).groups_url
    def tables(self, *args, **kwargs):
        self._switch_endpoints(kwargs.get('year', self.default_year))
        return super(SF1Client, self).tables(*args, **kwargs)
    def get(self, *args, **kwargs):
        self._switch_endpoints(kwargs.get('year', self.default_year))
        return super(SF1Client, self).get(*args, **kwargs)
    @supported_years()
    def state_county_subdivision(self, fields, state_fips,
                                 county_fips, subdiv_fips, **kwargs):
        return self.get(fields, geo={
            'for': 'county subdivision:{}'.format(subdiv_fips),
            'in': 'state:{} county:{}'.format(state_fips, county_fips),
        }, **kwargs)
    @supported_years()
    def state_county_tract(self, fields, state_fips,
                           county_fips, tract, **kwargs):
        return self.get(fields, geo={
            'for': 'tract:{}'.format(tract),
            'in': 'state:{} county:{}'.format(state_fips, county_fips),
        }, **kwargs)
    @supported_years()
    def state_county_blockgroup(self, fields, state_fips, county_fips,
                                blockgroup, tract=None, **kwargs):
        # tract is optional: omit it to query block groups county-wide.
        geo = {
            'for': 'block group:{}'.format(blockgroup),
            'in': 'state:{} county:{}'.format(state_fips, county_fips),
        }
        if tract:
            geo['in'] += ' tract:{}'.format(tract)
        return self.get(fields, geo=geo, **kwargs)
    # The remaining geographies are only available for the 2010 census.
    @supported_years(2010)
    def state_msa(self, fields, state_fips, msa, **kwargs):
        return self.get(fields, geo={
            'for': ('metropolitan statistical area/' +
                    'micropolitan statistical area (or part):{}'.format(msa)),
            'in': 'state:{}'.format(state_fips),
        }, **kwargs)
    @supported_years(2010)
    def state_csa(self, fields, state_fips, csa, **kwargs):
        return self.get(fields, geo={
            'for': 'combined statistical area (or part):{}'.format(csa),
            'in': 'state:{}'.format(state_fips),
        }, **kwargs)
    @supported_years(2010)
    def state_district_place(self, fields, state_fips,
                             district, place, **kwargs):
        return self.get(fields, geo={
            'for': 'place/remainder (or part):{}'.format(place),
            'in': 'state:{} congressional district:{}'.format(
                state_fips, district),
        }, **kwargs)
    @supported_years(2010)
    def state_zipcode(self, fields, state_fips, zcta, **kwargs):
        return self.get(fields, geo={
            'for': 'zip code tabulation area (or part):{}'.format(zcta),
            'in': 'state:{}'.format(state_fips),
        }, **kwargs)
class SF3Client(Client):
    """Decennial Census Summary File 3 client (2000, 1990)."""
    default_year = 2000
    dataset = 'sf3'
    years = (2000, 1990)
    @supported_years()
    def state_county_tract(self, fields, state_fips,
                           county_fips, tract, **kwargs):
        return self.get(fields, geo={
            'for': 'tract:{}'.format(tract),
            'in': 'state:{} county:{}'.format(state_fips, county_fips),
        }, **kwargs)
    @supported_years()
    def state_county_blockgroup(self, fields, state_fips, county_fips,
                                blockgroup, tract=None, **kwargs):
        # tract is optional: omit it to query block groups county-wide.
        geo = {
            'for': 'block group:{}'.format(blockgroup),
            'in': 'state:{} county:{}'.format(state_fips, county_fips),
        }
        if tract:
            geo['in'] += ' tract:{}'.format(tract)
        return self.get(fields, geo=geo, **kwargs)
class Census(object):
    """Facade bundling one client per Census dataset behind a single API key."""
    ALL = ALL
    def __init__(self, key, year=None, session=None):
        if not session:
            session = new_session()
        self.session = session
        # Identify this library to the API on every request.
        self.session.headers.update({
            'User-Agent': ('python-census/{} '.format(__version__) +
                           'github.com/datamade/census')
        })
        self._acs = ACS5Client(key, year, session) # deprecated
        self.acs5 = ACS5Client(key, year, session)
        self.acs3 = ACS3Client(key, year, session)
        self.acs1 = ACS1Client(key, year, session)
        self.acs5dp = ACS5DpClient(key, year, session)
        self.acs3dp = ACS3DpClient(key, year, session)
        self.acs1dp = ACS1DpClient(key, year, session)
        self.sf1 = SF1Client(key, year, session)
        self.sf3 = SF3Client(key, year, session)
    @property
    def acs(self):
        """Deprecated alias for :attr:`acs5`; warns on access."""
        warnings.warn('Use acs5 instead of acs', DeprecationWarning)
        return self._acs
| 32.489642
| 157
| 0.576745
|
4a0fcb350e928f417f0a7f47e2eb917c5298882f
| 10,489
|
py
|
Python
|
packages/w3af/w3af/core/data/constants/vulns.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
packages/w3af/w3af/core/data/constants/vulns.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
packages/w3af/w3af/core/data/constants/vulns.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
"""
vulns.py
Copyright 2012 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
VULNS = {
'TestCase': None,
# Core
'Target redirect': None,
# Audit
'Blind SQL injection vulnerability': 46,
'Buffer overflow vulnerability': None,
'Sensitive CORS methods enabled': None,
'Uncommon CORS methods enabled': None,
'Access-Control-Allow-Origin set to "*"': None,
'Insecure Access-Control-Allow-Origin with credentials': None,
'Insecure Access-Control-Allow-Origin': None,
'Incorrect withCredentials implementation': None,
'CSRF vulnerability': 13,
'Insecure DAV configuration': 52,
'Publicly writable directory': 23,
'DAV incorrect configuration': None,
'Insecure file upload': 65,
'Format string vulnerability': None,
'Insecure Frontpage extensions configuration': 69,
'Insecure redirection': 50,
'Misconfigured access control': 20,
'LDAP injection vulnerability': 30,
'Local file inclusion vulnerability': 17,
'File read error': 73,
'MX injection vulnerability': None,
'OS commanding vulnerability': 36,
'Phishing vector': 74,
'Unsafe preg_replace usage': None,
'ReDoS vulnerability': None,
'Response splitting vulnerability': 41,
'Remote code execution': 42,
'Remote file inclusion': 42,
'Potential remote file inclusion': 42,
'SQL injection': 45,
'Server side include vulnerability': None,
'Persistent server side include vulnerability': None,
'Insecure SSL version': 66,
'Self-signed SSL certificate': 67,
'Invalid SSL connection': None,
'Soon to expire SSL certificate': None,
'SSL Certificate dump': None,
'Secure content over insecure channel': None,
'XPATH injection vulnerability': 54,
'Persistent Cross-Site Scripting vulnerability': 70,
'Cross site scripting vulnerability': 55,
'Cross site tracing vulnerability': 63,
'Parameter modifies response headers': None,
'eval() input injection vulnerability': 6,
'Reflected File Download vulnerability': 71,
'Shell shock vulnerability': 68,
'Rosetta Flash': None,
'Memcache injection vulnerability': None,
# WebSockets
'Insecure WebSocket Origin filter': None,
'Open WebSocket': None,
'Origin restricted WebSocket': None,
'Websockets CSRF vulnerability': None,
# Crawl
'dwsync.xml file found': None,
'phpinfo() file found': None,
'PHP register_globals: On': None,
'PHP allow_url_fopen: On': None,
'PHP allow_url_include: On': None,
'PHP display_errors: On': None,
'PHP expose_php: On': None,
'PHP lowest_privilege_test:fail': None,
'PHP disable_functions:few': None,
'PHP curl_file_support:not_fixed': None,
'PHP cgi_force_redirect: Off': None,
'PHP session.cookie_httponly: Off': None,
'PHP session_save_path:Everyone': None,
'PHP session_use_trans: On': None,
'PHP default_charset: Off': None,
'PHP enable_dl: On': None,
'PHP memory_limit:high': None,
'PHP post_max_size:high': None,
'PHP upload_max_filesize:high': None,
'PHP upload_tmp_dir:Everyone': None,
'PHP file_uploads: On': None,
'PHP magic_quotes_gpc: On': None,
'PHP magic_quotes_gpc: Off': None,
'PHP open_basedir:disabled': None,
'PHP open_basedir:enabled': None,
'PHP session.hash_function:md5': None,
'PHP session.hash_function:sha': None,
'Insecure URL': 9,
'.listing file found': None,
'Operating system username and group leak': None,
'Google hack database match': None,
'Phishing scam': None,
'Source code repository': 14,
'Insecure RIA settings': None,
'Cross-domain allow ACL': None,
'Potential web backdoor': 2,
'Captcha image detected': 5,
'Oracle Application Server': None,
'Potentially interesting file': 4,
'urllist.txt file': None,
'Fingerprinted operating system': None,
'Identified installed application': None,
'robots.txt file': None,
'HTTP Content Negotiation enabled': None,
'Fingerprinted Wordpress version': None,
'Gears manifest resource': None,
'Invalid RIA settings file': None,
'Identified WordPress user': None,
'WordPress path disclosure': None,
'PHP register_globals: Off': None,
'PHP enable_dl: Off': None,
'Web user home directory': None,
# Grep
'US Social Security Number disclosure': 48,
'DOM Cross site scripting': 56,
'Parameter has SQL sentence': None,
'Uncommon query string parameter': None,
'Credit card number disclosure': 12,
'Code disclosure vulnerability': 44,
'Code disclosure vulnerability in 404 page': 44,
'Unhandled error in web application': 73,
'Basic HTTP credentials': None,
'Authentication without www-authenticate header': None,
'NTLM authentication': None,
'HTTP Basic authentication': 77,
'Cookie without HttpOnly': 22,
'Secure cookie over HTTP': None,
'Secure flag missing in HTTPS cookie': 25,
'Secure cookies over insecure channel': None,
'Identified cookie': None,
'Cookie': None,
'Invalid cookie': None,
'Click-Jacking vulnerability': 53,
'Private IP disclosure vulnerability': 40,
'Directory indexing': 15,
'Path disclosure vulnerability': None,
'Missing cache control for HTTPS content': 72,
'SVN user disclosure vulnerability': None,
'HTTP Request in HTTP body': None,
'HTTP Response in HTTP body': None,
'Auto-completable form': 38,
'Session ID in URL': None,
'WSDL resource': None,
'DISCO resource': None,
'Symfony Framework with CSRF protection disabled': None,
'Descriptive error page': 73,
'Multiple descriptive error pages': 73,
'Error page with information disclosure': 73,
'Oracle application server': None,
'Strange header': None,
'Content-Location HTTP header anomaly': None,
'.NET Event Validation is disabled': None,
'.NET ViewState encryption is disabled': None,
'Email address disclosure': 16,
'Interesting HTML comment': None,
'HTML comment contains HTML code': None,
'Strange HTTP response code': 29,
'File upload form': 18,
'Interesting META tag': None,
'User defined regular expression match': None,
'Mark of the web': None,
'Cross-domain javascript source': None,
'Insecure X-XSS-Protection header usage': None,
'Browser plugin content': None,
'Strange HTTP Reason message': None,
'Hash string in HTML content': None,
'Blank http response body': None,
'Content feed resource': None,
'Malware identified': None,
'Insecure password submission over HTTP': 49,
'CSP vulnerability': None,
'Missing X-Content-Type-Options header': 76,
'Missing Strict Transport Security header': 19,
'Missing Expect-CT header': None,
'HTML5 WebSocket detected': None,
'Insecure password form access over HTTP': 49,
# Infrastructure
'Potential XSS vulnerability': None,
'HTTP and HTTPs hop distance': None,
'HTTP traceroute': None,
'Apache Server version': None,
'Shared hosting': None,
'Virtual host identified': None,
'Previous defacements': None,
'Email account': None,
'Internal hostname in HTML link': None,
'Default virtual host': None,
'No DNS wildcard': None,
'DNS wildcard': None,
'Webserver fingerprint': None,
'Web Application Firewall fingerprint': None,
'FrontPage configuration information': None,
'Customized frontpage configuration': None,
'FrontPage FPAdminScriptUrl': None,
'Operating system': None,
'Favicon identification': None,
'Favicon identification failed': None,
'Transparent proxy detected': None,
'PHP Egg': None,
'Fingerprinted PHP version': None,
'Server header': None,
'Omitted server header': None,
'Powered-by header': None,
'Non existent methods default to GET': None,
'DAV methods enabled': None,
'Allowed HTTP methods': 1,
'Active filter detected': None,
'Reverse proxy identified': None,
'HTTP load balancer detected': None,
'Information disclosure via .NET errors': 73,
'Potential virtual host misconfiguration': None,
'MS15-034': None,
'JetLeak': None,
'Werkzeug debugger enabled': None,
# Bruteforce
'Guessable credentials': 75,
# Attack
'DAV Misconfiguration': 23,
'Arbitrary file upload': 65,
'OS Commanding code execution': 36,
'Code execution via remote file inclusion': 42,
'(Blind) SQL injection': 46,
'Arbitrary file read': 17,
'Eval() code execution': 6,
# Users can add their vulnerabilities
'Manually added vulnerability': None,
}
def is_valid_name(name):
    """Check whether *name* matches one of the known vulnerability names."""
    return any(name == known for known in VULNS)
| 40.187739
| 74
| 0.608447
|
4a0fccfcc6ac372268d5aba2aebccd09f27da0d7
| 72,109
|
py
|
Python
|
synapse/handlers/message.py
|
sowieta/synapse
|
bfd7a9b65c5e092c6a7ccdd46e59a278b1cbbd57
|
[
"Apache-2.0"
] | 1
|
2021-12-30T23:47:29.000Z
|
2021-12-30T23:47:29.000Z
|
synapse/handlers/message.py
|
sowieta/synapse
|
bfd7a9b65c5e092c6a7ccdd46e59a278b1cbbd57
|
[
"Apache-2.0"
] | null | null | null |
synapse/handlers/message.py
|
sowieta/synapse
|
bfd7a9b65c5e092c6a7ccdd46e59a278b1cbbd57
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019-2020 The Matrix.org Foundation C.I.C.
# Copyright 2020 Sorunome
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
from http import HTTPStatus
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple
from canonicaljson import encode_canonical_json
from twisted.internet import defer
from twisted.internet.interfaces import IDelayedCall
from synapse import event_auth
from synapse.api.constants import (
EventContentFields,
EventTypes,
GuestAccess,
Membership,
RelationTypes,
UserTypes,
)
from synapse.api.errors import (
AuthError,
Codes,
ConsentNotGivenError,
NotFoundError,
ShadowBanError,
SynapseError,
UnsupportedRoomVersionError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.api.urls import ConsentURIBuilder
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext
from synapse.events.validator import EventValidator
from synapse.handlers.directory import DirectoryHandler
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import Requester, RoomAlias, StreamToken, UserID, create_requester
from synapse.util import json_decoder, json_encoder, log_failure
from synapse.util.async_helpers import Linearizer, unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.metrics import measure_func
from synapse.visibility import filter_events_for_client
if TYPE_CHECKING:
from synapse.events.third_party_rules import ThirdPartyEventRules
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class MessageHandler:
    """Contains some read only APIs to get state about a room"""

    def __init__(self, hs: "HomeServer"):
        self.auth = hs.get_auth()
        self.clock = hs.get_clock()
        self.state = hs.get_state_handler()
        self.store = hs.get_datastore()
        self.storage = hs.get_storage()
        self.state_store = self.storage.state
        self._event_serializer = hs.get_event_client_serializer()
        self._ephemeral_events_enabled = hs.config.server.enable_ephemeral_messages

        # The scheduled call to self._expire_event. None if no call is currently
        # scheduled.
        self._scheduled_expiry: Optional[IDelayedCall] = None

        # Event expiry is only driven from the main process, since expiring an
        # event must invalidate the event cache there.
        if not hs.config.worker.worker_app:
            run_as_background_process(
                "_schedule_next_expiry", self._schedule_next_expiry
            )

    async def get_room_data(
        self,
        user_id: str,
        room_id: str,
        event_type: str,
        state_key: str,
    ) -> Optional[EventBase]:
        """Get data from a room.

        Args:
            user_id: The user requesting the state event.
            room_id: The room to read state from.
            event_type: The type of the state event to look up.
            state_key: The state key of the state event.

        Returns:
            The path data content.

        Raises:
            SynapseError or AuthError if the user is not in the room
        """
        (
            membership,
            membership_event_id,
        ) = await self.auth.check_user_in_room_or_world_readable(
            room_id, user_id, allow_departed_users=True
        )

        if membership == Membership.JOIN:
            # Joined users see the current state.
            data = await self.state.get_current_state(room_id, event_type, state_key)
        elif membership == Membership.LEAVE:
            # Departed users see the state as of their leave event.
            key = (event_type, state_key)
            # If the membership is not JOIN, then the event ID should exist.
            assert (
                membership_event_id is not None
            ), "check_user_in_room_or_world_readable returned invalid data"
            room_state = await self.state_store.get_state_for_events(
                [membership_event_id], StateFilter.from_types([key])
            )
            data = room_state[membership_event_id].get(key)
        else:
            # check_user_in_room_or_world_readable, if it doesn't raise an AuthError, should
            # only ever return a Membership.JOIN/LEAVE object
            #
            # Safeguard in case it returned something else
            logger.error(
                "Attempted to retrieve data from a room for a user that has never been in it. "
                "This should not have happened."
            )
            raise SynapseError(403, "User not in room", errcode=Codes.FORBIDDEN)

        return data

    async def get_state_events(
        self,
        user_id: str,
        room_id: str,
        state_filter: Optional[StateFilter] = None,
        at_token: Optional[StreamToken] = None,
        is_guest: bool = False,
    ) -> List[dict]:
        """Retrieve all state events for a given room. If the user is
        joined to the room then return the current state. If the user has
        left the room return the state events from when they left. If an explicit
        'at' parameter is passed, return the state events as of that event, if
        visible.

        Args:
            user_id: The user requesting state events.
            room_id: The room ID to get all state events from.
            state_filter: The state filter used to fetch state from the database.
            at_token: the stream token of the at which we are requesting
                the state. If the user is not allowed to view the state as of that
                stream token, we raise a 403 SynapseError. If None, returns the current
                state based on the current_state_events table.
            is_guest: whether this user is a guest (currently unused in this
                method's body).

        Returns:
            A list of dicts representing state events. [{}, {}, {}]

        Raises:
            NotFoundError (404) if the at token does not yield an event

            AuthError (403) if the user doesn't have permission to view
            members of this room.
        """
        state_filter = state_filter or StateFilter.all()

        if at_token:
            # FIXME this claims to get the state at a stream position, but
            # get_recent_events_for_room operates by topo ordering. This therefore
            # does not reliably give you the state at the given stream position.
            # (https://github.com/matrix-org/synapse/issues/3305)
            last_events, _ = await self.store.get_recent_events_for_room(
                room_id, end_token=at_token.room_key, limit=1
            )

            if not last_events:
                raise NotFoundError("Can't find event for token %s" % (at_token,))
            last_event = last_events[0]

            # check whether the user is in the room at that time to determine
            # whether they should be treated as peeking.
            state_map = await self.state_store.get_state_for_event(
                last_event.event_id,
                StateFilter.from_types([(EventTypes.Member, user_id)]),
            )

            joined = False
            membership_event = state_map.get((EventTypes.Member, user_id))
            if membership_event:
                joined = membership_event.membership == Membership.JOIN

            is_peeking = not joined

            visible_events = await filter_events_for_client(
                self.storage,
                user_id,
                last_events,
                filter_send_to_client=False,
                is_peeking=is_peeking,
            )

            if visible_events:
                room_state_events = await self.state_store.get_state_for_events(
                    [last_event.event_id], state_filter=state_filter
                )
                room_state: Mapping[Any, EventBase] = room_state_events[
                    last_event.event_id
                ]
            else:
                raise AuthError(
                    403,
                    "User %s not allowed to view events in room %s at token %s"
                    % (user_id, room_id, at_token),
                )
        else:
            (
                membership,
                membership_event_id,
            ) = await self.auth.check_user_in_room_or_world_readable(
                room_id, user_id, allow_departed_users=True
            )

            if membership == Membership.JOIN:
                state_ids = await self.store.get_filtered_current_state_ids(
                    room_id, state_filter=state_filter
                )
                room_state = await self.store.get_events(state_ids.values())
            elif membership == Membership.LEAVE:
                # If the membership is not JOIN, then the event ID should exist.
                assert (
                    membership_event_id is not None
                ), "check_user_in_room_or_world_readable returned invalid data"
                room_state_events = await self.state_store.get_state_for_events(
                    [membership_event_id], state_filter=state_filter
                )
                room_state = room_state_events[membership_event_id]

        # NOTE(review): unlike get_room_data, there is no `else` branch above
        # raising on an unexpected membership, so if
        # check_user_in_room_or_world_readable ever returned something other
        # than JOIN/LEAVE, `room_state` would be unbound here (NameError) —
        # confirm whether an explicit 403 should be raised instead.
        now = self.clock.time_msec()
        events = await self._event_serializer.serialize_events(
            room_state.values(),
            now,
            # We don't bother bundling aggregations in when asked for state
            # events, as clients won't use them.
            bundle_aggregations=False,
        )
        return events

    async def get_joined_members(self, requester: Requester, room_id: str) -> dict:
        """Get all the joined members in the room and their profile information.

        If the user has left the room return the state events from when they left.

        Args:
            requester: The user requesting state events.
            room_id: The room ID to get all state events from.

        Returns:
            A dict of user_id to profile info
        """
        user_id = requester.user.to_string()
        if not requester.app_service:
            # We check AS auth after fetching the room membership, as it
            # requires us to pull out all joined members anyway.
            membership, _ = await self.auth.check_user_in_room_or_world_readable(
                room_id, user_id, allow_departed_users=True
            )
            if membership != Membership.JOIN:
                raise NotImplementedError(
                    "Getting joined members after leaving is not implemented"
                )

        users_with_profile = await self.store.get_users_in_room_with_profiles(room_id)

        # If this is an AS, double check that they are allowed to see the members.
        # This can either be because the AS user is in the room or because there
        # is a user in the room that the AS is "interested in"
        if requester.app_service and user_id not in users_with_profile:
            for uid in users_with_profile:
                if requester.app_service.is_interested_in_user(uid):
                    break
            else:
                # Loop fell through, AS has no interested users in room
                raise AuthError(403, "Appservice not in room")

        return {
            user_id: {
                "avatar_url": profile.avatar_url,
                "display_name": profile.display_name,
            }
            for user_id, profile in users_with_profile.items()
        }

    def maybe_schedule_expiry(self, event: EventBase) -> None:
        """Schedule the expiry of an event if there's not already one scheduled,
        or if the one running is for an event that will expire after the provided
        timestamp.

        This function needs to invalidate the event cache, which is only possible on
        the master process, and therefore needs to be run on there.

        Args:
            event: The event to schedule the expiry of.
        """
        expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER)
        # Only non-state events with a valid integer self-destruct timestamp
        # are ever expired.
        if not isinstance(expiry_ts, int) or event.is_state():
            return

        # _schedule_expiry_for_event won't actually schedule anything if there's already
        # a task scheduled for a timestamp that's sooner than the provided one.
        self._schedule_expiry_for_event(event.event_id, expiry_ts)

    async def _schedule_next_expiry(self) -> None:
        """Retrieve the ID and the expiry timestamp of the next event to be expired,
        and schedule an expiry task for it.

        If there's no event left to expire, set _expiry_scheduled to None so that a
        future call to save_expiry_ts can schedule a new expiry task.
        """
        # Try to get the expiry timestamp of the next event to expire.
        res = await self.store.get_next_event_to_expire()
        if res:
            event_id, expiry_ts = res
            self._schedule_expiry_for_event(event_id, expiry_ts)

    def _schedule_expiry_for_event(self, event_id: str, expiry_ts: int) -> None:
        """Schedule an expiry task for the provided event if there's not already one
        scheduled at a timestamp that's sooner than the provided one.

        Args:
            event_id: The ID of the event to expire.
            expiry_ts: The timestamp at which to expire the event.
        """
        if self._scheduled_expiry:
            # If the provided timestamp refers to a time before the scheduled time of the
            # next expiry task, cancel that task and reschedule it for this timestamp.
            # getTime() is in seconds; multiply by 1000 to compare with the
            # millisecond expiry_ts.
            next_scheduled_expiry_ts = self._scheduled_expiry.getTime() * 1000
            if expiry_ts < next_scheduled_expiry_ts:
                self._scheduled_expiry.cancel()
            else:
                return

        # Figure out how many seconds we need to wait before expiring the event.
        now_ms = self.clock.time_msec()
        delay = (expiry_ts - now_ms) / 1000

        # callLater doesn't support negative delays, so trim the delay to 0 if we're
        # in that case.
        if delay < 0:
            delay = 0

        logger.info("Scheduling expiry for event %s in %.3fs", event_id, delay)

        self._scheduled_expiry = self.clock.call_later(
            delay,
            run_as_background_process,
            "_expire_event",
            self._expire_event,
            event_id,
        )

    async def _expire_event(self, event_id: str) -> None:
        """Retrieve and expire an event that needs to be expired from the database.

        If the event doesn't exist in the database, log it and delete the expiry date
        from the database (so that we don't try to expire it again).
        """
        assert self._ephemeral_events_enabled

        # Clear the handle so maybe_schedule_expiry can schedule a fresh task.
        self._scheduled_expiry = None

        logger.info("Expiring event %s", event_id)

        try:
            # Expire the event if we know about it. This function also deletes the expiry
            # date from the database in the same database transaction.
            await self.store.expire_event(event_id)
        except Exception as e:
            logger.error("Could not expire event %s: %r", event_id, e)

        # Schedule the expiry of the next event to expire.
        await self._schedule_next_expiry()
# The duration (in ms) after which rooms should be removed from
# `_rooms_to_exclude_from_dummy_event_insertion` (with the effect that we will try
# to generate a dummy event for them once more).
#
# 7 days, expressed in milliseconds.
_DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY = 7 * 24 * 60 * 60 * 1000
class EventCreationHandler:
    def __init__(self, hs: "HomeServer"):
        """Wire up the handler's dependencies from the homeserver object."""
        self.hs = hs
        self.auth = hs.get_auth()
        self._event_auth_handler = hs.get_event_auth_handler()
        self.store = hs.get_datastore()
        self.storage = hs.get_storage()
        self.state = hs.get_state_handler()
        self.clock = hs.get_clock()
        self.validator = EventValidator()
        self.profile_handler = hs.get_profile_handler()
        self.event_builder_factory = hs.get_event_builder_factory()
        self.server_name = hs.hostname
        self.notifier = hs.get_notifier()
        self.config = hs.config
        self.require_membership_for_aliases = (
            hs.config.server.require_membership_for_aliases
        )
        self._events_shard_config = self.config.worker.events_shard_config
        self._instance_name = hs.get_instance_name()

        self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state

        # Membership transitions whose events get display name / avatar added.
        self.membership_types_to_include_profile_data_in = {
            Membership.JOIN,
            Membership.KNOCK,
        }
        if self.hs.config.server.include_profile_data_on_invite:
            self.membership_types_to_include_profile_data_in.add(Membership.INVITE)

        self.send_event = ReplicationSendEventRestServlet.make_client(hs)

        self.request_ratelimiter = hs.get_request_ratelimiter()

        # We arbitrarily limit concurrent event creation for a room to 5.
        # This is to stop us from diverging history *too* much.
        self.limiter = Linearizer(max_count=5, name="room_event_creation_limit")

        self.action_generator = hs.get_action_generator()

        self.spam_checker = hs.get_spam_checker()
        self.third_party_event_rules: "ThirdPartyEventRules" = (
            self.hs.get_third_party_event_rules()
        )

        self._block_events_without_consent_error = (
            self.config.consent.block_events_without_consent_error
        )

        # we need to construct a ConsentURIBuilder here, as it checks the necessary
        # config options are present, but *only* if we have a configuration for
        # which we are going to need it.
        if self._block_events_without_consent_error:
            self._consent_uri_builder = ConsentURIBuilder(self.config)

        # Rooms which should be excluded from dummy insertion. (For instance,
        # those without local users who can send events into the room).
        #
        # map from room id to time-of-last-attempt.
        #
        self._rooms_to_exclude_from_dummy_event_insertion: Dict[str, int] = {}
        # The number of forward extremities before a dummy event is sent.
        self._dummy_events_threshold = hs.config.server.dummy_events_threshold

        if (
            self.config.worker.run_background_tasks
            and self.config.server.cleanup_extremities_with_dummy_events
        ):
            # Periodically (every 5 minutes) clean up extremities by sending
            # dummy events, but only on the background-tasks worker.
            self.clock.looping_call(
                lambda: run_as_background_process(
                    "send_dummy_events_to_fill_extremities",
                    self._send_dummy_events_to_fill_extremities,
                ),
                5 * 60 * 1000,
            )

        self._message_handler = hs.get_message_handler()

        self._ephemeral_events_enabled = hs.config.server.enable_ephemeral_messages

        self._external_cache = hs.get_external_cache()

        # Stores the state groups we've recently added to the joined hosts
        # external cache. Note that the timeout must be significantly less than
        # the TTL on the external cache.
        self._external_cache_joined_hosts_updates: Optional[ExpiringCache] = None
        if self._external_cache.is_enabled():
            self._external_cache_joined_hosts_updates = ExpiringCache(
                "_external_cache_joined_hosts_updates",
                self.clock,
                expiry_ms=30 * 60 * 1000,
            )
    async def create_event(
        self,
        requester: Requester,
        event_dict: dict,
        txn_id: Optional[str] = None,
        prev_event_ids: Optional[List[str]] = None,
        auth_event_ids: Optional[List[str]] = None,
        require_consent: bool = True,
        outlier: bool = False,
        historical: bool = False,
        depth: Optional[int] = None,
    ) -> Tuple[EventBase, EventContext]:
        """
        Given a dict from a client, create a new event.

        Creates a FrozenEvent object, filling out auth_events, prev_events,
        etc.

        Adds display names to Join membership events.

        Args:
            requester
            event_dict: An entire event
            txn_id
            prev_event_ids:
                the forward extremities to use as the prev_events for the
                new event.

                If None, they will be requested from the database.
            auth_event_ids:
                The event ids to use as the auth_events for the new event.
                Should normally be left as None, which will cause them to be calculated
                based on the room state at the prev_events.

                If non-None, prev_event_ids must also be provided.
            require_consent: Whether to check if the requester has
                consented to the privacy policy.
            outlier: Indicates whether the event is an `outlier`, i.e. if
                it's from an arbitrary point and floating in the DAG as
                opposed to being inline with the current DAG.
            historical: Indicates whether the message is being inserted
                back in time around some existing events. This is used to skip
                a few checks and mark the event as backfilled.
            depth: Override the depth used to order the event in the DAG.
                Should normally be set to None, which will cause the depth to be calculated
                based on the prev_events.

        Raises:
            ResourceLimitError if server is blocked to some resource being
            exceeded

        Returns:
            Tuple of created event, Context
        """
        await self.auth.check_auth_blocking(requester=requester)

        if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
            # For a room-creation event the room version comes from the event
            # content itself (there is no room to look it up in yet).
            room_version_id = event_dict["content"]["room_version"]
            room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
            if not room_version_obj:
                # this can happen if support is withdrawn for a room version
                raise UnsupportedRoomVersionError(room_version_id)
        else:
            try:
                room_version_obj = await self.store.get_room_version(
                    event_dict["room_id"]
                )
            except NotFoundError:
                raise AuthError(403, "Unknown room")

        builder = self.event_builder_factory.for_room_version(
            room_version_obj, event_dict
        )

        self.validator.validate_builder(builder)

        if builder.type == EventTypes.Member:
            membership = builder.content.get("membership", None)
            target = UserID.from_string(builder.state_key)

            if membership in self.membership_types_to_include_profile_data_in:
                # If event doesn't include a display name, add one.
                profile = self.profile_handler
                content = builder.content

                try:
                    if "displayname" not in content:
                        displayname = await profile.get_displayname(target)
                        if displayname is not None:
                            content["displayname"] = displayname
                    if "avatar_url" not in content:
                        avatar_url = await profile.get_avatar_url(target)
                        if avatar_url is not None:
                            content["avatar_url"] = avatar_url
                except Exception as e:
                    # Best-effort: a profile lookup failure must not block the
                    # membership event itself.
                    logger.info(
                        "Failed to get profile information for %r: %s", target, e
                    )

        is_exempt = await self._is_exempt_from_privacy_policy(builder, requester)
        if require_consent and not is_exempt:
            await self.assert_accepted_privacy_policy(requester)

        if requester.access_token_id is not None:
            builder.internal_metadata.token_id = requester.access_token_id

        if txn_id is not None:
            builder.internal_metadata.txn_id = txn_id

        builder.internal_metadata.outlier = outlier

        builder.internal_metadata.historical = historical

        event, context = await self.create_new_client_event(
            builder=builder,
            requester=requester,
            prev_event_ids=prev_event_ids,
            auth_event_ids=auth_event_ids,
            depth=depth,
        )

        # In an ideal world we wouldn't need the second part of this condition. However,
        # this behaviour isn't spec'd yet, meaning we should be able to deactivate this
        # behaviour. Another reason is that this code is also evaluated each time a new
        # m.room.aliases event is created, which includes hitting a /directory route.
        # Therefore not including this condition here would render the similar one in
        # synapse.handlers.directory pointless.
        if builder.type == EventTypes.Aliases and self.require_membership_for_aliases:
            # Ideally we'd do the membership check in event_auth.check(), which
            # describes a spec'd algorithm for authenticating events received over
            # federation as well as those created locally. As of room v3, aliases events
            # can be created by users that are not in the room, therefore we have to
            # tolerate them in event_auth.check().
            prev_state_ids = await context.get_prev_state_ids()
            prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
            prev_event = (
                await self.store.get_event(prev_event_id, allow_none=True)
                if prev_event_id
                else None
            )
            if not prev_event or prev_event.membership != Membership.JOIN:
                logger.warning(
                    (
                        "Attempt to send `m.room.aliases` in room %s by user %s but"
                        " membership is %s"
                    ),
                    event.room_id,
                    event.sender,
                    prev_event.membership if prev_event else None,
                )

                raise AuthError(
                    403, "You must be in the room to create an alias for it"
                )

        self.validator.validate_new(event, self.config)

        return event, context
async def _is_exempt_from_privacy_policy(
self, builder: EventBuilder, requester: Requester
) -> bool:
""" "Determine if an event to be sent is exempt from having to consent
to the privacy policy
Args:
builder: event being created
requester: user requesting this event
Returns:
true if the event can be sent without the user consenting
"""
# the only thing the user can do is join the server notices room.
if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
if membership == Membership.JOIN:
return await self._is_server_notices_room(builder.room_id)
elif membership == Membership.LEAVE:
# the user is always allowed to leave (but not kick people)
return builder.state_key == requester.user.to_string()
return False
async def _is_server_notices_room(self, room_id: str) -> bool:
if self.config.servernotices.server_notices_mxid is None:
return False
user_ids = await self.store.get_users_in_room(room_id)
return self.config.servernotices.server_notices_mxid in user_ids
async def assert_accepted_privacy_policy(self, requester: Requester) -> None:
"""Check if a user has accepted the privacy policy
Called when the given user is about to do something that requires
privacy consent. We see if the user is exempt and otherwise check that
they have given consent. If they have not, a ConsentNotGiven error is
raised.
Args:
requester: The user making the request
Returns:
Returns normally if the user has consented or is exempt
Raises:
ConsentNotGivenError: if the user has not given consent yet
"""
if self._block_events_without_consent_error is None:
return
# exempt AS users from needing consent
if requester.app_service is not None:
return
user_id = requester.authenticated_entity
if not user_id.startswith("@"):
# The authenticated entity might not be a user, e.g. if it's the
# server puppetting the user.
return
user = UserID.from_string(user_id)
# exempt the system notices user
if (
self.config.servernotices.server_notices_mxid is not None
and user_id == self.config.servernotices.server_notices_mxid
):
return
u = await self.store.get_user_by_id(user_id)
assert u is not None
if u["user_type"] in (UserTypes.SUPPORT, UserTypes.BOT):
# support and bot users are not required to consent
return
if u["appservice_id"] is not None:
# users registered by an appservice are exempt
return
if u["consent_version"] == self.config.consent.user_consent_version:
return
consent_uri = self._consent_uri_builder.build_user_consent_uri(user.localpart)
msg = self._block_events_without_consent_error % {"consent_uri": consent_uri}
raise ConsentNotGivenError(msg=msg, consent_uri=consent_uri)
async def deduplicate_state_event(
self, event: EventBase, context: EventContext
) -> Optional[EventBase]:
"""
Checks whether event is in the latest resolved state in context.
Args:
event: The event to check for duplication.
context: The event context.
Returns:
The previous version of the event is returned, if it is found in the
event context. Otherwise, None is returned.
"""
prev_state_ids = await context.get_prev_state_ids()
prev_event_id = prev_state_ids.get((event.type, event.state_key))
if not prev_event_id:
return None
prev_event = await self.store.get_event(prev_event_id, allow_none=True)
if not prev_event:
return None
if prev_event and event.user_id == prev_event.user_id:
prev_content = encode_canonical_json(prev_event.content)
next_content = encode_canonical_json(event.content)
if prev_content == next_content:
return prev_event
return None
    async def create_and_send_nonmember_event(
        self,
        requester: Requester,
        event_dict: dict,
        prev_event_ids: Optional[List[str]] = None,
        auth_event_ids: Optional[List[str]] = None,
        ratelimit: bool = True,
        txn_id: Optional[str] = None,
        ignore_shadow_ban: bool = False,
        outlier: bool = False,
        historical: bool = False,
        depth: Optional[int] = None,
    ) -> Tuple[EventBase, int]:
        """
        Creates an event, then sends it.

        See self.create_event and self.handle_new_client_event.

        Args:
            requester: The requester sending the event.
            event_dict: An entire event.
            prev_event_ids:
                The event IDs to use as the prev events.
                Should normally be left as None to automatically request them
                from the database.
            auth_event_ids:
                The event ids to use as the auth_events for the new event.
                Should normally be left as None, which will cause them to be calculated
                based on the room state at the prev_events.

                If non-None, prev_event_ids must also be provided.
            ratelimit: Whether to rate limit this send.
            txn_id: The transaction ID.
            ignore_shadow_ban: True if shadow-banned users should be allowed to
                send this event.
            outlier: Indicates whether the event is an `outlier`, i.e. if
                it's from an arbitrary point and floating in the DAG as
                opposed to being inline with the current DAG.
            historical: Indicates whether the message is being inserted
                back in time around some existing events. This is used to skip
                a few checks and mark the event as backfilled.
            depth: Override the depth used to order the event in the DAG.
                Should normally be set to None, which will cause the depth to be calculated
                based on the prev_events.

        Returns:
            The event, and its stream ordering (if deduplication happened,
            the previous, duplicate event).

        Raises:
            ShadowBanError if the requester has been shadow-banned.
        """

        if event_dict["type"] == EventTypes.Member:
            raise SynapseError(
                500, "Tried to send member event through non-member codepath"
            )

        if not ignore_shadow_ban and requester.shadow_banned:
            # We randomly sleep a bit just to annoy the requester.
            await self.clock.sleep(random.randint(1, 10))
            raise ShadowBanError()

        # We limit the number of concurrent event sends in a room so that we
        # don't fork the DAG too much. If we don't limit then we can end up in
        # a situation where event persistence can't keep up, causing
        # extremities to pile up, which in turn leads to state resolution
        # taking longer.
        with (await self.limiter.queue(event_dict["room_id"])):
            if txn_id and requester.access_token_id:
                # Deduplicate retries of the same client transaction: if we've
                # already persisted an event for this (room, user, token, txn),
                # return it instead of creating a duplicate.
                existing_event_id = await self.store.get_event_id_from_transaction_id(
                    event_dict["room_id"],
                    requester.user.to_string(),
                    requester.access_token_id,
                    txn_id,
                )
                if existing_event_id:
                    event = await self.store.get_event(existing_event_id)
                    # we know it was persisted, so must have a stream ordering
                    assert event.internal_metadata.stream_ordering
                    return event, event.internal_metadata.stream_ordering

            event, context = await self.create_event(
                requester,
                event_dict,
                txn_id=txn_id,
                prev_event_ids=prev_event_ids,
                auth_event_ids=auth_event_ids,
                outlier=outlier,
                historical=historical,
                depth=depth,
            )

            assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % (
                event.sender,
            )

            spam_error = await self.spam_checker.check_event_for_spam(event)
            if spam_error:
                if not isinstance(spam_error, str):
                    spam_error = "Spam is not permitted here"
                raise SynapseError(403, spam_error, Codes.FORBIDDEN)

            ev = await self.handle_new_client_event(
                requester=requester,
                event=event,
                context=context,
                ratelimit=ratelimit,
                ignore_shadow_ban=ignore_shadow_ban,
            )

        # we know it was persisted, so must have a stream ordering
        assert ev.internal_metadata.stream_ordering
        return ev, ev.internal_metadata.stream_ordering
    @measure_func("create_new_client_event")
    async def create_new_client_event(
        self,
        builder: EventBuilder,
        requester: Optional[Requester] = None,
        prev_event_ids: Optional[List[str]] = None,
        auth_event_ids: Optional[List[str]] = None,
        depth: Optional[int] = None,
    ) -> Tuple[EventBase, EventContext]:
        """Create a new event for a local client

        Args:
            builder:
            requester:
            prev_event_ids:
                the forward extremities to use as the prev_events for the
                new event.

                If None, they will be requested from the database.
            auth_event_ids:
                The event ids to use as the auth_events for the new event.
                Should normally be left as None, which will cause them to be calculated
                based on the room state at the prev_events.
            depth: Override the depth used to order the event in the DAG.
                Should normally be set to None, which will cause the depth to be calculated
                based on the prev_events.

        Returns:
            Tuple of created event, context
        """
        # Strip down the auth_event_ids to only what we need to auth the event.
        # For example, we don't need extra m.room.member that don't match event.sender
        full_state_ids_at_event = None
        if auth_event_ids is not None:
            # If auth events are provided, prev events must be also.
            assert prev_event_ids is not None

            # Copy the full auth state before it is stripped down
            full_state_ids_at_event = auth_event_ids.copy()

            # Build a temporary event just so compute_auth_events can work out
            # which of the provided auth events are actually needed.
            temp_event = await builder.build(
                prev_event_ids=prev_event_ids,
                auth_event_ids=auth_event_ids,
                depth=depth,
            )
            auth_events = await self.store.get_events_as_list(auth_event_ids)
            # Create a StateMap[str]
            auth_event_state_map = {
                (e.type, e.state_key): e.event_id for e in auth_events
            }
            # Actually strip down and use the necessary auth events
            auth_event_ids = self._event_auth_handler.compute_auth_events(
                event=temp_event,
                current_state_ids=auth_event_state_map,
                for_verification=False,
            )

        if prev_event_ids is not None:
            assert (
                len(prev_event_ids) <= 10
            ), "Attempting to create an event with %i prev_events" % (
                len(prev_event_ids),
            )
        else:
            prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)

        # we now ought to have some prev_events (unless it's a create event).
        #
        # do a quick sanity check here, rather than waiting until we've created the
        # event and then try to auth it (which fails with a somewhat confusing "No
        # create event in auth events")
        assert (
            builder.type == EventTypes.Create or len(prev_event_ids) > 0
        ), "Attempting to create an event with no prev_events"

        event = await builder.build(
            prev_event_ids=prev_event_ids,
            auth_event_ids=auth_event_ids,
            depth=depth,
        )

        # Pass on the outlier property from the builder to the event
        # after it is created
        if builder.internal_metadata.outlier:
            event.internal_metadata.outlier = True
            context = EventContext.for_outlier()
        elif (
            event.type == EventTypes.MSC2716_INSERTION
            and full_state_ids_at_event
            and builder.internal_metadata.is_historical()
        ):
            # Historical MSC2716 insertion events get their context computed
            # from the full (unstripped) auth state provided by the caller.
            old_state = await self.store.get_events_as_list(full_state_ids_at_event)
            context = await self.state.compute_event_context(event, old_state=old_state)
        else:
            context = await self.state.compute_event_context(event)

        if requester:
            context.app_service = requester.app_service

        res, new_content = await self.third_party_event_rules.check_event_allowed(
            event, context
        )
        if res is False:
            logger.info(
                "Event %s forbidden by third-party rules",
                event,
            )
            raise SynapseError(
                403, "This event is not allowed in this context", Codes.FORBIDDEN
            )
        elif new_content is not None:
            # the third-party rules want to replace the event. We'll need to build a new
            # event.
            event, context = await self._rebuild_event_after_third_party_rules(
                new_content, event
            )

        self.validator.validate_new(event, self.config)

        # If this event is an annotation then we check that the sender
        # can't annotate the same way twice (e.g. stops users from liking an
        # event multiple times).
        relation = event.content.get("m.relates_to", {})
        if relation.get("rel_type") == RelationTypes.ANNOTATION:
            relates_to = relation["event_id"]
            aggregation_key = relation["key"]

            already_exists = await self.store.has_user_annotated_event(
                relates_to, event.type, aggregation_key, event.sender
            )
            if already_exists:
                raise SynapseError(400, "Can't send same reaction twice")

        logger.debug("Created event %s", event.event_id)

        return event, context
    @measure_func("handle_new_client_event")
    async def handle_new_client_event(
        self,
        requester: Requester,
        event: EventBase,
        context: EventContext,
        ratelimit: bool = True,
        extra_users: Optional[List[UserID]] = None,
        ignore_shadow_ban: bool = False,
    ) -> EventBase:
        """Processes a new event.
        This includes deduplicating, checking auth, persisting,
        notifying users, sending to remote servers, etc.
        If called from a worker will hit out to the master process for final
        processing.
        Args:
            requester: the user on whose behalf the event is being sent.
            event: the fully-built event to process.
            context: the event's context (state before/after the event).
            ratelimit: whether to apply rate limiting to this send.
            extra_users: Any extra users to notify about event
            ignore_shadow_ban: True if shadow-banned users should be allowed to
                send this event.
        Return:
            If the event was deduplicated, the previous, duplicate, event. Otherwise,
            `event`.
        Raises:
            ShadowBanError if the requester has been shadow-banned.
        """
        extra_users = extra_users or []
        # we don't apply shadow-banning to membership events here. Invites are blocked
        # higher up the stack, and we allow shadow-banned users to send join and leave
        # events as normal.
        if (
            event.type != EventTypes.Member
            and not ignore_shadow_ban
            and requester.shadow_banned
        ):
            # We randomly sleep a bit just to annoy the requester.
            await self.clock.sleep(random.randint(1, 10))
            raise ShadowBanError()
        if event.is_state():
            # If an identical state event is already in place, return it rather
            # than persisting a duplicate.
            prev_event = await self.deduplicate_state_event(event, context)
            if prev_event is not None:
                logger.info(
                    "Not bothering to persist state event %s duplicated by %s",
                    event.event_id,
                    prev_event.event_id,
                )
                return prev_event
        if event.is_state() and (event.type, event.state_key) == (
            EventTypes.Create,
            "",
        ):
            # For a room-creation event the room version comes from the event
            # content itself (the room does not exist in the DB yet).
            room_version_id = event.content.get(
                "room_version", RoomVersions.V1.identifier
            )
            room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
            if not room_version_obj:
                raise UnsupportedRoomVersionError(
                    "Attempt to create a room with unsupported room version %s"
                    % (room_version_id,)
                )
        else:
            room_version_obj = await self.store.get_room_version(event.room_id)
        if event.internal_metadata.is_out_of_band_membership():
            # the only sort of out-of-band-membership events we expect to see here are
            # invite rejections and rescinded knocks that we have generated ourselves.
            assert event.type == EventTypes.Member
            assert event.content["membership"] == Membership.LEAVE
        else:
            try:
                validate_event_for_room_version(room_version_obj, event)
                await self._event_auth_handler.check_auth_rules_from_context(
                    room_version_obj, event, context
                )
            except AuthError as err:
                logger.warning("Denying new event %r because %s", event, err)
                raise err
        # Ensure that we can round trip before trying to persist in db
        try:
            dump = json_encoder.encode(event.content)
            json_decoder.decode(dump)
        except Exception:
            logger.exception("Failed to encode content: %r", event.content)
            raise
        # We now persist the event (and update the cache in parallel, since we
        # don't want to block on it).
        result = await make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        self._persist_event,
                        requester=requester,
                        event=event,
                        context=context,
                        ratelimit=ratelimit,
                        extra_users=extra_users,
                    ),
                    # Cache warming is best-effort: a failure is logged, never
                    # propagated to the caller.
                    run_in_background(
                        self.cache_joined_hosts_for_event, event, context
                    ).addErrback(log_failure, "cache_joined_hosts_for_event failed"),
                ],
                consumeErrors=True,
            )
        ).addErrback(unwrapFirstError)
        return result[0]
    async def _persist_event(
        self,
        requester: Requester,
        event: EventBase,
        context: EventContext,
        ratelimit: bool = True,
        extra_users: Optional[List[UserID]] = None,
    ) -> EventBase:
        """Actually persists the event. Should only be called by
        `handle_new_client_event`, and see its docstring for documentation of
        the arguments.
        Returns the persisted event, which may differ from `event` if another
        worker had already persisted a duplicate.
        """
        # Skip push notification actions for historical messages
        # because we don't want to notify people about old history back in time.
        # The historical messages also do not have the proper `context.current_state_ids`
        # and `state_groups` because they have `prev_events` that aren't persisted yet
        # (historical messages persisted in reverse-chronological order).
        if not event.internal_metadata.is_historical():
            await self.action_generator.handle_push_actions_for_event(event, context)
        try:
            # If we're a worker we need to hit out to the master.
            writer_instance = self._events_shard_config.get_instance(event.room_id)
            if writer_instance != self._instance_name:
                # Forward to the instance responsible for persisting this room.
                result = await self.send_event(
                    instance_name=writer_instance,
                    event_id=event.event_id,
                    store=self.store,
                    requester=requester,
                    event=event,
                    context=context,
                    ratelimit=ratelimit,
                    extra_users=extra_users,
                )
                stream_id = result["stream_id"]
                event_id = result["event_id"]
                if event_id != event.event_id:
                    # If we get a different event back then it means that its
                    # been de-duplicated, so we replace the given event with the
                    # one already persisted.
                    event = await self.store.get_event(event_id)
                else:
                    # If we newly persisted the event then we need to update its
                    # stream_ordering entry manually (as it was persisted on
                    # another worker).
                    event.internal_metadata.stream_ordering = stream_id
                return event
            event = await self.persist_and_notify_client_event(
                requester, event, context, ratelimit=ratelimit, extra_users=extra_users
            )
            return event
        except Exception:
            # Ensure that we actually remove the entries in the push actions
            # staging area, if we calculated them.
            await self.store.remove_push_actions_from_staging(event.event_id)
            raise
    async def cache_joined_hosts_for_event(
        self, event: EventBase, context: EventContext
    ) -> None:
        """Precalculate the joined hosts at the event, when using Redis, so that
        external federation senders don't have to recalculate it themselves.
        No-op when the external cache is disabled.
        """
        if not self._external_cache.is_enabled():
            return
        # If external cache is enabled we should always have this.
        assert self._external_cache_joined_hosts_updates is not None
        # We actually store two mappings, event ID -> prev state group,
        # state group -> joined hosts, which is much more space efficient
        # than event ID -> joined hosts.
        #
        # Note: We have to cache event ID -> prev state group, as we don't
        # store that in the DB.
        #
        # Note: We set the state group -> joined hosts cache if it hasn't been
        # set for a while, so that the expiry time is reset.
        state_entry = await self.state.resolve_state_groups_for_events(
            event.room_id, event_ids=event.prev_event_ids()
        )
        if state_entry.state_group:
            await self._external_cache.set(
                "event_to_prev_state_group",
                event.event_id,
                state_entry.state_group,
                expiry_ms=60 * 60 * 1000,
            )
            if state_entry.state_group in self._external_cache_joined_hosts_updates:
                # Recently refreshed; skip the (potentially expensive) recompute.
                return
            joined_hosts = await self.store.get_joined_hosts(event.room_id, state_entry)
            # Note that the expiry times must be larger than the expiry time in
            # _external_cache_joined_hosts_updates.
            await self._external_cache.set(
                "get_joined_hosts",
                str(state_entry.state_group),
                list(joined_hosts),
                expiry_ms=60 * 60 * 1000,
            )
            self._external_cache_joined_hosts_updates[state_entry.state_group] = None
async def _validate_canonical_alias(
self,
directory_handler: DirectoryHandler,
room_alias_str: str,
expected_room_id: str,
) -> None:
"""
Ensure that the given room alias points to the expected room ID.
Args:
directory_handler: The directory handler object.
room_alias_str: The room alias to check.
expected_room_id: The room ID that the alias should point to.
"""
room_alias = RoomAlias.from_string(room_alias_str)
try:
mapping = await directory_handler.get_association(room_alias)
except SynapseError as e:
# Turn M_NOT_FOUND errors into M_BAD_ALIAS errors.
if e.errcode == Codes.NOT_FOUND:
raise SynapseError(
400,
"Room alias %s does not point to the room" % (room_alias_str,),
Codes.BAD_ALIAS,
)
raise
if mapping["room_id"] != expected_room_id:
raise SynapseError(
400,
"Room alias %s does not point to the room" % (room_alias_str,),
Codes.BAD_ALIAS,
)
    async def persist_and_notify_client_event(
        self,
        requester: Requester,
        event: EventBase,
        context: EventContext,
        ratelimit: bool = True,
        extra_users: Optional[List[UserID]] = None,
    ) -> EventBase:
        """Called when we have fully built the event, have already
        calculated the push actions for the event, and checked auth.
        This should only be run on the instance in charge of persisting events.
        Args:
            requester: the user sending the event.
            event: the fully-built event.
            context: the event's context.
            ratelimit: whether to apply rate limiting to this send.
            extra_users: any extra users to explicitly notify about the event.
        Returns:
            The persisted event. This may be different than the given event if
            it was de-duplicated (e.g. because we had already persisted an
            event with the same transaction ID.)
        """
        extra_users = extra_users or []
        assert self.storage.persistence is not None
        assert self._events_shard_config.should_handle(
            self._instance_name, event.room_id
        )
        if ratelimit:
            # We check if this is a room admin redacting an event so that we
            # can apply different ratelimiting. We do this by simply checking
            # it's not a self-redaction (to avoid having to look up whether the
            # user is actually admin or not).
            is_admin_redaction = False
            if event.type == EventTypes.Redaction:
                original_event = await self.store.get_event(
                    event.redacts,
                    redact_behaviour=EventRedactBehaviour.AS_IS,
                    get_prev_content=False,
                    allow_rejected=False,
                    allow_none=True,
                )
                is_admin_redaction = bool(
                    original_event and event.sender != original_event.sender
                )
            await self.request_ratelimiter.ratelimit(
                requester, is_admin_redaction=is_admin_redaction
            )
        # If this event turns off guest access, kick any remaining guests.
        await self._maybe_kick_guest_users(event, context)
        if event.type == EventTypes.CanonicalAlias:
            # Validate a newly added alias or newly added alt_aliases.
            original_alias = None
            original_alt_aliases: List[str] = []
            original_event_id = event.unsigned.get("replaces_state")
            if original_event_id:
                original_event = await self.store.get_event(original_event_id)
                if original_event:
                    original_alias = original_event.content.get("alias", None)
                    original_alt_aliases = original_event.content.get("alt_aliases", [])
            # Check the alias is currently valid (if it has changed).
            room_alias_str = event.content.get("alias", None)
            directory_handler = self.hs.get_directory_handler()
            if room_alias_str and room_alias_str != original_alias:
                await self._validate_canonical_alias(
                    directory_handler, room_alias_str, event.room_id
                )
            # Check that alt_aliases is the proper form.
            alt_aliases = event.content.get("alt_aliases", [])
            if not isinstance(alt_aliases, (list, tuple)):
                raise SynapseError(
                    400, "The alt_aliases property must be a list.", Codes.INVALID_PARAM
                )
            # If the old version of alt_aliases is of an unknown form,
            # completely replace it.
            if not isinstance(original_alt_aliases, (list, tuple)):
                original_alt_aliases = []
            # Check that each alias is currently valid.
            new_alt_aliases = set(alt_aliases) - set(original_alt_aliases)
            if new_alt_aliases:
                for alias_str in new_alt_aliases:
                    await self._validate_canonical_alias(
                        directory_handler, alias_str, event.room_id
                    )
        federation_handler = self.hs.get_federation_handler()
        if event.type == EventTypes.Member:
            if event.content["membership"] == Membership.INVITE:
                # Attach stripped room state so invited clients can preview the room.
                event.unsigned[
                    "invite_room_state"
                ] = await self.store.get_stripped_room_state_from_event_context(
                    context,
                    self.room_prejoin_state_types,
                    membership_user_id=event.sender,
                )
                invitee = UserID.from_string(event.state_key)
                if not self.hs.is_mine(invitee):
                    # TODO: Can we add signature from remote server in a nicer
                    # way? If we have been invited by a remote server, we need
                    # to get them to sign the event.
                    returned_invite = await federation_handler.send_invite(
                        invitee.domain, event
                    )
                    event.unsigned.pop("room_state", None)
                    # TODO: Make sure the signatures actually are correct.
                    event.signatures.update(returned_invite.signatures)
            if event.content["membership"] == Membership.KNOCK:
                # Attach stripped room state so knocking clients can preview the room.
                event.unsigned[
                    "knock_room_state"
                ] = await self.store.get_stripped_room_state_from_event_context(
                    context,
                    self.room_prejoin_state_types,
                )
        if event.type == EventTypes.Redaction:
            original_event = await self.store.get_event(
                event.redacts,
                redact_behaviour=EventRedactBehaviour.AS_IS,
                get_prev_content=False,
                allow_rejected=False,
                allow_none=True,
            )
            room_version = await self.store.get_room_version_id(event.room_id)
            room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
            # we can make some additional checks now if we have the original event.
            if original_event:
                if original_event.type == EventTypes.Create:
                    raise AuthError(403, "Redacting create events is not permitted")
                if original_event.room_id != event.room_id:
                    raise SynapseError(400, "Cannot redact event from a different room")
                if original_event.type == EventTypes.ServerACL:
                    raise AuthError(403, "Redacting server ACL events is not permitted")
                # Add a little safety stop-gap to prevent people from trying to
                # redact MSC2716 related events when they're in a room version
                # which does not support it yet. We allow people to use MSC2716
                # events in existing room versions but only from the room
                # creator since it does not require any changes to the auth
                # rules and in effect, the redaction algorithm . In the
                # supported room version, we add the `historical` power level to
                # auth the MSC2716 related events and adjust the redaction
                # algorthim to keep the `historical` field around (redacting an
                # event should only strip fields which don't affect the
                # structural protocol level).
                is_msc2716_event = (
                    original_event.type == EventTypes.MSC2716_INSERTION
                    or original_event.type == EventTypes.MSC2716_BATCH
                    or original_event.type == EventTypes.MSC2716_MARKER
                )
                if not room_version_obj.msc2716_historical and is_msc2716_event:
                    raise AuthError(
                        403,
                        "Redacting MSC2716 events is not supported in this room version",
                    )
            prev_state_ids = await context.get_prev_state_ids()
            auth_events_ids = self._event_auth_handler.compute_auth_events(
                event, prev_state_ids, for_verification=True
            )
            auth_events_map = await self.store.get_events(auth_events_ids)
            auth_events = {(e.type, e.state_key): e for e in auth_events_map.values()}
            if event_auth.check_redaction(
                room_version_obj, event, auth_events=auth_events
            ):
                # this user doesn't have 'redact' rights, so we need to do some more
                # checks on the original event. Let's start by checking the original
                # event exists.
                if not original_event:
                    raise NotFoundError("Could not find event %s" % (event.redacts,))
                if event.user_id != original_event.user_id:
                    raise AuthError(403, "You don't have permission to redact events")
                # all the checks are done.
                event.internal_metadata.recheck_redaction = False
        if event.type == EventTypes.Create:
            prev_state_ids = await context.get_prev_state_ids()
            if prev_state_ids:
                raise AuthError(403, "Changing the room create event is forbidden")
        if event.type == EventTypes.MSC2716_INSERTION:
            room_version = await self.store.get_room_version_id(event.room_id)
            room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
            create_event = await self.store.get_create_event_for_room(event.room_id)
            room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
            # Only check an insertion event if the room version
            # supports it or the event is from the room creator.
            if room_version_obj.msc2716_historical or (
                self.config.experimental.msc2716_enabled
                and event.sender == room_creator
            ):
                next_batch_id = event.content.get(
                    EventContentFields.MSC2716_NEXT_BATCH_ID
                )
                conflicting_insertion_event_id = (
                    await self.store.get_insertion_event_by_batch_id(
                        event.room_id, next_batch_id
                    )
                )
                if conflicting_insertion_event_id is not None:
                    # The current insertion event that we're processing is invalid
                    # because an insertion event already exists in the room with the
                    # same next_batch_id. We can't allow multiple because the batch
                    # pointing will get weird, e.g. we can't determine which insertion
                    # event the batch event is pointing to.
                    raise SynapseError(
                        HTTPStatus.BAD_REQUEST,
                        "Another insertion event already exists with the same next_batch_id",
                        errcode=Codes.INVALID_PARAM,
                    )
        # Mark any `m.historical` messages as backfilled so they don't appear
        # in `/sync` and have the proper decrementing `stream_ordering` as we import
        backfilled = False
        if event.internal_metadata.is_historical():
            backfilled = True
        # Note that this returns the event that was persisted, which may not be
        # the same as we passed in if it was deduplicated due transaction IDs.
        (
            event,
            event_pos,
            max_stream_token,
        ) = await self.storage.persistence.persist_event(
            event, context=context, backfilled=backfilled
        )
        if self._ephemeral_events_enabled:
            # If there's an expiry timestamp on the event, schedule its expiry.
            self._message_handler.maybe_schedule_expiry(event)
        async def _notify() -> None:
            # Wake up notification streams; failures are logged, never propagated.
            try:
                await self.notifier.on_new_room_event(
                    event, event_pos, max_stream_token, extra_users=extra_users
                )
            except Exception:
                logger.exception(
                    "Error notifying about new room event %s",
                    event.event_id,
                )
        run_in_background(_notify)
        if event.type == EventTypes.Message:
            # We don't want to block sending messages on any presence code. This
            # matters as sometimes presence code can take a while.
            run_in_background(self._bump_active_time, requester.user)
        return event
    async def _maybe_kick_guest_users(
        self, event: EventBase, context: EventContext
    ) -> None:
        """If `event` turns off guest access, kick all guest users from the room."""
        if event.type != EventTypes.GuestAccess:
            return
        guest_access = event.content.get(EventContentFields.GUEST_ACCESS)
        if guest_access == GuestAccess.CAN_JOIN:
            # Guests remain allowed; nothing to do.
            return
        current_state_ids = await context.get_current_state_ids()
        # since this is a client-generated event, it cannot be an outlier and we must
        # therefore have the state ids.
        assert current_state_ids is not None
        current_state_dict = await self.store.get_events(
            list(current_state_ids.values())
        )
        current_state = list(current_state_dict.values())
        logger.info("maybe_kick_guest_users %r", current_state)
        await self.hs.get_room_member_handler().kick_guest_users(current_state)
async def _bump_active_time(self, user: UserID) -> None:
try:
presence = self.hs.get_presence_handler()
await presence.bump_presence_active_time(user)
except Exception:
logger.exception("Error bumping presence active time")
async def _send_dummy_events_to_fill_extremities(self) -> None:
"""Background task to send dummy events into rooms that have a large
number of extremities
"""
self._expire_rooms_to_exclude_from_dummy_event_insertion()
room_ids = await self.store.get_rooms_with_many_extremities(
min_count=self._dummy_events_threshold,
limit=5,
room_id_filter=self._rooms_to_exclude_from_dummy_event_insertion.keys(),
)
for room_id in room_ids:
dummy_event_sent = await self._send_dummy_event_for_room(room_id)
if not dummy_event_sent:
# Did not find a valid user in the room, so remove from future attempts
# Exclusion is time limited, so the room will be rechecked in the future
# dependent on _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY
logger.info(
"Failed to send dummy event into room %s. Will exclude it from "
"future attempts until cache expires" % (room_id,)
)
now = self.clock.time_msec()
self._rooms_to_exclude_from_dummy_event_insertion[room_id] = now
async def _send_dummy_event_for_room(self, room_id: str) -> bool:
"""Attempt to send a dummy event for the given room.
Args:
room_id: room to try to send an event from
Returns:
True if a dummy event was successfully sent. False if no user was able
to send an event.
"""
# For each room we need to find a joined member we can use to send
# the dummy event with.
latest_event_ids = await self.store.get_prev_events_for_room(room_id)
members = await self.state.get_current_users_in_room(
room_id, latest_event_ids=latest_event_ids
)
for user_id in members:
if not self.hs.is_mine_id(user_id):
continue
requester = create_requester(user_id, authenticated_entity=self.server_name)
try:
event, context = await self.create_event(
requester,
{
"type": EventTypes.Dummy,
"content": {},
"room_id": room_id,
"sender": user_id,
},
prev_event_ids=latest_event_ids,
)
event.internal_metadata.proactively_send = False
# Since this is a dummy-event it is OK if it is sent by a
# shadow-banned user.
await self.handle_new_client_event(
requester,
event,
context,
ratelimit=False,
ignore_shadow_ban=True,
)
return True
except AuthError:
logger.info(
"Failed to send dummy event into room %s for user %s due to "
"lack of power. Will try another user" % (room_id, user_id)
)
return False
def _expire_rooms_to_exclude_from_dummy_event_insertion(self) -> None:
expire_before = self.clock.time_msec() - _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY
to_expire = set()
for room_id, time in self._rooms_to_exclude_from_dummy_event_insertion.items():
if time < expire_before:
to_expire.add(room_id)
for room_id in to_expire:
logger.debug(
"Expiring room id %s from dummy event insertion exclusion cache",
room_id,
)
del self._rooms_to_exclude_from_dummy_event_insertion[room_id]
    async def _rebuild_event_after_third_party_rules(
        self, third_party_result: dict, original_event: EventBase
    ) -> Tuple[EventBase, EventContext]:
        """Rebuild an event from the replacement dict a third-party-rules
        module returned, validating it against the original event.
        Args:
            third_party_result: the replacement event dict from the module.
            original_event: the event the module was asked to check.
        Returns:
            The rebuilt event and its freshly-computed context.
        Raises:
            Exception: if the replacement is invalid, changes an immutable
                field, or has a non-local sender.
        """
        # the third_party_event_rules want to replace the event.
        # we do some basic checks, and then return the replacement event and context.
        # Construct a new EventBuilder and validate it, which helps with the
        # rest of these checks.
        try:
            builder = self.event_builder_factory.for_room_version(
                original_event.room_version, third_party_result
            )
            self.validator.validate_builder(builder)
        except SynapseError as e:
            raise Exception(
                "Third party rules module created an invalid event: " + e.msg,
            )
        immutable_fields = [
            # changing the room is going to break things: we've already checked that the
            # room exists, and are holding a concurrency limiter token for that room.
            # Also, we might need to use a different room version.
            "room_id",
            # changing the type or state key might work, but we'd need to check that the
            # calling functions aren't making assumptions about them.
            "type",
            "state_key",
        ]
        for k in immutable_fields:
            if getattr(builder, k, None) != original_event.get(k):
                raise Exception(
                    "Third party rules module created an invalid event: "
                    "cannot change field " + k
                )
        # check that the new sender belongs to this HS
        if not self.hs.is_mine_id(builder.sender):
            raise Exception(
                "Third party rules module created an invalid event: "
                "invalid sender " + builder.sender
            )
        # copy over the original internal metadata
        for k, v in original_event.internal_metadata.get_dict().items():
            setattr(builder.internal_metadata, k, v)
        # modules can send new state events, so we re-calculate the auth events just in
        # case.
        prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)
        event = await builder.build(
            prev_event_ids=prev_event_ids,
            auth_event_ids=None,
        )
        # we rebuild the event context, to be on the safe side. If nothing else,
        # delta_ids might need an update.
        context = await self.state.compute_event_context(event)
        return event, context
| 41.465785
| 95
| 0.609106
|
4a0fcee5f9c878bda7435f8f3923fa797b7f0173
| 2,256
|
py
|
Python
|
textnormalize/cleaners.py
|
cyrta/textnormalizer
|
f4bb577c1c344b0d798cd46978e5332d08274af3
|
[
"MIT"
] | null | null | null |
textnormalize/cleaners.py
|
cyrta/textnormalizer
|
f4bb577c1c344b0d798cd46978e5332d08274af3
|
[
"MIT"
] | null | null | null |
textnormalize/cleaners.py
|
cyrta/textnormalizer
|
f4bb577c1c344b0d798cd46978e5332d08274af3
|
[
"MIT"
] | null | null | null |
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from .numbers import normalize_numbers
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
    '''Replace known abbreviations (e.g. "mr.") with their spelled-out forms.'''
    for pattern, expansion in _abbreviations:
        text = pattern.sub(expansion, text)
    return text
def expand_numbers(text):
    '''Spell out numeric expressions in text (delegates to numbers.normalize_numbers).'''
    return normalize_numbers(text)
def lowercase(text):
    '''Return the text with all cased characters converted to lower case.'''
    return text.lower()
def collapse_whitespace(text):
    '''Collapse every run of whitespace in the text to a single space.'''
    return re.sub(r'\s+', ' ', text)
def convert_to_ascii(text):
    '''Drop every character outside the ASCII range from the text.'''
    # Equivalent to removing all code points above U+007F in one pass.
    return text.encode('ascii', 'ignore').decode('ascii')
def basic_cleaners(text):
    '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
    # lowercase() then collapse_whitespace(), inlined into one expression.
    return re.sub(r'\s+', ' ', text.lower())
def transliteration_cleaners(text):
    '''Pipeline for non-English text that transliterates to ASCII.'''
    # convert_to_ascii -> lowercase -> collapse_whitespace, inlined.
    ascii_text = re.sub(r'[^\x00-\x7F]+', '', text)
    return re.sub(r'\s+', ' ', ascii_text.lower())
def english_cleaners(text):
    '''Pipeline for English text, including number and abbreviation expansion.'''
    # Apply each cleaning stage in order; the order matters (e.g. abbreviations
    # are matched against the lowercased, ASCII-only text).
    pipeline = (
        convert_to_ascii,
        lowercase,
        expand_numbers,
        expand_abbreviations,
        collapse_whitespace,
    )
    for step in pipeline:
        text = step(text)
    return text
| 26.541176
| 95
| 0.692376
|
4a0fcf3fb7168a10fea51899ef9a8e779f8612c3
| 232
|
py
|
Python
|
codility/nesting.py
|
py-in-the-sky/challenges
|
4a36095de8cb56b4f9f83c241eafb13dfbeb4065
|
[
"MIT"
] | null | null | null |
codility/nesting.py
|
py-in-the-sky/challenges
|
4a36095de8cb56b4f9f83c241eafb13dfbeb4065
|
[
"MIT"
] | null | null | null |
codility/nesting.py
|
py-in-the-sky/challenges
|
4a36095de8cb56b4f9f83c241eafb13dfbeb4065
|
[
"MIT"
] | null | null | null |
"""
https://codility.com/programmers/task/nesting/
"""
def solution(S):
    """Return 1 if S is a properly nested parenthesis string, else 0."""
    depth = 0
    for ch in S:
        depth = depth + 1 if ch == '(' else depth - 1
        if depth < 0:
            # A closer appeared before its matching opener.
            return 0
    return 1 if depth == 0 else 0
| 14.5
| 46
| 0.521552
|
4a0fcf48af0b9035c972bb45b104df9e7d405f7c
| 1,506
|
py
|
Python
|
discord/oggparse.py
|
nextcord-ext/nextcord
|
7b3022ae19299e1f40f5f34da33b80ae491aa06a
|
[
"MIT"
] | null | null | null |
discord/oggparse.py
|
nextcord-ext/nextcord
|
7b3022ae19299e1f40f5f34da33b80ae491aa06a
|
[
"MIT"
] | null | null | null |
discord/oggparse.py
|
nextcord-ext/nextcord
|
7b3022ae19299e1f40f5f34da33b80ae491aa06a
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Copyright (c) 2021-present tag-epic
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------
Aliased moodule. See the same file in the nextcord folder for more information
Autogenerated by aliasgen.py
"""
from nextcord.oggparse import (
ClassVar,
DiscordException,
Generator,
IO,
OggError,
OggPage,
OggStream,
Optional,
TYPE_CHECKING,
Tuple,
annotations,
struct,
)
# Explicit public API of this alias module (mirrors nextcord.oggparse).
__all__ = ("OggError", "OggPage", "OggStream")
| 32.73913
| 78
| 0.760956
|
4a0fcfefaf6e168d0bca24c3d9df1cf56bc5e3c2
| 2,687
|
py
|
Python
|
fastapi_cache/decorator.py
|
rushilsrivastava/fastapi-cache
|
1f7b8837878f8b38370a11afc6757c707df20cdb
|
[
"Apache-2.0"
] | 2
|
2021-03-29T03:18:57.000Z
|
2021-03-29T03:27:59.000Z
|
fastapi_cache/decorator.py
|
rushilsrivastava/fastapi-cache
|
1f7b8837878f8b38370a11afc6757c707df20cdb
|
[
"Apache-2.0"
] | null | null | null |
fastapi_cache/decorator.py
|
rushilsrivastava/fastapi-cache
|
1f7b8837878f8b38370a11afc6757c707df20cdb
|
[
"Apache-2.0"
] | null | null | null |
from functools import wraps
from typing import Callable, Optional, Type
from fastapi_cache import FastAPICache
from fastapi_cache.coder import Coder
def cache(
    expire: Optional[int] = None,
    coder: Optional[Type[Coder]] = None,
    key_builder: Optional[Callable] = None,
    namespace: Optional[str] = "",
):
    """
    Decorator that caches the return value of an (async) function or endpoint.

    :param namespace: key prefix passed to the key builder to scope cache keys.
    :param expire: TTL in seconds; falls back to ``FastAPICache.get_expire()``.
    :param coder: serializer class; falls back to ``FastAPICache.get_coder()``.
    :param key_builder: key function; falls back to ``FastAPICache.get_key_builder()``.
    :return: the decorating wrapper.
    """

    def wrapper(func):
        @wraps(func)
        async def inner(*args, **kwargs):
            # Resolve fall-backs into per-call locals. The previous code used
            # ``nonlocal`` and rebound the decorator's closure, which froze
            # whatever FastAPICache returned on the first call and shared it
            # across all subsequent calls.
            copy_kwargs = kwargs.copy()
            request = copy_kwargs.pop("request", None)
            response = copy_kwargs.pop("response", None)

            # Honour the client's explicit opt-out of caching.
            if request and request.headers.get("Cache-Control") == "no-store":
                return await func(*args, **kwargs)

            effective_coder = coder or FastAPICache.get_coder()
            effective_expire = expire or FastAPICache.get_expire()
            build_key = key_builder or FastAPICache.get_key_builder()
            backend = FastAPICache.get_backend()

            cache_key = build_key(
                func,
                namespace,
                request=request,
                response=response,
                args=effective_coder.encode(args),
                kwargs=effective_coder.encode(copy_kwargs),
            )
            try:
                ttl, ret = await backend.get_with_ttl(cache_key)
            except ConnectionResetError:
                # Backend unreachable: degrade gracefully to an uncached call.
                return await func(*args, **kwargs)

            if not request:
                if ret is not None:
                    return effective_coder.decode(ret)
                ret = await func(*args, **kwargs)
                await backend.set(
                    cache_key, effective_coder.encode(ret), effective_expire
                )
                return ret

            if request.method != "GET":
                # BUG FIX: ``request`` was popped only from the *copy* of
                # kwargs, so it is still present in ``kwargs``. Passing it
                # positionally as well raised "got multiple values for
                # argument 'request'".
                return await func(*args, **kwargs)

            if_none_match = request.headers.get("if-none-match")
            if ret is not None:
                if response:
                    response.headers["Cache-Control"] = f"max-age={ttl}"
                    etag = f"W/{hash(ret)}"
                    if if_none_match == etag:
                        # Client already has the current version.
                        response.status_code = 304
                        return response
                    response.headers["ETag"] = etag
                return effective_coder.decode(ret)

            ret = await func(*args, **kwargs)
            await backend.set(cache_key, effective_coder.encode(ret), effective_expire)
            return ret

        return inner

    return wrapper
| 32.373494
| 85
| 0.528098
|
4a0fd098a873439b3bbc96f7076a6385999fc28e
| 128
|
py
|
Python
|
uvcgan/__init__.py
|
LS4GAN/uvcgan
|
376439ae2a9be684ff279ddf634fe137aadc5df5
|
[
"BSD-2-Clause"
] | 20
|
2022-02-14T22:36:19.000Z
|
2022-03-29T06:31:30.000Z
|
uvcgan/__init__.py
|
LS4GAN/uvcgan
|
376439ae2a9be684ff279ddf634fe137aadc5df5
|
[
"BSD-2-Clause"
] | 1
|
2022-03-09T17:23:30.000Z
|
2022-03-09T17:23:30.000Z
|
uvcgan/__init__.py
|
LS4GAN/uvcgan
|
376439ae2a9be684ff279ddf634fe137aadc5df5
|
[
"BSD-2-Clause"
] | 3
|
2022-02-14T22:36:41.000Z
|
2022-03-20T12:53:29.000Z
|
from .consts import CONFIG_NAME, ROOT_DATA, ROOT_OUTDIR
from .utils.funcs import join_dicts
from .train.train import train
| 32
| 60
| 0.796875
|
4a0fd0a3e55f98432a45820793b7b3d7cb2f4df8
| 1,238
|
py
|
Python
|
tools/try_importing_all.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-02-06T14:28:45.000Z
|
2020-02-06T14:28:45.000Z
|
tools/try_importing_all.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
tools/try_importing_all.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-02-06T14:29:00.000Z
|
2020-02-06T14:29:00.000Z
|
# NOTE(review): this is a Python 2 script ("print" statements, "print >>f");
# it drives a Pyston debug build and cannot run under Python 3 as-is.
import glob
import os
import subprocess
LIB_DIR = "from_cpython/Lib"
EXE_PATH = "./pyston_dbg"
modules = []
# Collect importable names: every top-level .py module in the stdlib copy...
for fn in glob.glob("%s/*.py" % LIB_DIR):
    modules.append(os.path.basename(fn)[:-3])
# ...and every package directory.
for fn in glob.glob("%s/*" % LIB_DIR):
    if not os.path.isdir(fn):
        continue
    modules.append(os.path.basename(fn))
print modules
nworked = 0
total = 0
print len(modules)
f = open("failures.txt", 'w')
for m in modules:
    # Skip names that are not valid Python identifiers (e.g. "plat-linux2").
    if '-' in m:
        continue
    p = subprocess.Popen([EXE_PATH, "-q"], stdin=subprocess.PIPE, stderr=subprocess.PIPE)
    # We need to make pyston exit with a non-zero return code if an exception was thrown,
    # so use this little dance. If the import succeeds, then we call sys.exit(0), otherwise
    # we skip to the next line and call sys.exit(1)
    p.stdin.write("import sys\n")
    p.stdin.write("import %s; sys.exit(0)\n" % m)
    p.stdin.write("sys.exit(1)\n")
    p.stdin.close()
    err = p.stderr.read()
    code = p.wait()
    if code == 0:
        print m, "worked",
        nworked += 1
    else:
        print code
        print m, "failed",
        # Record the failing module plus the tail of its stderr for triage.
        print >>f, m
        print >>f, '\n'.join(err.split('\n')[-4:])
        f.flush()
    total += 1
print "%d/%d" % (nworked, total)
| 26.913043
| 92
| 0.599354
|
4a0fd18323a5b5972a5da5c6314ce84287437c51
| 5,833
|
py
|
Python
|
covid-19/covid-19.py
|
martinliu/elastic-labs
|
4ce4afbaa7e27780a4b8224c2ce14a1176203dd0
|
[
"Apache-2.0"
] | 3
|
2020-09-07T06:17:01.000Z
|
2022-03-12T09:09:46.000Z
|
covid-19/covid-19.py
|
martinliu/elastic-labs
|
4ce4afbaa7e27780a4b8224c2ce14a1176203dd0
|
[
"Apache-2.0"
] | null | null | null |
covid-19/covid-19.py
|
martinliu/elastic-labs
|
4ce4afbaa7e27780a4b8224c2ce14a1176203dd0
|
[
"Apache-2.0"
] | 1
|
2021-04-15T08:16:02.000Z
|
2021-04-15T08:16:02.000Z
|
import requests
import json
import time
import hashlib
import os
from elasticsearch import Elasticsearch
def make_id(record):
    """Build a deterministic document id for one region record.

    The id is the lowercase hex MD5 of the record's update *date* (local
    time) joined with its geographic names, so re-crawling the same region
    on the same day upserts the existing document rather than adding a new
    one.

    Args:
        record: dict with at least 'updateTime' (epoch milliseconds),
            'countryName' and 'provinceName'; 'continentName' and
            'cityName' are optional and default to ''.

    Returns:
        A 32-character lowercase hex digest string.
    """
    # use date as part of id to make sure one record per day which is easy to analyze
    time_local = time.localtime(record['updateTime'] / 1000)
    dt = time.strftime("%Y-%m-%d", time_local)
    # `.get(key, '')` replaces the original `x in dict` checks; the old
    # parameter name `dict` shadowed the builtin and was renamed.
    text = '-'.join([
        dt,
        record.get('continentName', ''),
        record['countryName'],
        record['provinceName'],
        record.get('cityName', ''),
    ]).encode()
    # hexdigest() already returns a lowercase str; keep .lower() as a guard
    return hashlib.md5(text).hexdigest().lower()
# Init ES: connection settings come from the environment with local defaults.
es_url = os.environ.get('ES_URL') if 'ES_URL' in os.environ else 'http://192.168.50.10'
es_username = os.environ.get('ES_USERNAME') if 'ES_USERNAME' in os.environ else 'elastic'
es_passwd = os.environ.get('ES_PASSWD') if 'ES_PASSWD' in os.environ else ''
es = Elasticsearch(hosts=[es_url],http_auth=(es_username, es_passwd))
index_name = "covid-19-data"
# Check if need to import all data: a missing index means first run, so the
# full history is crawled instead of only the latest snapshot.
need_all_data = not es.indices.exists(index=index_name)
# create index with an explicit mapping (mostly keyword fields for aggregation)
if need_all_data:
    es.indices.create(index=index_name,body={
        "settings": {
            "index.refresh_interval": "5s"
        },
        "mappings": {
            "properties": {
                "id": {
                    "type": "keyword"
                },
                "level": {
                    "type": "keyword"
                },
                "continentEnglishName" : {
                    "type" : "keyword",
                },
                "continentName" : {
                    "type" : "keyword"
                },
                "countryEnglishName" : {
                    "type" : "keyword"
                },
                "countryName" : {
                    "type" : "keyword"
                },
                "countryShortCode" : {
                    "type" : "keyword"
                },
                "cityEnglishName": {
                    "type": "keyword"
                },
                "cityName": {
                    "type": "keyword"
                },
                "comment": {
                    "type": "text"
                },
                "confirmedCount": {
                    "type": "long"
                },
                "curedCount": {
                    "type": "long"
                },
                "currentConfirmedCount": {
                    "type": "long"
                },
                "deadCount": {
                    "type": "long"
                },
                "locationId": {
                    "type": "keyword"
                },
                "provinceEnglishName" : {
                    "type" : "keyword"
                },
                "provinceName": {
                    "type": "keyword"
                },
                "provinceShortName": {
                    "type": "keyword"
                },
                "statisticsData": {
                    "type": "object",
                    "enabled": False
                },
                "suspectedCount": {
                    "type": "long"
                },
                "updateTime":{
                    "type":"date"
                }
            }
        }
    })
# request to covid-19 api: latest=0 returns all history, latest=1 only the
# most recent record per region.
all_url='https://lab.isaaclin.cn/nCoV/api/area?latest=0'
update_url='https://lab.isaaclin.cn/nCoV/api/area?latest=1'
debug_url='https://lab.isaaclin.cn/nCoV/api/area?latest=1&province=%E6%96%B0%E8%A5%BF%E5%85%B0'
url=update_url
if need_all_data :
    url = all_url
    print("All historical data will be crawled and this will take some time. Be Patient!")
else:
    print('Just try to crawl latest data')
print("Start to request %s"%(url))
response = requests.get(url)
print("Finish request! Time %d s" %(response.elapsed.total_seconds()))
# print(response.text)
print("Decode to json......")
response_json = json.loads(response.text)
#print(response_json)
# Flatten results into per-province and per-city records, deduplicated by the
# date-based id from make_id (one record per region per day).
data_ids=[]
duplicate_data=[]
data_array=[]
current_time = int(round(time.time() * 1000))
for result in response_json['results']:
    #print(result.get('provinceName'))
    province_data={'level': 'province'}
    city_data_prep={'level': 'city'}
    for k,v in result.items():
        if k not in ['cities','comment']:
            province_data[k] = v
            city_data_prep[k] = v
    # use current time
    if not need_all_data:
        province_data['updateTime'] = current_time
    province_data['id'] = make_id(province_data)
    if province_data['id'] in data_ids:
        duplicate_data.append(province_data)
    else:
        data_ids.append(province_data['id'])
        data_array.append(province_data)
    # update time
    time_local = time.localtime(province_data['updateTime']/1000)
    dt = time.strftime("%Y-%m-%d %H:%M:%S",time_local)
    print('%d - UpdateTime %s, Add province %s, Id %s ' % (len(data_array),dt,province_data['provinceName'],province_data['id']))
    if 'cities' in result and result['cities'] != None:
        for city in result['cities']:
            city_data=city_data_prep.copy()
            for k,v in city.items():
                city_data[k] = v
            #print(city_data)
            # use current time
            if not need_all_data:
                city_data['updateTime'] = current_time
            city_data['id'] = make_id(city_data)
            if city_data['id'] in data_ids:
                duplicate_data.append(city_data)
            else:
                data_ids.append(city_data['id'])
                data_array.append(city_data)
            print('%d - UpdateTime %s, Add province %s, city %s, Id %s ' % (len(data_array),dt,province_data['provinceName'],city_data['cityName'],city_data['id']))
print('Crawl total ',len(data_array),' items!')
print('Start to import to es !')
print('es url is %s' % es_url)
# Bulk-upsert in batches of 2500 documents (5000 action+doc lines).
bulk_actions = []
for data in data_array:
    bulk_actions.append({
        'update': {
            '_index': index_name,
            #'_type': 'doc',
            '_id': data['id'],
        }
    })
    bulk_actions.append({
        'doc': data,
        'doc_as_upsert': True
    })
    if len(bulk_actions) >= 5000:
        # print(bulk_actions)
        res = es.bulk(body=bulk_actions)
        # print(total_count)
        print(".")
        bulk_actions = []
# flush the final partial batch
if len(bulk_actions) > 0:
    res = es.bulk(body=bulk_actions)
    print(".")
    # print(res)
# print(total_count)
print('=========================END==========================')
print('Import total ',len(data_array),' records!')
print("With %d duplicate records!"%(len(duplicate_data)))
| 27.514151
| 177
| 0.574833
|
4a0fd2726f6f6027e79487f8402d7580239e36a2
| 4,783
|
py
|
Python
|
benchmark/testsystems/testsystems.py
|
choderalab/integrator-benchmark
|
bb307e6ebf476b652e62e41ae49730f530732da3
|
[
"MIT"
] | 5
|
2017-02-22T09:08:21.000Z
|
2021-09-08T21:21:35.000Z
|
benchmark/testsystems/testsystems.py
|
choderalab/integrator-benchmark
|
bb307e6ebf476b652e62e41ae49730f530732da3
|
[
"MIT"
] | 36
|
2017-04-15T21:34:25.000Z
|
2018-07-22T13:56:40.000Z
|
benchmark/testsystems/testsystems.py
|
choderalab/integrator-benchmark
|
bb307e6ebf476b652e62e41ae49730f530732da3
|
[
"MIT"
] | 2
|
2019-12-06T05:43:10.000Z
|
2021-04-01T01:00:24.000Z
|
import numpy as np
from openmmtools.testsystems import LysozymeImplicit, DHFRExplicit, SrcExplicit
from openmmtools.forcefactories import replace_reaction_field
from simtk.openmm import app
from simtk import unit
from benchmark.testsystems.configuration import configure_platform
from benchmark.utilities import keep_only_some_forces
from benchmark import simulation_parameters
from benchmark.utilities import add_barostat
from .low_dimensional_systems import load_constraint_coupled_harmonic_oscillators
temperature = simulation_parameters["temperature"]
def load_t4_implicit(constrained=True):
    """T4 lysozyme + ligand in OBC2 implicit solvent.

    Args:
        constrained: constrain bonds to hydrogen (app.HBonds) when truthy.

    Returns:
        (topology, system, positions) for the prepared system.
    """
    testsystem = LysozymeImplicit(
        constraints=app.HBonds if constrained else None,
        implicitSolvent=app.OBC2,
    )
    # Strip all forces except the defaults plus the GBSA implicit-solvent term.
    system = testsystem.system
    keep_only_some_forces(system, extra_forces_to_keep=["GBSAOBCForce"])
    return testsystem.topology, system, testsystem.positions
def load_dhfr_explicit(constrained=True):
    """DHFR in explicit solvent, with a barostat added.

    When `constrained` is truthy, hydrogen bonds are constrained and water is
    kept rigid; otherwise both are fully flexible.

    Returns:
        (topology, system, positions) for the prepared system.
    """
    testsystem = DHFRExplicit(
        constraints=app.HBonds if constrained else None,
        rigid_water=bool(constrained),
    )
    system = testsystem.system
    keep_only_some_forces(system)
    add_barostat(system)
    return testsystem.topology, system, testsystem.positions
def load_src_explicit(constrained=True):
    """Src kinase in explicit solvent, with a barostat added.

    When `constrained` is truthy, hydrogen bonds are constrained and water is
    kept rigid; otherwise both are fully flexible.

    Returns:
        (topology, system, positions) for the prepared system.
    """
    testsystem = SrcExplicit(
        constraints=app.HBonds if constrained else None,
        rigid_water=bool(constrained),
    )
    system = testsystem.system
    keep_only_some_forces(system)
    add_barostat(system)
    return testsystem.topology, system, testsystem.positions
def load_dhfr_reaction_field(constrained=True):
    """DHFR in explicit solvent, but using reaction field instead of PME for nonbonded.

    Uses a 15 A cutoff with CutoffPeriodic, swaps in a shifted reaction-field
    nonbonded force, and adds a barostat.

    Returns:
        (topology, system, positions) for the prepared system.
    """
    testsystem = DHFRExplicit(
        nonbondedCutoff=15*unit.angstrom,
        nonbondedMethod=app.CutoffPeriodic,
        constraints=app.HBonds if constrained else None,
        rigid_water=bool(constrained),
    )
    system = testsystem.system
    keep_only_some_forces(system)
    # replace_reaction_field returns a modified copy of the system
    system = replace_reaction_field(system, shifted=True)
    add_barostat(system)
    return testsystem.topology, system, testsystem.positions
# Equilibrium-sampling defaults shared by the simulators constructed below.
n_samples = 1000
default_thinning = 1000
burn_in_length = 10000
default_timestep = 0.25 * unit.femtosecond
from benchmark.testsystems.bookkeepers import EquilibriumSimulator
def construct_simulator(name, top, sys, pos, timestep=default_timestep,
                        thinning_interval=default_thinning):
    """Build a CUDA EquilibriumSimulator with the module-level sampling defaults."""
    return EquilibriumSimulator(platform=configure_platform("CUDA"),
                                topology=top, system=sys, positions=pos,
                                temperature=temperature,
                                timestep=timestep,
                                burn_in_length=burn_in_length, n_samples=n_samples,
                                thinning_interval=thinning_interval, name=name)
# DHFR
dhfr_constrained = construct_simulator("dhfr_constrained", *load_dhfr_explicit(constrained=True))
top, sys, pos = load_dhfr_explicit(constrained=False)
# unconstrained variant: 2.5x smaller timestep, 2.5x more thinning
dhfr_unconstrained = construct_simulator("dhfr_unconstrained", top, sys, pos, default_timestep / 2.5, default_thinning * 2.5)
# DHFR reaction field (for the MTS experiment)
dhfr_reaction_field = construct_simulator("dhfr_constrained_reaction_field", *load_dhfr_reaction_field(constrained=True))
# Src explicit (also run at the reduced timestep / increased thinning)
top, sys, pos = load_src_explicit(constrained=True)
src_constrained = construct_simulator("src_constrained", top, sys, pos, default_timestep / 2.5, default_thinning * 2.5)
# T4 lysozyme
t4_constrained = construct_simulator("t4_constrained", *load_t4_implicit(constrained=True))
t4_unconstrained = construct_simulator("t4_unconstrained", *load_t4_implicit(constrained=False))
# constraint-coupled harmonic oscillators
top, sys, pos = load_constraint_coupled_harmonic_oscillators(constrained=True)
# toy system: runs on the Reference platform with its own sampling schedule
constraint_coupled_harmonic_oscillators = EquilibriumSimulator(platform=configure_platform("Reference"),
                                                               topology=top, system=sys, positions=pos,
                                                               temperature=temperature,
                                                               timestep=1000.0 * unit.femtosecond,
                                                               burn_in_length=50, n_samples=10000,
                                                               thinning_interval=10, name="constraint_coupled_harmonic_oscillators")
| 41.232759
| 149
| 0.734685
|
4a0fd42ef8d389e136733706abe0bfa6ab5a03b6
| 4,910
|
py
|
Python
|
tests/tests_unit/test_contextualization/test_match_rules.py
|
AlexThunder/cognite-sdk-python-experimental
|
468d29e7809793ed45cef5da25dca22418839972
|
[
"Apache-2.0"
] | null | null | null |
tests/tests_unit/test_contextualization/test_match_rules.py
|
AlexThunder/cognite-sdk-python-experimental
|
468d29e7809793ed45cef5da25dca22418839972
|
[
"Apache-2.0"
] | null | null | null |
tests/tests_unit/test_contextualization/test_match_rules.py
|
AlexThunder/cognite-sdk-python-experimental
|
468d29e7809793ed45cef5da25dca22418839972
|
[
"Apache-2.0"
] | null | null | null |
import re
import unittest
import pytest
from cognite.client.data_classes import ContextualizationJob
from cognite.experimental import CogniteClient
from tests.utils import jsgz_load
COGNITE_CLIENT = CogniteClient()
RULES_API = COGNITE_CLIENT.match_rules
@pytest.fixture
def sources():
    """Two source entities whose names embed the target tag between a prefix and suffix."""
    return [
        {"id": 1, "name": "prefix_12_AB_0001/suffix"},
        {"id": 2, "name": "prefix_12_AB_0002/suffix"},
    ]
@pytest.fixture
def targets():
    """Two target entities named by the bare tag."""
    return [
        {"id": 1, "name": "12_AB_0001"},
        {"id": 2, "name": "12_AB_0002"},
    ]
@pytest.fixture
def rules():
    """One match rule: regex-extract (number, letters, number) from both entity
    sets and require the three captured groups to be pairwise equal."""
    return [
        {
            "extractors": [
                {
                    "entitySet": "sources",
                    "extractorType": "regex",
                    "field": "name",
                    "pattern": "^[a-z]+_([0-9]+)_([A-Z]+)_([0-9]+)(.*)$",
                },
                {
                    "entitySet": "targets",
                    "extractorType": "regex",
                    "field": "name",
                    "pattern": "^([0-9]+)_([A-Z]+)_([0-9]+)$",
                },
            ],
            # arguments are [extractor_index, capture_group_index] pairs
            "conditions": [
                {"conditionType": "equals", "arguments": [[0, 0], [1, 0]]},
                {"conditionType": "equals", "arguments": [[0, 1], [1, 1]]},
                {"conditionType": "equals", "arguments": [[0, 2], [1, 2]]},
            ],
            "priority": 30,
        }
    ]
@pytest.fixture
def reference_matches():
    """Known-good matches: source i pairs with target i."""
    return [{"sourceId": 1, "targetId": 1}, {"sourceId": 2, "targetId": 2}]
@pytest.fixture
def matches(sources, targets):
    """Index-aligned pairs of full source/target entities."""
    paired = []
    for src, tgt in zip(sources, targets):
        paired.append({"sources": src, "targets": tgt})
    return paired
@pytest.fixture
def result_mock(matches, rules):
    """Shape of one item in a completed apply-job response."""
    return [{"flags": [], "numberOfMatches": 2, "conflicts": {}, "overlaps": {}, "matches": matches, "rule": rules[0]}]
@pytest.fixture
def mock_apply(rsps):
    """Stub the POST /apply endpoint to return a queued job with id 121110."""
    response_body = {"jobId": 121110, "status": "Queued"}
    rsps.add(
        rsps.POST,
        RULES_API._get_base_url_with_base_path() + RULES_API._RESOURCE_PATH + "/apply",
        status=200,
        json=response_body,
    )
    yield rsps
@pytest.fixture
def mock_suggest(rsps):
    """Stub the POST /suggest endpoint to return a queued job with id 101112."""
    response_body = {"jobId": 101112, "status": "Queued"}
    rsps.add(
        rsps.POST,
        RULES_API._get_base_url_with_base_path() + RULES_API._RESOURCE_PATH + "/suggest",
        status=200,
        json=response_body,
    )
    yield rsps
@pytest.fixture
def mock_status_apply_ok(rsps, result_mock):
    """Stub GET /apply/<job_id> to report the job completed with result_mock items."""
    response_body = {
        "jobId": 121110,
        "status": "Completed",
        "items": result_mock,
    }
    rsps.add(
        rsps.GET,
        re.compile(RULES_API._get_base_url_with_base_path() + RULES_API._RESOURCE_PATH + "/apply" + "/\\d+"),
        status=200,
        json=response_body,
    )
    yield rsps
@pytest.fixture
def mock_status_suggest_ok(rsps, rules):
    """Stub GET /suggest/<job_id> to report the job completed with the rules fixture."""
    response_body = {
        "jobId": 121110,
        "status": "Completed",
        "rules": rules,
    }
    rsps.add(
        rsps.GET,
        re.compile(RULES_API._get_base_url_with_base_path() + RULES_API._RESOURCE_PATH + "/suggest" + "/\\d+"),
        status=200,
        json=response_body,
    )
    yield rsps
class TestMatchRules:
    """End-to-end tests for the match-rules API against mocked HTTP endpoints."""

    def test_suggest(self, sources, targets, reference_matches, mock_suggest, mock_status_suggest_ok):
        job = RULES_API.suggest(sources=sources, targets=targets, matches=reference_matches)
        assert isinstance(job, ContextualizationJob)
        assert "Queued" == job.status
        # NOTE(review): status flips to Completed after .result is read —
        # presumably .result polls the status endpoint to completion.
        assert "rules" in job.result
        assert "Completed" == job.status
        assert 101112 == job.job_id
        # exactly one POST (the suggest request) and one GET (the status poll)
        n_suggest_calls = 0
        n_status_calls = 0
        for call in mock_suggest.calls:
            if call.request.method == "POST":
                n_suggest_calls += 1
                # request body is gzipped JSON; jsgz_load decodes it
                assert {"sources": sources, "targets": targets, "matches": reference_matches} == jsgz_load(
                    call.request.body
                )
            else:
                n_status_calls += 1
                assert "/101112" in call.request.url
        assert 1 == n_suggest_calls
        assert 1 == n_status_calls

    def test_apply(self, sources, targets, rules, mock_apply, mock_status_apply_ok):
        job = RULES_API.apply(sources=sources, targets=targets, rules=rules)
        assert isinstance(job, ContextualizationJob)
        assert "Queued" == job.status
        assert "items" in job.result
        assert "Completed" == job.status
        assert 121110 == job.job_id
        # exactly one POST (the apply request) and one GET (the status poll)
        n_apply_calls = 0
        n_status_calls = 0
        for call in mock_apply.calls:
            if call.request.method == "POST":
                n_apply_calls += 1
                assert {"sources": sources, "targets": targets, "rules": rules} == jsgz_load(call.request.body)
            else:
                n_status_calls += 1
                assert "/121110" in call.request.url
        assert 1 == n_apply_calls
        assert 1 == n_status_calls
| 29.757576
| 119
| 0.566395
|
4a0fd4e32a321251c0d5e4e833bf107968da2a64
| 1,442
|
py
|
Python
|
src/manager/om/script/gspylib/common/CheckPythonVersion.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | 1
|
2020-06-30T15:00:50.000Z
|
2020-06-30T15:00:50.000Z
|
src/manager/om/script/gspylib/common/CheckPythonVersion.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | null | null | null |
src/manager/om/script/gspylib/common/CheckPythonVersion.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import sys
import platform
import re
def checkPythonVersion():
    """Verify the interpreter version matches the platform's requirement.

    openEuler 1 ("oe1") platforms require Python 3.7.*; every other platform
    requires Python 3.6.*. Raises Exception otherwise; returns True on success.
    """
    pythonVersion = sys.version_info[0:2]
    distName = platform.platform()
    isOpenEuler1 = re.search("oe1", distName) is not None
    expected = (3, 7) if isOpenEuler1 else (3, 6)
    if pythonVersion != expected:
        raise Exception("[GAUSS-52200] : version of python"
                        " is not correct: %s." %
                        distName + " should use Python %d.%d.*" % expected)
    return True
if __name__ == '__main__':
    # Run the check directly. The previous ``except Exception as e:
    # raise Exception(e)`` wrapper discarded the original traceback while
    # re-raising the same exception type with the same message, so it added
    # no information; letting the exception propagate is strictly better.
    checkPythonVersion()
| 32.772727
| 78
| 0.579057
|
4a0fd4ee749f7d9a74db614aed3e11b92bead04e
| 2,990
|
py
|
Python
|
src/base/replay_queue.py
|
Kautenja/playing-mario-with-deep-reinforcement-learning
|
bf61b8babfd06b6e6c26eb3694b84e8c7ff4c076
|
[
"MIT"
] | 57
|
2018-04-24T07:07:29.000Z
|
2022-01-19T17:07:13.000Z
|
src/base/replay_queue.py
|
Kautenja/playing-mario-with-deep-reinforcement-learning
|
bf61b8babfd06b6e6c26eb3694b84e8c7ff4c076
|
[
"MIT"
] | 10
|
2018-06-07T14:29:19.000Z
|
2019-07-29T13:48:03.000Z
|
src/base/replay_queue.py
|
Kautenja/playing-mario-with-deep-reinforcement-learning
|
bf61b8babfd06b6e6c26eb3694b84e8c7ff4c076
|
[
"MIT"
] | 11
|
2018-09-11T23:14:37.000Z
|
2021-06-30T03:56:55.000Z
|
"""A queue for storing previous experiences to sample from."""
import numpy as np
class ReplayQueue(object):
    """A replay queue (ring buffer) for replaying previous experiences."""

    def __init__(self, size: int) -> None:
        """
        Initialize a new replay buffer with a given size.

        Args:
            size: the size of the replay buffer
                (the number of previous experiences to store)

        Returns:
            None
        """
        # the ring buffer itself; unfilled slots hold None
        self.queue = [None] * size
        # index of the next slot to overwrite (wraps around when full)
        self.index = 0
        # count of filled slots; saturates at `size`
        self.top = 0

    def __repr__(self) -> str:
        """Return an executable string representation of self."""
        return '{}(size={})'.format(self.__class__.__name__, self.size)

    @property
    def size(self) -> int:
        """Return the size of the queue."""
        return len(self.queue)

    def push(self,
        s: np.ndarray,
        a: int,
        r: int,
        d: bool,
        s2: np.ndarray,
    ) -> None:
        """
        Push a new experience onto the queue, overwriting the oldest entry
        once the buffer is full.

        Args:
            s: the current state
            a: the action to get from current state `s` to next state `s2`
            r: the reward resulting from taking action `a` in state `s`
            d: the flag denoting whether the episode ended after action `a`
            s2: the next state from taking action `a` in state `s`

        Returns:
            None
        """
        # store the experience as a tuple in the current slot
        self.queue[self.index] = s, a, r, d, s2
        # advance the write index, wrapping around the ring
        self.index = (self.index + 1) % self.size
        # grow the filled count until the buffer saturates
        if self.top < self.size:
            self.top += 1

    def sample(self, size: int=32) -> tuple:
        """
        Return a random sample of items from the queue.

        Args:
            size: the number of items to sample and return

        Returns:
            A batch (s, a, r, d, s2) of arrays sampled uniformly (with
            replacement) from the filled portion of the queue

        Raises:
            ValueError: if the queue is empty
        """
        # guard against sampling before anything has been pushed; the old
        # code would fail inside np.random.randint with an opaque error
        if self.top == 0:
            raise ValueError('cannot sample from an empty replay queue')
        # initialize lists for each component of the batch
        s = [None] * size
        a = [None] * size
        r = [None] * size
        d = [None] * size
        s2 = [None] * size
        # iterate over the indexes and copy references to the arrays
        for batch, sample in enumerate(np.random.randint(0, self.top, size)):
            _s, _a, _r, _d, _s2 = self.queue[sample]
            s[batch] = np.array(_s, copy=False)
            a[batch] = _a
            r[batch] = _r
            d[batch] = _d
            s2[batch] = np.array(_s2, copy=False)
        # convert the lists to arrays for returning for training
        return (
            np.array(s),
            np.array(a, dtype=np.uint8),
            np.array(r, dtype=np.int8),
            # `np.bool` was removed in NumPy 1.24; the builtin bool is the
            # supported spelling of the same dtype
            np.array(d, dtype=bool),
            np.array(s2),
        )
# explicitly define the outward facing API of this module
__all__ = [ReplayQueue.__name__]
| 29.60396
| 77
| 0.545485
|
4a0fd51b41becd5fd5592b478c63bd8f5c9a9122
| 40,167
|
py
|
Python
|
fast_bert/summarisation/modeling_bertabs.py
|
kirankunapuli/fast-bert
|
14f8e21fa9f19582edba40b3159ea933d7c8c815
|
[
"Apache-2.0"
] | null | null | null |
fast_bert/summarisation/modeling_bertabs.py
|
kirankunapuli/fast-bert
|
14f8e21fa9f19582edba40b3159ea933d7c8c815
|
[
"Apache-2.0"
] | null | null | null |
fast_bert/summarisation/modeling_bertabs.py
|
kirankunapuli/fast-bert
|
14f8e21fa9f19582edba40b3159ea933d7c8c815
|
[
"Apache-2.0"
] | null | null | null |
# MIT License
# Copyright (c) 2019 Yang Liu and the HuggingFace team
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import math
import numpy as np
import torch
from torch import nn
from torch.nn.init import xavier_uniform_
from transformers import BertModel, BertConfig, PreTrainedModel
from .configuration_bertabs import BertAbsConfig
MAX_SIZE = 5000
BERTABS_FINETUNED_MODEL_MAP = {
"bertabs-finetuned-cnndm": "https://s3.amazonaws.com/models.huggingface.co/bert/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization-pytorch_model.bin",
}
class BertAbsPreTrainedModel(PreTrainedModel):
    """Base class wiring BertAbs configs/checkpoints into the transformers loading machinery."""
    config_class = BertAbsConfig
    # maps shortcut checkpoint names to their download URLs
    pretrained_model_archive_map = BERTABS_FINETUNED_MODEL_MAP
    # no TensorFlow weight conversion is supported for this model
    load_tf_weights = False
    base_model_prefix = "bert"
class BertAbs(BertAbsPreTrainedModel):
    """Abstractive summarizer: a BERT encoder plus a transformer decoder whose
    input embeddings are initialized from (and generator tied to) BERT's
    word embeddings."""

    def __init__(self, args, checkpoint=None, bert_extractive_checkpoint=None):
        """
        Args:
            args: config with max_pos, vocab_size and dec_* decoder hyperparameters
            checkpoint: optional full BertAbs state_dict to load
            bert_extractive_checkpoint: optional state_dict from an extractive
                model whose "bert.model.*" weights seed the encoder
        """
        super(BertAbs, self).__init__(args)
        self.args = args
        self.bert = Bert()
        # If pre-trained weights are passed for Bert, load these.
        load_bert_pretrained_extractive = True if bert_extractive_checkpoint else False
        if load_bert_pretrained_extractive:
            # keep only keys under "bert.model" and strip that 11-char prefix
            self.bert.model.load_state_dict(
                dict(
                    [
                        (n[11:], p)
                        for n, p in bert_extractive_checkpoint.items()
                        if n.startswith("bert.model")
                    ]
                ),
                strict=True,
            )
        self.vocab_size = self.bert.model.config.vocab_size
        # Extend BERT's 512-position embedding table when longer inputs are
        # requested: copy the learned rows, then repeat the last row to fill.
        if args.max_pos > 512:
            my_pos_embeddings = nn.Embedding(
                args.max_pos, self.bert.model.config.hidden_size
            )
            my_pos_embeddings.weight.data[
                :512
            ] = self.bert.model.embeddings.position_embeddings.weight.data
            my_pos_embeddings.weight.data[
                512:
            ] = self.bert.model.embeddings.position_embeddings.weight.data[-1][
                None, :
            ].repeat(
                args.max_pos - 512, 1
            )
            self.bert.model.embeddings.position_embeddings = my_pos_embeddings
        # Decoder target embeddings start from a deep copy of BERT's word
        # embeddings (copied, so encoder and decoder do not share weights).
        tgt_embeddings = nn.Embedding(
            self.vocab_size, self.bert.model.config.hidden_size, padding_idx=0
        )
        tgt_embeddings.weight = copy.deepcopy(
            self.bert.model.embeddings.word_embeddings.weight
        )
        self.decoder = TransformerDecoder(
            self.args.dec_layers,
            self.args.dec_hidden_size,
            heads=self.args.dec_heads,
            d_ff=self.args.dec_ff_size,
            dropout=self.args.dec_dropout,
            embeddings=tgt_embeddings,
            vocab_size=self.vocab_size,
        )
        gen_func = nn.LogSoftmax(dim=-1)
        self.generator = nn.Sequential(
            nn.Linear(args.dec_hidden_size, args.vocab_size), gen_func
        )
        # weight tying: the output projection shares the decoder embedding matrix
        self.generator[0].weight = self.decoder.embeddings.weight
        load_from_checkpoints = False if checkpoint is None else True
        if load_from_checkpoints:
            self.load_state_dict(checkpoint)

    def init_weights(self):
        """Re-initialize decoder weights (BERT-style) and generator weights (Xavier)."""
        for module in self.decoder.modules():
            if isinstance(module, (nn.Linear, nn.Embedding)):
                module.weight.data.normal_(mean=0.0, std=0.02)
            elif isinstance(module, nn.LayerNorm):
                module.bias.data.zero_()
                module.weight.data.fill_(1.0)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        for p in self.generator.parameters():
            if p.dim() > 1:
                xavier_uniform_(p)
            else:
                p.data.zero_()

    def forward(
        self,
        encoder_input_ids,
        decoder_input_ids,
        token_type_ids,
        encoder_attention_mask,
        decoder_attention_mask,
    ):
        """Encode the source and run the decoder over the shifted target.

        Note: decoder_attention_mask is accepted but not used here; the
        decoder builds its own padding/causal masks internally.
        """
        encoder_output = self.bert(
            input_ids=encoder_input_ids,
            token_type_ids=token_type_ids,
            attention_mask=encoder_attention_mask,
        )
        encoder_hidden_states = encoder_output[0]
        dec_state = self.decoder.init_decoder_state(
            encoder_input_ids, encoder_hidden_states
        )
        # feed targets shifted right by one (drop the final token)
        decoder_outputs, _ = self.decoder(
            decoder_input_ids[:, :-1], encoder_hidden_states, dec_state
        )
        return decoder_outputs
class Bert(nn.Module):
    """Thin wrapper around a `bert-base-uncased` encoder, always run frozen.

    (Per the original note, this class is not really necessary and should
    probably disappear.)
    """

    def __init__(self):
        super(Bert, self).__init__()
        self.model = BertModel(BertConfig.from_pretrained("bert-base-uncased"))

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, **kwargs):
        # Run the encoder frozen: eval mode and no autograd graph.
        self.eval()
        with torch.no_grad():
            encoder_hidden, _pooled = self.model(
                input_ids,
                token_type_ids=token_type_ids,
                attention_mask=attention_mask,
                **kwargs
            )
        return encoder_hidden
class TransformerDecoder(nn.Module):
    """
    The Transformer decoder from "Attention is All You Need".

    Args:
       num_layers (int): number of encoder layers.
       d_model (int): size of the model
       heads (int): number of heads
       d_ff (int): size of the inner FF layer
       dropout (float): dropout parameters
       embeddings (:obj:`onmt.modules.Embeddings`):
          embeddings to use, should have positional encodings
       attn_type (str): if using a seperate copy attention
    """

    def __init__(self, num_layers, d_model, heads, d_ff, dropout, embeddings, vocab_size):
        super(TransformerDecoder, self).__init__()
        # Basic attributes.
        self.decoder_type = "transformer"
        self.num_layers = num_layers
        self.embeddings = embeddings
        self.pos_emb = PositionalEncoding(dropout, self.embeddings.embedding_dim)
        # Build TransformerDecoder.
        self.transformer_layers = nn.ModuleList(
            [
                TransformerDecoderLayer(d_model, heads, d_ff, dropout)
                for _ in range(num_layers)
            ]
        )
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)

    # forward(input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask)
    # def forward(self, input_ids, state, attention_mask=None, memory_lengths=None,
    # step=None, cache=None, encoder_attention_mask=None, encoder_hidden_states=None, memory_masks=None):
    def forward(
        self,
        input_ids,
        encoder_hidden_states=None,
        state=None,
        attention_mask=None,
        memory_lengths=None,
        step=None,
        cache=None,
        encoder_attention_mask=None,
    ):
        """
        See :obj:`onmt.modules.RNNDecoderBase.forward()`
        memory_bank = encoder_hidden_states
        """
        # Name conversion
        tgt = input_ids
        memory_bank = encoder_hidden_states
        memory_mask = encoder_attention_mask
        # src_words = state.src
        src_words = state.src
        src_batch, src_len = src_words.size()
        padding_idx = self.embeddings.padding_idx
        # Decoder padding mask: True where the target token is padding,
        # broadcast to (batch, tgt_len, tgt_len)
        tgt_words = tgt
        tgt_batch, tgt_len = tgt_words.size()
        tgt_pad_mask = (
            tgt_words.data.eq(padding_idx).unsqueeze(1).expand(tgt_batch, tgt_len, tgt_len)
        )
        # Encoder padding mask: use the provided attention mask when given,
        # otherwise derive it from padding positions in the source ids
        if memory_mask is not None:
            src_len = memory_mask.size(-1)
            src_pad_mask = memory_mask.expand(src_batch, tgt_len, src_len)
        else:
            src_pad_mask = (
                src_words.data.eq(padding_idx)
                .unsqueeze(1)
                .expand(src_batch, tgt_len, src_len)
            )
        # Pass through the embeddings
        emb = self.embeddings(input_ids)
        output = self.pos_emb(emb, step)
        assert emb.dim() == 3  # len x batch x embedding_dim
        # Without a decoding cache, each layer's input is saved so the state
        # can be extended on the next call.
        if state.cache is None:
            saved_inputs = []
        for i in range(self.num_layers):
            prev_layer_input = None
            if state.cache is None:
                if state.previous_input is not None:
                    prev_layer_input = state.previous_layer_inputs[i]
            output, all_input = self.transformer_layers[i](
                output,
                memory_bank,
                src_pad_mask,
                tgt_pad_mask,
                previous_input=prev_layer_input,
                layer_cache=state.cache["layer_{}".format(i)]
                if state.cache is not None
                else None,
                step=step,
            )
            if state.cache is None:
                saved_inputs.append(all_input)
        if state.cache is None:
            saved_inputs = torch.stack(saved_inputs)
        output = self.layer_norm(output)
        if state.cache is None:
            state = state.update_state(tgt, saved_inputs)
        # Decoders in transformers return a tuple. Beam search will fail
        # if we don't follow this convention.
        return output, state  # , state

    def init_decoder_state(self, src, memory_bank, with_cache=False):
        """ Init decoder state """
        state = TransformerDecoderState(src)
        if with_cache:
            state._init_cache(memory_bank, self.num_layers)
        return state
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding ("Attention is All You Need"):
    scales the embeddings by sqrt(dim), adds the precomputed sin/cos table,
    then applies dropout."""

    def __init__(self, dropout, dim, max_len=5000):
        # Precompute the (1, max_len, dim) table of interleaved sin/cos values.
        table = torch.zeros(max_len, dim)
        positions = torch.arange(0, max_len).unsqueeze(1)
        inv_freq = torch.exp(
            (torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim))
        )
        table[:, 0::2] = torch.sin(positions.float() * inv_freq)
        table[:, 1::2] = torch.cos(positions.float() * inv_freq)
        super(PositionalEncoding, self).__init__()
        # buffer (not a parameter): moves with the module but is not trained
        self.register_buffer("pe", table.unsqueeze(0))
        self.dropout = nn.Dropout(p=dropout)
        self.dim = dim

    def forward(self, emb, step=None):
        scaled = emb * math.sqrt(self.dim)
        # NB: the truthiness test means step == 0 falls through to the
        # slice branch, exactly as in the original implementation.
        if step:
            scaled = scaled + self.pe[:, step][:, None, :]
        else:
            scaled = scaled + self.pe[:, : scaled.size(1)]
        return self.dropout(scaled)

    def get_emb(self, emb):
        """Return the encoding table slice matching emb's sequence length."""
        return self.pe[:, : emb.size(1)]
class TransformerDecoderLayer(nn.Module):
    """
    Args:
      d_model (int): the dimension of keys/values/queries in
                       MultiHeadedAttention, also the input size of
                       the first-layer of the PositionwiseFeedForward.
      heads (int): the number of heads for MultiHeadedAttention.
      d_ff (int): the second-layer of the PositionwiseFeedForward.
      dropout (float): dropout probability(0-1.0).
      self_attn_type (string): type of self-attention scaled-dot, average
    """

    def __init__(self, d_model, heads, d_ff, dropout):
        super(TransformerDecoderLayer, self).__init__()
        self.self_attn = MultiHeadedAttention(heads, d_model, dropout=dropout)
        self.context_attn = MultiHeadedAttention(heads, d_model, dropout=dropout)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
        self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)
        self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)
        self.drop = nn.Dropout(dropout)
        mask = self._get_attn_subsequent_mask(MAX_SIZE)
        # Register self.mask as a buffer in TransformerDecoderLayer, so
        # it gets TransformerDecoderLayer's cuda behavior automatically.
        self.register_buffer("mask", mask)

    def forward(
        self,
        inputs,
        memory_bank,
        src_pad_mask,
        tgt_pad_mask,
        previous_input=None,
        layer_cache=None,
        step=None,
    ):
        """
        Args:
            inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`
            memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`
            src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`
            tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`

        Returns:
            (`FloatTensor`, `FloatTensor`, `FloatTensor`):

            * output `[batch_size x 1 x model_dim]`
            * attn `[batch_size x 1 x src_len]`
            * all_input `[batch_size x current_step x model_dim]`
        """
        # combine the target padding mask with the causal (subsequent-position)
        # mask so positions cannot attend to padding or to the future
        dec_mask = torch.gt(
            tgt_pad_mask + self.mask[:, : tgt_pad_mask.size(1), : tgt_pad_mask.size(1)], 0
        )
        input_norm = self.layer_norm_1(inputs)
        all_input = input_norm
        if previous_input is not None:
            # prepend cached inputs from earlier steps; the mask is dropped
            # here (presumably safe because only past positions are present
            # in this incremental path — TODO confirm)
            all_input = torch.cat((previous_input, input_norm), dim=1)
            dec_mask = None
        # masked self-attention over the (possibly extended) target sequence
        query = self.self_attn(
            all_input,
            all_input,
            input_norm,
            mask=dec_mask,
            layer_cache=layer_cache,
            type="self",
        )
        # residual connection around self-attention
        query = self.drop(query) + inputs
        query_norm = self.layer_norm_2(query)
        # encoder-decoder (context) attention over the encoder states
        mid = self.context_attn(
            memory_bank,
            memory_bank,
            query_norm,
            mask=src_pad_mask,
            layer_cache=layer_cache,
            type="context",
        )
        # feed-forward with residual connection
        output = self.feed_forward(self.drop(mid) + query)
        return output, all_input
        # return output

    def _get_attn_subsequent_mask(self, size):
        """
        Get an attention mask to avoid using the subsequent info.

        Args:
            size: int

        Returns:
            (`LongTensor`):

            * subsequent_mask `[1 x size x size]`
        """
        attn_shape = (1, size, size)
        # upper-triangular ones (above the diagonal) mark disallowed positions
        subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype("uint8")
        subsequent_mask = torch.from_numpy(subsequent_mask)
        return subsequent_mask
class MultiHeadedAttention(nn.Module):
    """
    Multi-Head Attention module from
    "Attention is All You Need"
    :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`.

    Similar to standard `dot` attention but uses
    multiple attention distributions simultaneously
    to select relevant items.

    Args:
        head_count (int): number of parallel heads
        model_dim (int): the dimension of keys/values/queries,
            must be divisible by head_count
        dropout (float): dropout parameter
        use_final_linear (bool): if False, skip the output projection and
            return the per-head context tensors instead.
    """

    def __init__(self, head_count, model_dim, dropout=0.1, use_final_linear=True):
        assert model_dim % head_count == 0
        self.dim_per_head = model_dim // head_count
        self.model_dim = model_dim
        super(MultiHeadedAttention, self).__init__()
        self.head_count = head_count
        self.linear_keys = nn.Linear(model_dim, head_count * self.dim_per_head)
        self.linear_values = nn.Linear(model_dim, head_count * self.dim_per_head)
        self.linear_query = nn.Linear(model_dim, head_count * self.dim_per_head)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)
        self.use_final_linear = use_final_linear
        if self.use_final_linear:
            self.final_linear = nn.Linear(model_dim, model_dim)

    def forward(
        self,
        key,
        value,
        query,
        mask=None,
        layer_cache=None,
        type=None,
        predefined_graph_1=None,
    ):
        """
        Compute the context vector and the attention vectors.

        Args:
            key (`FloatTensor`): set of `key_len`
                key vectors `[batch, key_len, dim]`
            value (`FloatTensor`): set of `key_len`
                value vectors `[batch, key_len, dim]`
            query (`FloatTensor`): set of `query_len`
                query vectors `[batch, query_len, dim]`
            mask: binary mask indicating which keys have
                non-zero attention `[batch, query_len, key_len]`
            layer_cache (dict, optional): cache of already-projected
                keys/values for incremental decoding; mutated in place.
            type (str, optional): "self" or "context"; selects which cache
                entries are read and updated.
            predefined_graph_1 (`FloatTensor`, optional): attention prior
                blended into the last head's distribution.
        Returns:
            (`FloatTensor`): context vectors `[batch, query_len, dim]` when
            ``use_final_linear`` is set, otherwise the per-head contexts
            `[batch, head_count, query_len, dim_per_head]`.
        """
        batch_size = key.size(0)
        dim_per_head = self.dim_per_head
        head_count = self.head_count

        def shape(x):
            """ projection: [batch, len, dim] -> [batch, heads, len, dim_per_head] """
            return x.view(batch_size, -1, head_count, dim_per_head).transpose(1, 2)

        def unshape(x):
            """ compute context: inverse of shape() """
            return (
                x.transpose(1, 2)
                .contiguous()
                .view(batch_size, -1, head_count * dim_per_head)
            )

        # 1) Project key, value, and query.
        if layer_cache is not None:
            if type == "self":
                # Incremental decoding: key == value == query (the new token);
                # its projections are appended to the cached prefix.
                query, key, value = (
                    self.linear_query(query),
                    self.linear_keys(query),
                    self.linear_values(query),
                )
                key = shape(key)
                value = shape(value)
                if layer_cache is not None:
                    device = key.device
                    if layer_cache["self_keys"] is not None:
                        key = torch.cat((layer_cache["self_keys"].to(device), key), dim=2)
                    if layer_cache["self_values"] is not None:
                        value = torch.cat(
                            (layer_cache["self_values"].to(device), value), dim=2
                        )
                    layer_cache["self_keys"] = key
                    layer_cache["self_values"] = value
            elif type == "context":
                query = self.linear_query(query)
                if layer_cache is not None:
                    # The encoder memory never changes across decoding steps,
                    # so its projections are computed once and reused.
                    if layer_cache["memory_keys"] is None:
                        key, value = self.linear_keys(key), self.linear_values(value)
                        key = shape(key)
                        value = shape(value)
                    else:
                        key, value = (
                            layer_cache["memory_keys"],
                            layer_cache["memory_values"],
                        )
                    layer_cache["memory_keys"] = key
                    layer_cache["memory_values"] = value
                else:
                    key, value = self.linear_keys(key), self.linear_values(value)
                    key = shape(key)
                    value = shape(value)
        else:
            key = self.linear_keys(key)
            value = self.linear_values(value)
            query = self.linear_query(query)
            key = shape(key)
            value = shape(value)
        query = shape(query)

        # 2) Calculate and scale scores.
        query = query / math.sqrt(dim_per_head)
        scores = torch.matmul(query, key.transpose(2, 3))
        if mask is not None:
            mask = mask.unsqueeze(1).expand_as(scores)
            # Large negative value so masked positions get ~zero attention.
            scores = scores.masked_fill(mask, -1e18)

        # 3) Apply attention dropout and compute context vectors.
        attn = self.softmax(scores)
        if predefined_graph_1 is not None:
            # Blend the prior into the last head and renormalize it.
            attn_masked = attn[:, -1] * predefined_graph_1
            attn_masked = attn_masked / (torch.sum(attn_masked, 2).unsqueeze(2) + 1e-9)
            attn = torch.cat([attn[:, :-1], attn_masked.unsqueeze(1)], 1)
        drop_attn = self.dropout(attn)
        if self.use_final_linear:
            context = unshape(torch.matmul(drop_attn, value))
            output = self.final_linear(context)
            return output
        else:
            context = torch.matmul(drop_attn, value)
            return context
class DecoderState(object):
    """Interface for grouping together the current state of a recurrent
    decoder. In the simplest case just represents the hidden state of
    the model. But can also be used for implementing various forms of
    input_feeding and non-recurrent models.

    Modules need to implement this to utilize beam search decoding.
    """

    def detach(self):
        """Detach the cached hidden state and input feed from the graph."""
        self.hidden = tuple(h.detach() for h in self.hidden)
        self.input_feed = self.input_feed.detach()

    def beam_update(self, idx, positions, beam_size):
        """Reorder, in place, the beams of every tensor in ``self._all``.

        Args:
            idx: index of the sentence inside the folded batch dimension.
            positions (`LongTensor`): beam indices to keep, in order.
            beam_size (int): number of beams per sentence.
        """
        for state in self._all:
            shape = state.size()
            beams_times_batch = shape[1]
            # Unfold dim 1 into (beam, batch) and pick this sentence's slice.
            if len(shape) == 3:
                view = state.view(
                    shape[0], beam_size, beams_times_batch // beam_size, shape[2]
                )
            else:
                view = state.view(
                    shape[0], beam_size, beams_times_batch // beam_size,
                    shape[2], shape[3],
                )
            sent_states = view[:, :, idx]
            # In-place copy so the underlying storage is reordered.
            sent_states.data.copy_(sent_states.data.index_select(1, positions))

    def map_batch_fn(self, fn):
        raise NotImplementedError()
class TransformerDecoderState(DecoderState):
    """ Transformer Decoder state base class """

    def __init__(self, src):
        """
        Args:
            src (FloatTensor): a sequence of source words tensors
                with optional feature tensors, of size (len x batch).
        """
        self.src = src
        self.previous_input = None
        self.previous_layer_inputs = None
        self.cache = None

    @property
    def _all(self):
        """
        Contains attributes that need to be updated in self.beam_update().
        """
        if self.previous_input is None or self.previous_layer_inputs is None:
            return (self.src,)
        return (self.previous_input, self.previous_layer_inputs, self.src)

    def detach(self):
        """Detach every stored tensor from the autograd graph."""
        for attr in ("previous_input", "previous_layer_inputs"):
            tensor = getattr(self, attr)
            if tensor is not None:
                setattr(self, attr, tensor.detach())
        self.src = self.src.detach()

    def update_state(self, new_input, previous_layer_inputs):
        """Return a fresh state remembering the decoded prefix."""
        next_state = TransformerDecoderState(self.src)
        next_state.previous_input = new_input
        next_state.previous_layer_inputs = previous_layer_inputs
        return next_state

    def _init_cache(self, memory_bank, num_layers):
        """Create one empty key/value cache entry per decoder layer."""
        self.cache = {
            "layer_{}".format(layer): {
                "memory_keys": None,
                "memory_values": None,
                "self_keys": None,
                "self_values": None,
            }
            for layer in range(num_layers)
        }

    def repeat_beam_size_times(self, beam_size):
        """ Repeat beam_size times along batch dimension. """
        self.src = self.src.data.repeat(1, beam_size, 1)

    def map_batch_fn(self, fn):
        def _recursive_map(struct, batch_dim=0):
            for key, item in struct.items():
                if item is None:
                    continue
                if isinstance(item, dict):
                    _recursive_map(item)
                else:
                    struct[key] = fn(item, batch_dim)

        self.src = fn(self.src, 0)
        if self.cache is not None:
            _recursive_map(self.cache)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    Args:
        x (`FloatTensor`): pre-activation tensor.
    Returns:
        (`FloatTensor`): element-wise GELU of ``x``.
    """
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
class PositionwiseFeedForward(nn.Module):
    """ A two-layer Feed-Forward-Network with residual layer norm.

    Args:
        d_model (int): the size of input for the first-layer of the FFN.
        d_ff (int): the hidden layer size of the second-layer
            of the FNN.
        dropout (float): dropout probability in :math:`[0, 1)`.
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.actv = gelu
        self.dropout_1 = nn.Dropout(dropout)
        self.dropout_2 = nn.Dropout(dropout)

    def forward(self, x):
        """Pre-norm FFN: LayerNorm -> w_1 -> gelu -> w_2, plus residual."""
        normalized = self.layer_norm(x)
        hidden = self.dropout_1(self.actv(self.w_1(normalized)))
        return x + self.dropout_2(self.w_2(hidden))
#
# TRANSLATOR
# The following code is used to generate summaries using the
# pre-trained weights and beam search.
#
def build_predictor(args, tokenizer, symbols, model, logger=None):
    """Create a beam-search :class:`Translator` using the Wu length penalty."""
    # we should be able to refactor the global scorer a lot
    global_scorer = GNMTGlobalScorer(args.alpha, length_penalty="wu")
    return Translator(
        args, model, tokenizer, symbols, global_scorer=global_scorer, logger=logger
    )
class GNMTGlobalScorer(object):
    """
    NMT re-ranking score from
    "Google's Neural Machine Translation System" :cite:`wu2016google`

    Args:
        alpha (float): length parameter
        beta (float): coverage parameter
    """

    def __init__(self, alpha, length_penalty):
        self.alpha = alpha
        builder = PenaltyBuilder(length_penalty)
        self.length_penalty = builder.length_penalty()

    def score(self, beam, logprobs):
        """
        Rescores a prediction based on penalty functions
        """
        return self.length_penalty(beam, logprobs, self.alpha)
class PenaltyBuilder(object):
    """
    Returns the Length and Coverage Penalty function for Beam Search.

    Args:
        length_pen (str): option name of length pen ("wu", "avg",
            anything else selects the identity penalty)
        cov_pen (str): option name of cov pen
    """

    def __init__(self, length_pen):
        self.length_pen = length_pen

    def length_penalty(self):
        """Return the penalty callable matching ``self.length_pen``."""
        if self.length_pen == "wu":
            return self.length_wu
        if self.length_pen == "avg":
            return self.length_average
        return self.length_none

    # The penalty implementations below share the (beam, logprobs, alpha)
    # signature so that they are interchangeable.

    def length_wu(self, beam, logprobs, alpha=0.0):
        """
        NMT length re-ranking score from
        "Google's Neural Machine Translation System" :cite:`wu2016google`.
        """
        modifier = ((5 + len(beam.next_ys)) ** alpha) / ((5 + 1) ** alpha)
        return logprobs / modifier

    def length_average(self, beam, logprobs, alpha=0.0):
        """
        Returns the average probability of tokens in a sequence.
        """
        return logprobs / len(beam.next_ys)

    def length_none(self, beam, logprobs, alpha=0.0, beta=0.0):
        """
        Returns unmodified scores.
        """
        return logprobs
class Translator(object):
    """
    Uses a model to translate a batch of sentences.

    Args:
        model (:obj:`onmt.modules.NMTModel`):
            NMT model to use for translation
        fields (dict of Fields): data fields
        beam_size (int): size of beam to use
        n_best (int): number of translations produced
        max_length (int): maximum length output to produce
        global_scores (:obj:`GlobalScorer`):
            object to rescore final translations
        copy_attn (bool): use copy attention during translation
        beam_trace (bool): trace beam search for debugging
        logger(logging.Logger): logger.
    """

    def __init__(self, args, model, vocab, symbols, global_scorer=None, logger=None):
        """Store the model, vocabulary and beam-search hyper-parameters.

        Args:
            args: namespace providing ``beam_size``, ``min_length``,
                ``max_length`` and ``block_trigram``.
            model: encoder/decoder model exposing ``bert``, ``decoder`` and
                ``generator`` — assumed from the attribute accesses below.
            vocab: vocabulary exposing ``ids_to_tokens`` and
                ``convert_ids_to_tokens``.
            symbols (dict): special token ids under keys "BOS" and "EOS".
            global_scorer: length re-ranking scorer (only ``alpha`` is read).
            logger (logging.Logger, optional): logger.
        """
        self.logger = logger
        self.args = args
        self.model = model
        self.generator = self.model.generator
        self.vocab = vocab
        self.symbols = symbols
        self.start_token = symbols["BOS"]
        self.end_token = symbols["EOS"]
        self.global_scorer = global_scorer
        self.beam_size = args.beam_size
        self.min_length = args.min_length
        self.max_length = args.max_length

    def translate(self, batch, step, attn_debug=False):
        """ Generates summaries from one batch of data.
        """
        # `step` and `attn_debug` are accepted for interface compatibility
        # but not used in this body.
        self.model.eval()
        with torch.no_grad():
            batch_data = self.translate_batch(batch)
            translations = self.from_batch(batch_data)
        return translations

    def translate_batch(self, batch, fast=False):
        """
        Translate a batch of sentences.

        Mostly a wrapper around :obj:`Beam`.

        Args:
            batch (:obj:`Batch`): a batch from a dataset object
            data (:obj:`Dataset`): the dataset object
            fast (bool): enables fast beam search (may not support all features)

        Todo:
            Shouldn't need the original dataset.
        """
        with torch.no_grad():
            return self._fast_translate_batch(
                batch, self.max_length, min_length=self.min_length
            )

    # Where the beam search lives
    # I have no idea why it is being called from the method above
    def _fast_translate_batch(self, batch, max_length, min_length=0):
        """ Beam Search using the encoder inputs contained in `batch`.

        Returns a dict with keys "predictions", "scores", "gold_score" and
        "batch"; predictions/scores hold one list per batch sentence.
        """
        # The batch object is funny
        # Instead of just looking at the size of the arguments we encapsulate
        # a size argument.
        # Where is it defined?
        beam_size = self.beam_size
        batch_size = batch.batch_size
        src = batch.src
        segs = batch.segs
        mask_src = batch.mask_src

        src_features = self.model.bert(src, segs, mask_src)
        dec_states = self.model.decoder.init_decoder_state(
            src, src_features, with_cache=True
        )
        device = src_features.device

        # Tile states and memory beam_size times.
        dec_states.map_batch_fn(lambda state, dim: tile(state, beam_size, dim=dim))
        src_features = tile(src_features, beam_size, dim=0)
        batch_offset = torch.arange(batch_size, dtype=torch.long, device=device)
        beam_offset = torch.arange(
            0, batch_size * beam_size, step=beam_size, dtype=torch.long, device=device
        )
        alive_seq = torch.full(
            [batch_size * beam_size, 1], self.start_token, dtype=torch.long, device=device
        )

        # Give full probability to the first beam on the first step.
        topk_log_probs = torch.tensor(
            [0.0] + [float("-inf")] * (beam_size - 1), device=device
        ).repeat(batch_size)

        # Structure that holds finished hypotheses.
        hypotheses = [[] for _ in range(batch_size)]  # noqa: F812

        results = {}
        results["predictions"] = [[] for _ in range(batch_size)]  # noqa: F812
        results["scores"] = [[] for _ in range(batch_size)]  # noqa: F812
        results["gold_score"] = [0] * batch_size
        results["batch"] = batch

        for step in range(max_length):
            # Only the most recent token of each alive beam feeds the decoder
            # (earlier positions are covered by the decoder cache).
            decoder_input = alive_seq[:, -1].view(1, -1)

            # Decoder forward.
            decoder_input = decoder_input.transpose(0, 1)
            dec_out, dec_states = self.model.decoder(
                decoder_input, src_features, dec_states, step=step
            )

            # Generator forward.
            log_probs = self.generator.forward(dec_out.transpose(0, 1).squeeze(0))
            vocab_size = log_probs.size(-1)

            # Forbid EOS until the minimum length is reached.
            if step < min_length:
                log_probs[:, self.end_token] = -1e20

            # Multiply probs by the beam probability.
            log_probs += topk_log_probs.view(-1).unsqueeze(1)

            alpha = self.global_scorer.alpha
            length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha

            # Flatten probs into a list of possibilities.
            curr_scores = log_probs / length_penalty

            if self.args.block_trigram:
                cur_len = alive_seq.size(1)
                if cur_len > 3:
                    for i in range(alive_seq.size(0)):
                        fail = False
                        words = [int(w) for w in alive_seq[i]]
                        words = [self.vocab.ids_to_tokens[w] for w in words]
                        words = " ".join(words).replace(" ##", "").split()
                        if len(words) <= 3:
                            continue
                        # The comprehension's `i` is local to it and does not
                        # clobber the beam index `i` of the outer loop.
                        trigrams = [
                            (words[i - 1], words[i], words[i + 1])
                            for i in range(1, len(words) - 1)
                        ]
                        trigram = tuple(trigrams[-1])
                        if trigram in trigrams[:-1]:
                            fail = True
                        if fail:
                            # Kill beams that just repeated a trigram.
                            curr_scores[i] = -10e20

            curr_scores = curr_scores.reshape(-1, beam_size * vocab_size)
            topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1)

            # Recover log probs.
            topk_log_probs = topk_scores * length_penalty

            # Resolve beam origin and true word ids.
            # NOTE(review): relies on integer-tensor `div` doing floor
            # division; newer torch versions return true division — confirm
            # the pinned torch version (or use rounding_mode="floor").
            topk_beam_index = topk_ids.div(vocab_size)
            topk_ids = topk_ids.fmod(vocab_size)

            # Map beam_index to batch_index in the flat representation.
            batch_index = topk_beam_index + beam_offset[
                : topk_beam_index.size(0)
            ].unsqueeze(1)
            select_indices = batch_index.view(-1)

            # Append last prediction.
            alive_seq = torch.cat(
                [alive_seq.index_select(0, select_indices), topk_ids.view(-1, 1)], -1
            )

            is_finished = topk_ids.eq(self.end_token)
            if step + 1 == max_length:
                is_finished.fill_(1)

            # End condition is top beam is finished.
            end_condition = is_finished[:, 0].eq(1)

            # Save finished hypotheses.
            if is_finished.any():
                predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1))
                for i in range(is_finished.size(0)):
                    b = batch_offset[i]
                    if end_condition[i]:
                        is_finished[i].fill_(1)
                    finished_hyp = is_finished[i].nonzero().view(-1)

                    # Store finished hypotheses for this batch.
                    for j in finished_hyp:
                        hypotheses[b].append((topk_scores[i, j], predictions[i, j, 1:]))

                    # If the batch reached the end, save the n_best hypotheses.
                    if end_condition[i]:
                        best_hyp = sorted(hypotheses[b], key=lambda x: x[0], reverse=True)
                        score, pred = best_hyp[0]
                        results["scores"][b].append(score)
                        results["predictions"][b].append(pred)
                non_finished = end_condition.eq(0).nonzero().view(-1)

                # If all sentences are translated, no need to go further.
                if len(non_finished) == 0:
                    break

                # Remove finished batches for the next step.
                topk_log_probs = topk_log_probs.index_select(0, non_finished)
                batch_index = batch_index.index_select(0, non_finished)
                batch_offset = batch_offset.index_select(0, non_finished)
                alive_seq = predictions.index_select(0, non_finished).view(
                    -1, alive_seq.size(-1)
                )

            # Reorder states.
            select_indices = batch_index.view(-1)
            src_features = src_features.index_select(0, select_indices)
            dec_states.map_batch_fn(
                lambda state, dim: state.index_select(dim, select_indices)
            )

        return results

    def from_batch(self, translation_batch):
        """Turn beam-search results into (prediction, gold, raw_src) triples."""
        batch = translation_batch["batch"]
        assert len(translation_batch["gold_score"]) == len(translation_batch["predictions"])
        batch_size = batch.batch_size

        # scores and gold_score are unpacked but unused here.
        preds, _, _, tgt_str, src = (
            translation_batch["predictions"],
            translation_batch["scores"],
            translation_batch["gold_score"],
            batch.tgt_str,
            batch.src,
        )

        translations = []
        for b in range(batch_size):
            # Best (first) hypothesis only; " ##" joins WordPiece subwords.
            pred_sents = self.vocab.convert_ids_to_tokens([int(n) for n in preds[b][0]])
            pred_sents = " ".join(pred_sents).replace(" ##", "")
            gold_sent = " ".join(tgt_str[b].split())
            # Source is truncated to 500 tokens for display.
            raw_src = [self.vocab.ids_to_tokens[int(t)] for t in src[b]][:500]
            raw_src = " ".join(raw_src)
            translation = (pred_sents, gold_sent, raw_src)
            translations.append(translation)

        return translations
def tile(x, count, dim=0):
    """
    Tiles x on dimension dim count times.

    Each slice along ``dim`` is repeated ``count`` times consecutively,
    e.g. rows (a, b) tiled with count=2 on dim 0 give (a, a, b, b).

    Args:
        x (`Tensor`): tensor to tile.
        count (int): number of repetitions per slice.
        dim (int): dimension along which to tile.
    Returns:
        (`Tensor`): tensor whose size along ``dim`` is multiplied by ``count``.
    """
    # The original permute/view/repeat/transpose dance implemented exactly
    # torch.repeat_interleave; delegate to the dedicated primitive, which is
    # clearer and avoids the intermediate copies.
    return x.repeat_interleave(count, dim=dim)
#
# Optimizer for training. We keep this here in case we want to add
# a finetuning script.
#
class BertSumOptimizer(object):
    """ Specific optimizer for BertSum.

    As described in [1], the authors fine-tune BertSum for abstractive
    summarization using two Adam Optimizers with different warm-up steps and
    learning rate. They also use a custom learning rate scheduler.

    [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders."
        arXiv preprint arXiv:1908.08345 (2019).

    Args:
        model: model exposing ``encoder`` and ``decoder`` sub-modules.
        lr (dict): per-stack learning rates, keys "encoder" and "decoder".
        warmup_steps (dict): per-stack warm-up steps, same keys.
        beta_1 (float): Adam beta1, shared by both optimizers.
        beta_2 (float): Adam beta2, shared by both optimizers.
        eps (float): Adam epsilon, shared by both optimizers.
    """

    def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8):
        self.encoder = model.encoder
        self.decoder = model.decoder
        self.lr = lr
        self.warmup_steps = warmup_steps

        self.optimizers = {
            "encoder": torch.optim.Adam(
                model.encoder.parameters(),
                lr=lr["encoder"],
                betas=(beta_1, beta_2),
                eps=eps,
            ),
            "decoder": torch.optim.Adam(
                model.decoder.parameters(),
                lr=lr["decoder"],
                betas=(beta_1, beta_2),
                eps=eps,
            ),
        }

        self._step = 0
        self.current_learning_rates = {}

    def _update_rate(self, stack):
        # Noam-style schedule: linear warm-up then inverse-sqrt decay.
        return self.lr[stack] * min(
            self._step ** (-0.5), self._step * self.warmup_steps[stack] ** (-1.5)
        )

    def zero_grad(self):
        """Clear the gradients of both managed optimizers."""
        # BUG FIX: this previously referenced the non-existent attributes
        # `self.optimizer_decoder` / `self.optimizer_encoder` and raised
        # AttributeError on every call.
        for optimizer in self.optimizers.values():
            optimizer.zero_grad()

    def step(self):
        """Advance the schedule, set each stack's learning rate and step it."""
        self._step += 1
        for stack, optimizer in self.optimizers.items():
            new_rate = self._update_rate(stack)
            for param_group in optimizer.param_groups:
                param_group["lr"] = new_rate
            optimizer.step()
            self.current_learning_rates[stack] = new_rate
| 34.567126
| 169
| 0.581771
|
4a0fd5f2fa2be782035e2b6eb292d6e8b8160216
| 835
|
py
|
Python
|
src/python/comments/comment_extractor.py
|
pamelarussell/github-bioinformatics
|
0e7184cae57426c25cfa0e838637d34adf0a59e7
|
[
"MIT"
] | 32
|
2018-05-14T20:34:08.000Z
|
2022-03-22T12:37:19.000Z
|
src/python/comments/comment_extractor.py
|
pamelarussell/github-bioinformatics
|
0e7184cae57426c25cfa0e838637d34adf0a59e7
|
[
"MIT"
] | null | null | null |
src/python/comments/comment_extractor.py
|
pamelarussell/github-bioinformatics
|
0e7184cae57426c25cfa0e838637d34adf0a59e7
|
[
"MIT"
] | 6
|
2018-07-11T17:15:07.000Z
|
2021-08-02T19:51:40.000Z
|
import abc
class CommentExtractor(abc.ABC):
    """ Abstract class for extracting comments from source code

    Subclasses implement the functionality for specific languages
    """
    # BUG FIX: the class previously declared ``__metaclass__ = abc.ABCMeta``,
    # which is Python 2 syntax and has no effect under Python 3, so the
    # @abstractmethod below was never enforced. Deriving from ``abc.ABC``
    # restores the intended abstract-class behavior.

    @abc.abstractmethod
    def extract_comments(self, file_contents):
        """ Returns a list of comments in the source code content

        Args:
            file_contents: Contents of a source code file as a single string including newline characters

        Returns:
            List of comments in the source code. Each multiline comment is one element of the list,
            regardless of how many lines it spans in the source code. Comment characters
            are removed.

            * COMMENTS ARE NOT NECESSARILY RETURNED IN ORDER *
        """
        return
| 30.925926
| 105
| 0.644311
|
4a0fd6bd6e199b45270b180d23f4a581625e92dc
| 2,585
|
py
|
Python
|
downloader/downloads_app.py
|
allanvobraun/SnesGameManager
|
74d7040206d88aec452ad50ff2fc17193759d70b
|
[
"MIT"
] | 7
|
2020-02-18T01:34:51.000Z
|
2021-09-12T00:55:34.000Z
|
downloader/downloads_app.py
|
allanvobraun/SnesGameManager
|
74d7040206d88aec452ad50ff2fc17193759d70b
|
[
"MIT"
] | null | null | null |
downloader/downloads_app.py
|
allanvobraun/SnesGameManager
|
74d7040206d88aec452ad50ff2fc17193759d70b
|
[
"MIT"
] | 2
|
2020-05-10T03:39:16.000Z
|
2020-07-20T18:33:39.000Z
|
from time import sleep
from PyQt5.QtCore import QThread, pyqtSignal, QObject
from PyQt5.QtWidgets import QDialog
from math import floor
from downloader.thread_fix import nongui
from downloader.ui.download_dialog import *
from downloader.format_download import download_cover
from main import ROOT_DIR
"""
Faz todo o processo para baixar as capas dos games
"""
class Flager(QObject):  # Class used to create event triggers (signals)
    downloaded = pyqtSignal()  # Signals that one download has completed
class DowloadThread(QThread):  # Class that manages the download process
    # NOTE(review): the class name is missing an "n" ("Download"); kept as-is
    # because callers reference it by this name.
    def __init__(self, roms):
        """Keep the list of roms to download and create the signal emitter."""
        QThread.__init__(self)
        self.break_loop = False  # cooperative cancellation flag, checked in work()
        self.roms = roms
        self.flare = Flager()  # emits `downloaded` once per finished cover
    def __del__(self):
        print("Tread finished")
        self.wait()  # block until the thread has actually stopped
    def break_thread(self):
        """Request cancellation of the download loop and quit the thread."""
        self.break_loop = True
        self.quit()
    def run(self):
        self.work()
    @nongui
    def work(self):
        """Download one cover per rom, emitting `downloaded` after each one."""
        for rom in self.roms:
            if self.break_loop:
                break
            download_cover(rom, out_path=f"{ROOT_DIR}/covers")
            self.flare.downloaded.emit()
class DownloadDialog(QDialog, Ui_DownloadDialog):  # Download popup dialog
    def __init__(self, roms, parent=None):
        """Set up the UI, the progress counters and the download thread."""
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setFixedSize(self.width(), self.height())
        # attributes
        self.roms = roms
        self.roms_amount = len(self.roms)
        self.completed = 0  # number of covers downloaded so far
        self.percent = 0  # integer progress in [0, 100]
        self.progressBar.setValue(self.percent)
        self.th = DowloadThread(self.roms)
        # connections
        self.buttonBox.rejected.connect(self.cancel)
        self.th.flare.downloaded.connect(self.add_progress)
    def closeEvent(self, *args, **kwargs):  # dialog close event handler
        # NOTE(review): super(QDialog, self) skips QDialog's own closeEvent in
        # the MRO — presumably intentional; confirm before changing.
        super(QDialog, self).closeEvent(*args, **kwargs)
        self.cancel()
        self.close()
        del self
    def start_download(self):  # starts the download
        # NOTE(review): this calls th.run() directly rather than th.start(),
        # so the loop runs in the calling thread; the @nongui decorator on
        # work() appears to supply the asynchrony — verify.
        self.show()
        self.th.run()
    def cancel(self):  # aborts the download process
        self.th.break_thread()
    def add_progress(self):  # Updates the progress bar
        self.completed += 1
        self.percent = floor((self.completed / self.roms_amount) * 100)
        self.progressBar.setValue(self.percent)
        if self.percent == 100:
            self.change_label("Download complete")
            sleep(0.5)  # short pause so the user sees the completed state
            self.close()
    def change_label(self, txt):  # Replaces the dialog's label text
        self.label.setText(txt)
| 29.044944
| 80
| 0.64294
|
4a0fd7047a310c3d87a28329c4e1f185ece3c744
| 7,498
|
py
|
Python
|
neurolang/utils/tests/test_relational_algebra_set.py
|
tgy/NeuroLang
|
4b2fd4202db40de8336a82938ec50cd41055ee28
|
[
"BSD-3-Clause"
] | null | null | null |
neurolang/utils/tests/test_relational_algebra_set.py
|
tgy/NeuroLang
|
4b2fd4202db40de8336a82938ec50cd41055ee28
|
[
"BSD-3-Clause"
] | null | null | null |
neurolang/utils/tests/test_relational_algebra_set.py
|
tgy/NeuroLang
|
4b2fd4202db40de8336a82938ec50cd41055ee28
|
[
"BSD-3-Clause"
] | null | null | null |
from ..relational_algebra_set import (NamedRelationalAlgebraFrozenSet,
RelationalAlgebraFrozenSet,
RelationalAlgebraSet)
def test_relational_algebra_set_semantics_empty():
    """An empty set has arity 0; adding a tuple fixes the arity."""
    empty_set = RelationalAlgebraSet()
    assert len(empty_set) == 0
    assert empty_set.arity == 0
    assert list(iter(empty_set)) == []
    empty_set.add((0, 1))
    assert (0, 1) in empty_set
    assert len(empty_set) == 1
    assert empty_set.arity == 2
def test_relational_algebra_set_semantics():
    """discard/add keep membership and cardinality coherent."""
    elements = [5, 4, 3, 2, 3, 1]
    ras = RelationalAlgebraSet(elements)
    # the duplicated 3 collapses into one element
    assert len(ras) == len(elements) - 1
    ras.discard(5)
    assert 5 not in ras
    assert len(ras) == len(elements) - 2
    ras.add(10)
    assert 10 in ras
    assert len(ras) == len(elements) - 1
    assert all(e in ras for e in elements if e != 5)
def test_relational_algebra_ra_projection():
    """Projection keeps only the requested columns (with deduplication)."""
    a = [(i % 2, i, i * 2) for i in range(5)]
    ras = RelationalAlgebraSet(a)

    ras_0 = ras.projection(0)
    assert (0,) in ras_0 and (1,) in ras_0
    assert len(ras_0) == 2

    ras_0 = ras.projection(0, 2)
    # BUG FIX: the original asserted `all((i % 2, i * 2) for ...)`, which is
    # vacuously true (non-empty tuples are truthy) and tested nothing;
    # actually check membership, mirroring the named-set variant.
    assert all((i % 2, i * 2) in ras_0 for i in range(5))
def test_relational_algebra_ra_selection():
    """Selection filters rows by exact per-column values."""
    tuples = [(i % 2, i, i * 2) for i in range(5)]
    ras = RelationalAlgebraSet(tuples)

    selected = ras.selection({0: 1})
    expected = {t for t in tuples if t[0] == 1}
    assert selected == expected

    selected = ras.selection({0: 1, 1: 2})
    expected = {t for t in tuples if t[0] == 1 and t[1] == 2}
    assert selected == expected
def test_relational_algebra_ra_equijoin():
    """Equijoin on matching columns concatenates the joined tuples."""
    left = [(i, i * 2) for i in range(5)]
    right = [(i * 2, i * 3) for i in range(5)]
    joined = [(i, i * 2, i * 2, i * 3) for i in range(5)]
    self_joined = [(i, i * 2, i, i * 2) for i in range(5)]

    ras_left = RelationalAlgebraSet(left)
    ras_right = RelationalAlgebraSet(right)

    assert ras_left.equijoin(ras_right, [(1, 0)]) == RelationalAlgebraSet(joined)
    assert ras_left.equijoin(ras_left, [(0, 0)]) == RelationalAlgebraSet(self_joined)
def test_relational_algebra_ra_cross_product():
    """Cross product concatenates every pair of tuples."""
    left = [(i, i * 2) for i in range(5)]
    right = [(i * 2, i * 3) for i in range(5)]
    expected = [l + r for l in left for r in right]

    res = RelationalAlgebraSet(left).cross_product(RelationalAlgebraSet(right))
    assert res == RelationalAlgebraSet(expected)
def test_relational_algebra_ra_equijoin_mixed_types():
    """Equijoin also works when non-joined columns hold strings."""
    left = [(chr(ord('a') + i), i * 2) for i in range(5)]
    right = [(i * 2, i * 3) for i in range(5)]
    expected = [(chr(ord('a') + i), i * 2, i * 2, i * 3) for i in range(5)]

    res = RelationalAlgebraSet(left).equijoin(RelationalAlgebraSet(right), [(1, 0)])
    assert res == RelationalAlgebraSet(expected)
def test_groupby():
    """groupby(0) yields (key, subset) pairs in key order."""
    rows = [(i, i * j) for i in (1, 2) for j in (2, 3, 4)]
    ras = RelationalAlgebraSet(rows)
    group_one = RelationalAlgebraSet([(1, j) for j in (2, 3, 4)])
    group_two = RelationalAlgebraSet([(2, 2 * j) for j in (2, 3, 4)])

    grouped = list(ras.groupby(0))
    assert grouped[0] == (1, group_one)
    assert grouped[1] == (2, group_two)
def test_named_relational_algebra_set_semantics_empty():
    """Named sets derive arity from columns and support dict membership."""
    ras = NamedRelationalAlgebraFrozenSet(('y', 'x'))
    assert len(ras) == 0
    assert ras.arity == 2
    assert list(iter(ras)) == []

    ras = NamedRelationalAlgebraFrozenSet(('y', 'x'), [(0, 1)])
    assert len(ras) == 1
    assert ras.arity == 2
    assert (0, 1) in ras
    assert {'x': 1, 'y': 0} in ras
    assert {'y': 1, 'x': 1} not in ras
def test_named_relational_algebra_ra_projection():
    """Projection by column name keeps values and the projected columns."""
    rows = [(i % 2, i, i * 2) for i in range(5)]
    ras = NamedRelationalAlgebraFrozenSet(('x', 'y', 'z'), rows)

    only_x = ras.projection('x')
    assert (0,) in only_x and (1,) in only_x
    assert len(only_x) == 2
    assert only_x.columns == ('x',)

    xz = ras.projection('x', 'z')
    assert all((i % 2, i * 2) in xz for i in range(5))
def test_named_relational_algebra_ra_selection():
    """Selection by column name filters rows on exact values."""
    rows = [(i % 2, i, i * 2) for i in range(5)]
    ras = NamedRelationalAlgebraFrozenSet(('x', 'y', 'z'), rows)

    selected = ras.selection({'x': 1})
    expected = NamedRelationalAlgebraFrozenSet(
        ras.columns, {t for t in rows if t[0] == 1}
    )
    assert selected == expected

    selected = ras.selection({'x': 1, 'y': 2})
    expected = NamedRelationalAlgebraFrozenSet(
        ras.columns, {t for t in rows if t[0] == 1 and t[1] == 2}
    )
    assert selected == expected
def test_named_relational_algebra_ra_naturaljoin():
    """Natural join merges on shared column names."""
    left_rows = [(i, i * 2) for i in range(5)]
    right_rows = [(i * 2, i * 3) for i in range(5)]
    joined_rows = [(i, i * 2, i * 3) for i in range(5)]
    product_rows = [(i, i * 2, j * 2, j * 3) for i in range(5) for j in range(5)]

    left = NamedRelationalAlgebraFrozenSet(('z', 'y'), left_rows)
    right = NamedRelationalAlgebraFrozenSet(('y', 'x'), right_rows)
    disjoint = NamedRelationalAlgebraFrozenSet(('u', 'v'), right_rows)
    joined = NamedRelationalAlgebraFrozenSet(('z', 'y', 'x'), joined_rows)
    product = NamedRelationalAlgebraFrozenSet(('z', 'y', 'u', 'v'), product_rows)

    assert left.naturaljoin(right) == joined
    # joining a set with itself is the identity
    assert left.naturaljoin(left) == left
    # with no shared columns the join degenerates to a cross product
    assert left.naturaljoin(disjoint) == product
def test_named_relational_algebra_ra_cross_product():
    """Cross product concatenates tuples and column names."""
    left_rows = [(i, i * 2) for i in range(5)]
    right_rows = [(i * 2, i * 3) for i in range(5)]
    expected_rows = [l + r for l in left_rows for r in right_rows]

    left = NamedRelationalAlgebraFrozenSet(('x', 'y'), left_rows)
    right = NamedRelationalAlgebraFrozenSet(('u', 'v'), right_rows)
    expected = NamedRelationalAlgebraFrozenSet(('x', 'y', 'u', 'v'), expected_rows)
    assert left.cross_product(right) == expected
def test_named_relational_algebra_difference():
    """Set difference keeps only the tuples absent from the subtrahend."""
    full = [(i, i * 2) for i in range(5)]
    tail = [(i, i * 2) for i in range(1, 5)]
    head = [(i, i * 2) for i in range(1)]

    ras_full = NamedRelationalAlgebraFrozenSet(('x', 'y'), full)
    ras_tail = NamedRelationalAlgebraFrozenSet(('x', 'y'), tail)
    ras_head = NamedRelationalAlgebraFrozenSet(('x', 'y'), head)
    assert ras_full - ras_tail == ras_head
def test_named_groupby():
    """groupby('x') yields (key, named subset) pairs in key order."""
    rows = [(i, i * j) for i in (1, 2) for j in (2, 3, 4)]
    cols = ('x', 'y')
    ras = NamedRelationalAlgebraFrozenSet(cols, rows)
    expected_one = NamedRelationalAlgebraFrozenSet(cols, [(1, j) for j in (2, 3, 4)])
    expected_two = NamedRelationalAlgebraFrozenSet(cols, [(2, 2 * j) for j in (2, 3, 4)])

    groups = list(ras.groupby('x'))
    assert groups[0] == (1, expected_one)
    assert groups[1] == (2, expected_two)
def test_named_iter():
    """Iteration preserves the original tuple order."""
    rows = [(i, i * j) for i in (1, 2) for j in (2, 3, 4)]
    ras = NamedRelationalAlgebraFrozenSet(('y', 'x'), rows)
    assert list(iter(ras)) == rows
def test_rename_column():
    """rename_column changes the attribute name but not the values."""
    rows = [(i, i * j) for i in (1, 2) for j in (2, 3, 4)]
    before = NamedRelationalAlgebraFrozenSet(('y', 'x'), rows)
    after = before.rename_column('y', 'z')
    assert all(
        old.x == new.x and old.y == new.z
        for old, new in zip(before, after)
    )
def test_named_to_unnamed():
    """Dropping the column names yields the equivalent unnamed frozen set."""
    rows = [(i, i * j) for i in (1, 2) for j in (2, 3, 4)]
    named = NamedRelationalAlgebraFrozenSet(('y', 'x'), rows)
    assert named.to_unnamed() == RelationalAlgebraFrozenSet(rows)
| 26.034722
| 70
| 0.56215
|
4a0fd721f8712c05c5aaa308b142a4f2314dcac9
| 4,015
|
py
|
Python
|
nbgrader/tests/apps/test_nbgrader_fetch.py
|
FrattisUC/nbgrader
|
f6402dcbb875e41ee3317be9e7af518afda9f72c
|
[
"BSD-3-Clause-Clear"
] | 2
|
2021-09-11T20:32:18.000Z
|
2021-09-11T20:32:37.000Z
|
nbgrader/tests/apps/test_nbgrader_fetch.py
|
FrattisUC/nbgrader
|
f6402dcbb875e41ee3317be9e7af518afda9f72c
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
nbgrader/tests/apps/test_nbgrader_fetch.py
|
FrattisUC/nbgrader
|
f6402dcbb875e41ee3317be9e7af518afda9f72c
|
[
"BSD-3-Clause-Clear"
] | 1
|
2019-09-13T07:46:09.000Z
|
2019-09-13T07:46:09.000Z
|
# -*- coding: utf-8 -*-
import io
import os
from os.path import join
from .. import run_nbgrader
from .base import BaseTestApp
from .conftest import notwindows
@notwindows
class TestNbGraderFetch(BaseTestApp):
    """Exercise the ``nbgrader fetch`` app against a local exchange directory."""

    def _release(self, assignment, exchange, course_dir, course="abc101"):
        """Copy the sample notebook into the release area and publish it."""
        source = join("files", "test.ipynb")
        destination = join(course_dir, "release", assignment, "p1.ipynb")
        self._copy_file(source, destination)
        release_cmd = [
            "release", assignment,
            "--course", course,
            "--Exchange.root={}".format(exchange),
        ]
        run_nbgrader(release_cmd)

    def _fetch(self, assignment, exchange, flags=None, retcode=0, course="abc101"):
        """Fetch one assignment, optionally passing extra command-line flags."""
        fetch_cmd = [
            "fetch", assignment,
            "--course", course,
            "--Exchange.root={}".format(exchange),
        ]
        if flags is not None:
            fetch_cmd = fetch_cmd + list(flags)
        run_nbgrader(fetch_cmd, retcode=retcode)

    def _fetch_multi(self, assignments, exchange, flags=None, retcode=0, course="abc101"):
        """Fetch several assignments with a single invocation."""
        fetch_cmd = [
            "fetch",
            "--course", course,
            "--Exchange.root={}".format(exchange),
        ]
        fetch_cmd = fetch_cmd + list(assignments)
        if flags is not None:
            fetch_cmd = fetch_cmd + list(flags)
        run_nbgrader(fetch_cmd, retcode=retcode)

    def test_help(self):
        """The full help text should render without an error."""
        run_nbgrader(["fetch", "--help-all"])

    def test_no_course_id(self, exchange, course_dir):
        """Fetching without a course id should exit with an error."""
        self._release("ps1", exchange, course_dir)
        no_course_cmd = [
            "fetch", "ps1",
            "--Exchange.root={}".format(exchange),
        ]
        run_nbgrader(no_course_cmd, retcode=1)

    def test_fetch(self, exchange, course_dir):
        """Basic fetch plus the --replace semantics."""
        self._release("ps1", exchange, course_dir)
        self._fetch("ps1", exchange)
        assert os.path.isfile(join("ps1", "p1.ipynb"))

        # a second fetch without --replace must be refused
        self._fetch("ps1", exchange, retcode=1)

        # ...even when the local copy is incomplete
        os.remove(join("ps1", "p1.ipynb"))
        self._fetch("ps1", exchange, retcode=1)

        # --replace restores the missing file
        self._fetch("ps1", exchange, flags=["--replace"])
        assert os.path.isfile(join("ps1", "p1.ipynb"))

        # ...but --replace must not clobber locally modified files
        self._copy_file(join("files", "submitted-changed.ipynb"), join("ps1", "p1.ipynb"))
        with io.open(join("ps1", "p1.ipynb"), mode="r", encoding='utf-8') as handle:
            before = handle.read()
        self._fetch("ps1", exchange, flags=["--replace"])
        with io.open(join("ps1", "p1.ipynb"), mode="r", encoding='utf-8') as handle:
            after = handle.read()
        assert before == after

    def test_fetch_with_assignment_flag(self, exchange, course_dir):
        """The assignment name may also be passed via --assignment."""
        self._release("ps1", exchange, course_dir)
        self._fetch("--assignment=ps1", exchange)
        assert os.path.isfile(join("ps1", "p1.ipynb"))

    def test_fetch_multiple_courses(self, exchange, course_dir):
        """With path_includes_course, fetches from different courses don't collide."""
        include_course = ["--Exchange.path_includes_course=True"]
        for course in ("abc101", "abc102"):
            self._release("ps1", exchange, course_dir, course=course)
            self._fetch("ps1", exchange, course=course, flags=include_course)
            assert os.path.isfile(join(course, "ps1", "p1.ipynb"))

    def test_fetch_multiple_assignments(self, exchange, course_dir):
        """Several assignments can be fetched in one command."""
        include_course = ["--Exchange.path_includes_course=True"]
        self._release("ps1", exchange, course_dir, course="abc101")
        self._release("ps2", exchange, course_dir, course="abc101")
        self._fetch_multi(["ps1", "ps2"], exchange, course="abc101", flags=include_course)
        for assignment in ("ps1", "ps2"):
            assert os.path.isfile(join("abc101", assignment, "p1.ipynb"))
| 37.523364
| 116
| 0.612204
|
4a0fd7897cbb197a81dd49142ccd05b65c06cac4
| 8,868
|
py
|
Python
|
kubernetes/test/test_v1beta1_storage_class_list.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1beta1_storage_class_list.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1beta1_storage_class_list.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.v1beta1_storage_class_list import V1beta1StorageClassList # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1beta1StorageClassList(unittest.TestCase):
    """V1beta1StorageClassList unit test stubs (auto-generated by openapi-generator)."""

    def setUp(self):
        # No fixtures required: make_instance builds everything per call.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def make_instance(self, include_optional):
        """Test V1beta1StorageClassList
        include_option is a boolean, when False only required
        params are included, when True both required and
        optional params are included """
        # NOTE(review): the generated fixture expressions below are NOT valid
        # Python — `continue = '0'` assigns to a reserved keyword, and paths
        # such as `kubernetes.client.models.v1beta1/storage_class...` parse as
        # division expressions, so this module cannot be imported as written.
        # Left byte-identical pending regeneration by openapi-generator;
        # the real kwarg for ListMeta is presumably `_continue` — TODO confirm.
        # model = kubernetes.client.models.v1beta1_storage_class_list.V1beta1StorageClassList()  # noqa: E501
        if include_optional :
            # Required AND optional fields populated with placeholder values.
            return V1beta1StorageClassList(
                api_version = '0',
                items = [
                    kubernetes.client.models.v1beta1/storage_class.v1beta1.StorageClass(
                        allow_volume_expansion = True,
                        allowed_topologies = [
                            kubernetes.client.models.v1/topology_selector_term.v1.TopologySelectorTerm(
                                match_label_expressions = [
                                    kubernetes.client.models.v1/topology_selector_label_requirement.v1.TopologySelectorLabelRequirement(
                                        key = '0',
                                        values = [
                                            '0'
                                        ], )
                                ], )
                        ],
                        api_version = '0',
                        kind = '0',
                        metadata = kubernetes.client.models.v1/object_meta.v1.ObjectMeta(
                            annotations = {
                                'key' : '0'
                            },
                            cluster_name = '0',
                            creation_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                            deletion_grace_period_seconds = 56,
                            deletion_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                            finalizers = [
                                '0'
                            ],
                            generate_name = '0',
                            generation = 56,
                            labels = {
                                'key' : '0'
                            },
                            managed_fields = [
                                kubernetes.client.models.v1/managed_fields_entry.v1.ManagedFieldsEntry(
                                    api_version = '0',
                                    fields_type = '0',
                                    fields_v1 = kubernetes.client.models.fields_v1.fieldsV1(),
                                    manager = '0',
                                    operation = '0',
                                    time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
                            ],
                            name = '0',
                            namespace = '0',
                            owner_references = [
                                kubernetes.client.models.v1/owner_reference.v1.OwnerReference(
                                    api_version = '0',
                                    block_owner_deletion = True,
                                    controller = True,
                                    kind = '0',
                                    name = '0',
                                    uid = '0', )
                            ],
                            resource_version = '0',
                            self_link = '0',
                            uid = '0', ),
                        mount_options = [
                            '0'
                        ],
                        parameters = {
                            'key' : '0'
                        },
                        provisioner = '0',
                        reclaim_policy = '0',
                        volume_binding_mode = '0', )
                ],
                kind = '0',
                metadata = kubernetes.client.models.v1/list_meta.v1.ListMeta(
                    continue = '0',
                    remaining_item_count = 56,
                    resource_version = '0',
                    self_link = '0', )
            )
        else :
            # Only the required `items` field is populated.
            return V1beta1StorageClassList(
                items = [
                    kubernetes.client.models.v1beta1/storage_class.v1beta1.StorageClass(
                        allow_volume_expansion = True,
                        allowed_topologies = [
                            kubernetes.client.models.v1/topology_selector_term.v1.TopologySelectorTerm(
                                match_label_expressions = [
                                    kubernetes.client.models.v1/topology_selector_label_requirement.v1.TopologySelectorLabelRequirement(
                                        key = '0',
                                        values = [
                                            '0'
                                        ], )
                                ], )
                        ],
                        api_version = '0',
                        kind = '0',
                        metadata = kubernetes.client.models.v1/object_meta.v1.ObjectMeta(
                            annotations = {
                                'key' : '0'
                            },
                            cluster_name = '0',
                            creation_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                            deletion_grace_period_seconds = 56,
                            deletion_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                            finalizers = [
                                '0'
                            ],
                            generate_name = '0',
                            generation = 56,
                            labels = {
                                'key' : '0'
                            },
                            managed_fields = [
                                kubernetes.client.models.v1/managed_fields_entry.v1.ManagedFieldsEntry(
                                    api_version = '0',
                                    fields_type = '0',
                                    fields_v1 = kubernetes.client.models.fields_v1.fieldsV1(),
                                    manager = '0',
                                    operation = '0',
                                    time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
                            ],
                            name = '0',
                            namespace = '0',
                            owner_references = [
                                kubernetes.client.models.v1/owner_reference.v1.OwnerReference(
                                    api_version = '0',
                                    block_owner_deletion = True,
                                    controller = True,
                                    kind = '0',
                                    name = '0',
                                    uid = '0', )
                            ],
                            resource_version = '0',
                            self_link = '0',
                            uid = '0', ),
                        mount_options = [
                            '0'
                        ],
                        parameters = {
                            'key' : '0'
                        },
                        provisioner = '0',
                        reclaim_policy = '0',
                        volume_binding_mode = '0', )
                ],
            )

    def testV1beta1StorageClassList(self):
        """Test V1beta1StorageClassList"""
        # Smoke test only: constructing the models must not raise.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
| 47.42246
| 136
| 0.379229
|
4a0fd7e1e22b08b8571f5dcfc39eda1f28148d4f
| 2,404
|
py
|
Python
|
mbmega_face_expressions/actions.py
|
gregfreeman/mbmega_face_expressions
|
d593f5be2f40ffcb375119ee3fb7262916b450ca
|
[
"BSD-3-Clause"
] | null | null | null |
mbmega_face_expressions/actions.py
|
gregfreeman/mbmega_face_expressions
|
d593f5be2f40ffcb375119ee3fb7262916b450ca
|
[
"BSD-3-Clause"
] | null | null | null |
mbmega_face_expressions/actions.py
|
gregfreeman/mbmega_face_expressions
|
d593f5be2f40ffcb375119ee3fb7262916b450ca
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import numpy as np
import asyncio as aio
from time import sleep
from random import random
import math
from makeblock import MegaPi, SerialPort
log = logging.getLogger(__name__)
# Analog pin aliases (Arduino Mega numbering used by the MegaPi board):
# A6..A15 map to consecutive pin numbers 60..69.
A6, A7, A8, A9, A10, A11, A12, A13, A14, A15 = range(60, 70)

# Named RGB triples (0-255 per channel) used by the expression actions.
COLORS = {
    'black': (0, 0, 0),
    'white': (255, 255, 255),
    'red': (255, 0, 0),
    'green': (0, 255, 0),
    'blue': (0, 0, 255),
    'yellow': (255, 255, 0),
    'magenta': (255, 0, 255),
}
def set_rgb(bot, r, g, b):
    """Light both 4-LED RGB strips (ports A13 and A14) with one color."""
    strip = bot.RGBLed()
    for position in range(1, 5):
        # Mirror the same color onto the strip attached to each port.
        for port in (A13, A14):
            strip.set_color(position, r, g, b, port)
async def move_xyw(bot, x, y, w, t):
    """Drive the mecanum-wheeled base in (x, y, w) space for t seconds, then stop.

    x = forward speed
    y = leftward speed
    w = rotation about z (x cross y)
    """
    # Wheel order matches the original wiring comments:
    # right-front, right-rear (forward); left-rear, left-front (reverse).
    wheels = [
        bot.DCMotor(1, 1),  # RF forward
        bot.DCMotor(1, 2),  # RR forward
        bot.DCMotor(2, 1),  # LR reverse
        bot.DCMotor(2, 2),  # LF reverse
    ]
    # Mixing matrix: each column is one wheel's contribution from (x, y, w).
    mixing = np.array([[+1, +1, -1, -1],   # X
                       [+1, -1, -1, +1],   # Y
                       [+1, +1, +1, +1]])  # w (z rotation)
    speeds = np.array([x, y, w]) @ mixing
    for wheel, speed in zip(wheels, speeds):
        wheel.run(speed)
    await aio.sleep(t)
    # Stop all wheels after the timed move.
    for wheel in wheels:
        wheel.run(0)
async def happy_action(bot):
    """Show green and wiggle: spin one way, pause, spin back."""
    log.info('happy action')
    r, g, b = COLORS['green']
    set_rgb(bot, r, g, b)
    twist = 50
    await move_xyw(bot, 0, 0, twist, 0.3)
    await aio.sleep(0.2)
    await move_xyw(bot, 0, 0, -twist, 0.3)
async def angry_action(bot):
    """Show red and shake sideways: strafe one way, pause, strafe back."""
    log.info('angry action')
    r, g, b = COLORS['red']
    set_rgb(bot, r, g, b)
    strafe = 50
    await move_xyw(bot, 0, strafe, 0, 0.3)
    await aio.sleep(0.2)
    await move_xyw(bot, 0, -strafe, 0, 0.3)
async def show_blue(bot):
    """Set both LED strips to solid blue."""
    log.info('show_blue')
    r, g, b = COLORS['blue']
    set_rgb(bot, r, g, b)
async def surprised_action(bot):
    """Show yellow and lurch: dart forward, pause, dart back."""
    log.info('surprised_action')
    r, g, b = COLORS['yellow']
    set_rgb(bot, r, g, b)
    dash = 100
    await move_xyw(bot, dash, 0, 0, 0.3)
    await aio.sleep(0.2)
    await move_xyw(bot, -dash, 0, 0, 0.3)
async def show_magenta(bot):
    """Set both LED strips to solid magenta."""
    log.info('show_magenta')
    r, g, b = COLORS['magenta']
    set_rgb(bot, r, g, b)
async def led_off(bot):
    """Turn both LED strips off (set them to black)."""
    log.info('led_off')
    r, g, b = COLORS['black']
    set_rgb(bot, r, g, b)
| 21.087719
| 61
| 0.571131
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.