code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import sys
import unittest
import tempfile
import shutil
from pkg_resources import resource_filename #@UnresolvedImport
from pyPaSWAS.Core.Exceptions import InvalidOptionException
from pyPaSWAS import pypaswasall
class Test(unittest.TestCase):
''' Runs the application as the end-user would, testing for correct exception handling
as well as final output checks comparing generated output to reference (curated)
output. '''
def setUp(self):
# Create pyPaSWAS instance
self.instance = pypaswasall.Pypaswas()
# Input files
self.input_faa_1 = resource_filename(__name__, 'data/query_fasta.faa')
self.input_faa_2 = resource_filename(__name__, 'data/target_fasta.faa')
self.input_gb_1 = resource_filename(__name__, 'data/query_genbank.gb')
self.input_gb_2 = resource_filename(__name__, 'data/target_genbank.gb')
self.outdir = tempfile.mkdtemp(prefix='test_pypaswas_aligner')
def test_pypaswas_aligner_invalid_options(self):
''' This test checks the raised exception when arguments are missing or incorrect '''
# Missing arguments, this should raise an InvalidOptionException
sys.argv = [__name__]
self.assertRaises(InvalidOptionException, self.instance.run)
# Trying to get output using the unsupported BAM output format
sys.argv = [__name__,
self.input_faa_1,
self.input_faa_2,
'--outputformat=BAM']
self.assertRaises(InvalidOptionException, self.instance.run)
def _defunct_test_pypaswas_aligner_basic(self):
''' Input two FASTA files and align them using all default settings.
Compares the output alignment with the included reference file. '''
# Expected output
reference = resource_filename(__name__, 'data/reference/aligner_basic_output.txt')
# Most basic alignment (default settings, default alignment output format)
outfile = '{}/basic_alignment.txt'.format(self.outdir)
sys.argv = [__name__,
self.input_faa_1,
self.input_faa_2,
'-o', outfile]
# Start pyPaSWAS using defined arguments in sys.argv
self.instance.run()
# Read output / reference file
expected = _read_file(reference)
actual = _read_file(outfile)
# Compare output with reference
self.assertEqual(actual, expected)
def test_pypaswas_aligner_basic_genbank(self):
''' Input two Genbank files and align them using all default settings.
Compares the output alignment with the included reference file. '''
# Expected output
reference = resource_filename(__name__, 'data/reference/aligner_basic_genbank_output.txt')
# Most basic alignment (default settings, default alignment output format)
outfile = '{}/basic_alignment_genbank.txt'.format(self.outdir)
sys.argv = [__name__,
self.input_gb_1,
self.input_gb_2,
# Set correct file types
'--filetype1', 'genbank',
'--filetype2', 'genbank',
'-o', outfile]
# Start pyPaSWAS using defined arguments in sys.argv
self.instance.run()
# Read output / reference file
expected = _read_file(reference)
actual = _read_file(outfile)
# Compare output with reference
self.assertEqual(actual, expected)
def test_pypaswas_aligner_indel(self):
pass
def test_pypaswas_aligner_sam(self):
''' Single test function to check complete SAM output format. The given input and
parameters include all possible alignment types which is compared to the reference
SAM file. '''
pass
def tearDown(self):
# Cleanup
shutil.rmtree(self.outdir)
def _read_file(filename):
''' Helper method to quickly read a file
@param filename: file to read '''
with open(filename) as handle:
return handle.read() | implementations/warris2018/pyPaSWAS/Core/test/test_pypaswasall.py | import sys
import unittest
import tempfile
import shutil
from pkg_resources import resource_filename #@UnresolvedImport
from pyPaSWAS.Core.Exceptions import InvalidOptionException
from pyPaSWAS import pypaswasall
class Test(unittest.TestCase):
''' Runs the application as the end-user would, testing for correct exception handling
as well as final output checks comparing generated output to reference (curated)
output. '''
def setUp(self):
# Create pyPaSWAS instance
self.instance = pypaswasall.Pypaswas()
# Input files
self.input_faa_1 = resource_filename(__name__, 'data/query_fasta.faa')
self.input_faa_2 = resource_filename(__name__, 'data/target_fasta.faa')
self.input_gb_1 = resource_filename(__name__, 'data/query_genbank.gb')
self.input_gb_2 = resource_filename(__name__, 'data/target_genbank.gb')
self.outdir = tempfile.mkdtemp(prefix='test_pypaswas_aligner')
def test_pypaswas_aligner_invalid_options(self):
''' This test checks the raised exception when arguments are missing or incorrect '''
# Missing arguments, this should raise an InvalidOptionException
sys.argv = [__name__]
self.assertRaises(InvalidOptionException, self.instance.run)
# Trying to get output using the unsupported BAM output format
sys.argv = [__name__,
self.input_faa_1,
self.input_faa_2,
'--outputformat=BAM']
self.assertRaises(InvalidOptionException, self.instance.run)
def _defunct_test_pypaswas_aligner_basic(self):
''' Input two FASTA files and align them using all default settings.
Compares the output alignment with the included reference file. '''
# Expected output
reference = resource_filename(__name__, 'data/reference/aligner_basic_output.txt')
# Most basic alignment (default settings, default alignment output format)
outfile = '{}/basic_alignment.txt'.format(self.outdir)
sys.argv = [__name__,
self.input_faa_1,
self.input_faa_2,
'-o', outfile]
# Start pyPaSWAS using defined arguments in sys.argv
self.instance.run()
# Read output / reference file
expected = _read_file(reference)
actual = _read_file(outfile)
# Compare output with reference
self.assertEqual(actual, expected)
def test_pypaswas_aligner_basic_genbank(self):
''' Input two Genbank files and align them using all default settings.
Compares the output alignment with the included reference file. '''
# Expected output
reference = resource_filename(__name__, 'data/reference/aligner_basic_genbank_output.txt')
# Most basic alignment (default settings, default alignment output format)
outfile = '{}/basic_alignment_genbank.txt'.format(self.outdir)
sys.argv = [__name__,
self.input_gb_1,
self.input_gb_2,
# Set correct file types
'--filetype1', 'genbank',
'--filetype2', 'genbank',
'-o', outfile]
# Start pyPaSWAS using defined arguments in sys.argv
self.instance.run()
# Read output / reference file
expected = _read_file(reference)
actual = _read_file(outfile)
# Compare output with reference
self.assertEqual(actual, expected)
def test_pypaswas_aligner_indel(self):
pass
def test_pypaswas_aligner_sam(self):
''' Single test function to check complete SAM output format. The given input and
parameters include all possible alignment types which is compared to the reference
SAM file. '''
pass
def tearDown(self):
# Cleanup
shutil.rmtree(self.outdir)
def _read_file(filename):
''' Helper method to quickly read a file
@param filename: file to read '''
with open(filename) as handle:
return handle.read() | 0.36659 | 0.305892 |
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import rabbitmq_class
import version
__version__ = version.__version__
class SetupExchange(object):
"""Class: SetupExchange
Description: Class stub holder for pika class.
Methods:
__init__
exchange_declare
"""
def __init__(self):
"""Function: __init__
Description: Stub holder for __init__ function.
Arguments:
"""
self.exchange = None
self.exchange_type = None
self.durable = None
def exchange_declare(self, exchange, exchange_type, durable):
"""Function: exchange_declare
Description: Stub holder for exchange_declare function.
Arguments:
exchange
exchange_type
durable
"""
self.exchange = exchange
self.exchange_type = exchange_type
self.durable = durable
return True
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_setup_exchange
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.name = None
self.host = "ServerName"
self.port = 5555
self.connection = None
self.exchange_name = "Exchange_Name"
self.queue_name = "Queue_Name"
self.routing_key = "Route_Key"
self.auto_delete = True
@mock.patch("rabbitmq_class.pika")
def test_setup_exchange(self, mock_pika):
"""Function: test_setup_exchange
Description: Test setup_exchange method.
Arguments:
"""
mock_pika.PlainCredentials.return_value = "PlainCredentials"
mock_pika.ConnectionParameters.return_value = "ConnectionParameters"
rmq = rabbitmq_class.RabbitMQPub(self.name, "xxxxx")
rmq.channel = SetupExchange()
self.assertFalse(rmq.setup_exchange())
if __name__ == "__main__":
unittest.main() | test/unit/rabbitmq_class/rabbitmqpub_setupexchange.py | # Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import rabbitmq_class
import version
__version__ = version.__version__
class SetupExchange(object):
"""Class: SetupExchange
Description: Class stub holder for pika class.
Methods:
__init__
exchange_declare
"""
def __init__(self):
"""Function: __init__
Description: Stub holder for __init__ function.
Arguments:
"""
self.exchange = None
self.exchange_type = None
self.durable = None
def exchange_declare(self, exchange, exchange_type, durable):
"""Function: exchange_declare
Description: Stub holder for exchange_declare function.
Arguments:
exchange
exchange_type
durable
"""
self.exchange = exchange
self.exchange_type = exchange_type
self.durable = durable
return True
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_setup_exchange
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.name = None
self.host = "ServerName"
self.port = 5555
self.connection = None
self.exchange_name = "Exchange_Name"
self.queue_name = "Queue_Name"
self.routing_key = "Route_Key"
self.auto_delete = True
@mock.patch("rabbitmq_class.pika")
def test_setup_exchange(self, mock_pika):
"""Function: test_setup_exchange
Description: Test setup_exchange method.
Arguments:
"""
mock_pika.PlainCredentials.return_value = "PlainCredentials"
mock_pika.ConnectionParameters.return_value = "ConnectionParameters"
rmq = rabbitmq_class.RabbitMQPub(self.name, "xxxxx")
rmq.channel = SetupExchange()
self.assertFalse(rmq.setup_exchange())
if __name__ == "__main__":
unittest.main() | 0.423696 | 0.308451 |
import copy
import unittest
import os
import subprocess
import sys
import tempfile
from screwdrivercd.screwdriver.metadata import Metadata
class ScrewdriverTestCase(unittest.TestCase):
"""
Test case class for testing screwdriver wrappers that perform common operations like saving/restoring the environment
variables for each test.
"""
cwd = None
orig_argv = None
orig_environ =None
tempdir = None
environ_keys = {
'BASE_PYTHON',
'CHANGELOG_FILENAME', 'CHANGELOG_ONLY_VERSION_TAGS', 'CHANGELOG_RELEASES',
'GIT_DEPLOY_KEY',
'PACKAGE_DIR', 'PACKAGE_DIRECTORY', 'PACKAGE_TAG',
'PUBLISH', 'PUBLISH_PYTHON', 'PUBLISH_PYTHON_FAIL_MISSING_CRED',
'PYPI_USER', 'PYPI_PASSWORD',
'PYROMA_MIN_SCORE',
'SD_ARTIFACTS_DIR', 'SD_BUILD', 'SD_BUILD_ID', 'SD_PULL_REQUEST',
'TEST_UTILITY_ENV_BOOL', 'TOX_ENVLIST', 'TOX_ARGS',
'VALIDATE_PACKAGE_QUALITY_FAIL_MISSING'
}
meta_version = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Save the original sys.argv, working directory and environment variable values so they can be modified
# freely in the tests and restored after each test.
self.orig_argv = sys.argv
self.cwd = os.getcwd()
self.orig_environ = copy.copy(os.environ)
# Save the value of the "package.version" from the screwdriver pipeline metadata if it is present
try:
self.meta_version = subprocess.check_output(['meta', 'get', 'package.version']).decode(errors='ignore').strip() # nosec
except (FileNotFoundError, subprocess.CalledProcessError):
pass
def setUp(self):
Metadata.read_only = True
self.tempdir = tempfile.TemporaryDirectory()
os.chdir(self.tempdir.name)
# Delete keys in the environ keys so they aren't set
self.delkeys(self.environ_keys)
# Create expected CI directories
self.artifacts_dir = os.path.join(self.tempdir.name, 'artifacts')
os.makedirs(self.artifacts_dir, exist_ok=True)
os.environ['SD_ARTIFACTS_DIR'] = self.artifacts_dir
# Make sure the value of SD_PULL_REQUEST is always unset
os.environ['SD_PULL_REQUEST'] = ''
def tearDown(self):
Metadata.read_only = False
# Restore sys.argv
if self.orig_argv:
sys.argv = self.orig_argv
# Return to the original working directory
if self.cwd:
os.chdir(self.cwd)
# Clean up the temporary directory
if self.tempdir:
self.tempdir.cleanup()
self.tempdir = None
# Reset the environment variables to the original values
for environ_key in self.environ_keys:
environ_value = self.orig_environ.get(environ_key, None)
if environ_value:
os.environ[environ_key] = self.orig_environ[environ_key]
elif environ_value is None and environ_key in os.environ.keys():
del os.environ['environ_key']
# Restore the package.version if it was saved
if self.meta_version and self.meta_version != 'null': # Make sure meta_version gets set back
try:
subprocess.check_call(['meta', 'set', 'package.version', self.meta_version]) # nosec
except FileNotFoundError:
pass
def delkeys(self, keys):
"""
Delete keys from the environment
Parameters
----------
keys: list
The environment keys to remove
"""
for key in keys:
try:
del os.environ[key]
except KeyError:
pass
def write_config_files(self, config_files):
for filename, contents in config_files.items():
dirname = os.path.dirname(filename)
if dirname:
os.makedirs(dirname, exist_ok=True)
with open(filename, 'wb') as fh:
fh.write(contents)
def setupEmptyGit(self):
"""
Set up an empty git repo, in the current directory
"""
subprocess.check_call(['git', 'init'])
subprocess.check_call(['git', 'config', 'user.email', '<EMAIL>'])
subprocess.check_call(['git', 'config', 'user.name', 'foo'])
with open('setup.cfg', 'w') as setup_handle:
setup_handle.write('')
subprocess.check_call(['git', 'add', 'setup.cfg'])
subprocess.check_call(['git', 'commit', '-a', '-m', 'initial']) | tests/__init__.py | import copy
import unittest
import os
import subprocess
import sys
import tempfile
from screwdrivercd.screwdriver.metadata import Metadata
class ScrewdriverTestCase(unittest.TestCase):
"""
Test case class for testing screwdriver wrappers that perform common operations like saving/restoring the environment
variables for each test.
"""
cwd = None
orig_argv = None
orig_environ =None
tempdir = None
environ_keys = {
'BASE_PYTHON',
'CHANGELOG_FILENAME', 'CHANGELOG_ONLY_VERSION_TAGS', 'CHANGELOG_RELEASES',
'GIT_DEPLOY_KEY',
'PACKAGE_DIR', 'PACKAGE_DIRECTORY', 'PACKAGE_TAG',
'PUBLISH', 'PUBLISH_PYTHON', 'PUBLISH_PYTHON_FAIL_MISSING_CRED',
'PYPI_USER', 'PYPI_PASSWORD',
'PYROMA_MIN_SCORE',
'SD_ARTIFACTS_DIR', 'SD_BUILD', 'SD_BUILD_ID', 'SD_PULL_REQUEST',
'TEST_UTILITY_ENV_BOOL', 'TOX_ENVLIST', 'TOX_ARGS',
'VALIDATE_PACKAGE_QUALITY_FAIL_MISSING'
}
meta_version = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Save the original sys.argv, working directory and environment variable values so they can be modified
# freely in the tests and restored after each test.
self.orig_argv = sys.argv
self.cwd = os.getcwd()
self.orig_environ = copy.copy(os.environ)
# Save the value of the "package.version" from the screwdriver pipeline metadata if it is present
try:
self.meta_version = subprocess.check_output(['meta', 'get', 'package.version']).decode(errors='ignore').strip() # nosec
except (FileNotFoundError, subprocess.CalledProcessError):
pass
def setUp(self):
Metadata.read_only = True
self.tempdir = tempfile.TemporaryDirectory()
os.chdir(self.tempdir.name)
# Delete keys in the environ keys so they aren't set
self.delkeys(self.environ_keys)
# Create expected CI directories
self.artifacts_dir = os.path.join(self.tempdir.name, 'artifacts')
os.makedirs(self.artifacts_dir, exist_ok=True)
os.environ['SD_ARTIFACTS_DIR'] = self.artifacts_dir
# Make sure the value of SD_PULL_REQUEST is always unset
os.environ['SD_PULL_REQUEST'] = ''
def tearDown(self):
Metadata.read_only = False
# Restore sys.argv
if self.orig_argv:
sys.argv = self.orig_argv
# Return to the original working directory
if self.cwd:
os.chdir(self.cwd)
# Clean up the temporary directory
if self.tempdir:
self.tempdir.cleanup()
self.tempdir = None
# Reset the environment variables to the original values
for environ_key in self.environ_keys:
environ_value = self.orig_environ.get(environ_key, None)
if environ_value:
os.environ[environ_key] = self.orig_environ[environ_key]
elif environ_value is None and environ_key in os.environ.keys():
del os.environ['environ_key']
# Restore the package.version if it was saved
if self.meta_version and self.meta_version != 'null': # Make sure meta_version gets set back
try:
subprocess.check_call(['meta', 'set', 'package.version', self.meta_version]) # nosec
except FileNotFoundError:
pass
def delkeys(self, keys):
"""
Delete keys from the environment
Parameters
----------
keys: list
The environment keys to remove
"""
for key in keys:
try:
del os.environ[key]
except KeyError:
pass
def write_config_files(self, config_files):
for filename, contents in config_files.items():
dirname = os.path.dirname(filename)
if dirname:
os.makedirs(dirname, exist_ok=True)
with open(filename, 'wb') as fh:
fh.write(contents)
def setupEmptyGit(self):
"""
Set up an empty git repo, in the current directory
"""
subprocess.check_call(['git', 'init'])
subprocess.check_call(['git', 'config', 'user.email', '<EMAIL>'])
subprocess.check_call(['git', 'config', 'user.name', 'foo'])
with open('setup.cfg', 'w') as setup_handle:
setup_handle.write('')
subprocess.check_call(['git', 'add', 'setup.cfg'])
subprocess.check_call(['git', 'commit', '-a', '-m', 'initial']) | 0.279435 | 0.118003 |
import pandas as pd
import numpy as np
import warnings
import sklearn.metrics as mt
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
# To avoid warnings
warnings.filterwarnings('ignore')
def read_data(path):
"""Read and return data."""
data = pd.read_csv(path)
return data
def data_prepare(dataset):
"""Puts data in order in a few steps.
1. Delete unused columns
2. Replace NaN's with means and most frequent
3. Replace str values with ints
4. Depersonalization of some data, bringing them to a vector form
Returns prepared dataset.
"""
# Delete unused columns
unused_columns = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'Fare']
data = dataset.drop(unused_columns, axis=1)
# Replace NaN's with means...
feature_list_1 = ['Age']
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
data[feature_list_1] = imputer.fit_transform(data[feature_list_1].astype('float64'))
# ...and most frequent
feature_list_2 = ['Survived', 'Pclass', 'SibSp', 'Parch']
imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
data[feature_list_2] = imputer.fit_transform(data[feature_list_2].astype('float64'))
# Replace str values with ints
label_encoder_sex = LabelEncoder()
data['Sex'] = label_encoder_sex.fit_transform(data['Sex'].astype(str))
label_encoder_embarked = LabelEncoder()
data['Embarked'] = label_encoder_embarked.fit_transform(data['Embarked'].astype(str))
# Depersonalization of some data, bringing them to a vector form
# e.g. for Sex column will be created Sex_0 and Sex_1 columns
categorical_feature_list = ['Sex', 'Embarked', 'Pclass']
for feature in categorical_feature_list:
data[feature] = pd.Categorical(data[feature])
data_dummies = pd.get_dummies(data[feature], prefix=feature)
data = pd.concat([data, data_dummies], axis=1)
data = data.drop(labels=[feature], axis=1)
return data
def get_x_and_y(data):
"""Splits dataset into feature matrix X and vector valid answers y."""
feature_list = ['Age', 'SibSp', 'Parch',
'Sex_0', 'Sex_1', 'Embarked_0',
'Embarked_1', 'Embarked_2',
'Embarked_3', 'Pclass_1.0',
'Pclass_2.0', 'Pclass_3.0']
X = data[feature_list]
y = data[['Survived']]
return X, y
def cross_validation(X, y):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
return X_train, X_test, y_train, y_test
def train_KNN(X, y):
"""Train KNN."""
knn = KNeighborsClassifier(n_neighbors=7)
knn.fit(X, y)
return knn
def train_LogReg(X, y):
"""Train LogReg."""
lg = LogisticRegression()
lg.fit(X, y)
return lg
def predict_KNN(knn_model, passenger):
"""Returns KNN pediction."""
prediction = knn_model.predict(passenger)
if prediction:
print('KNN: Passenger save!')
else:
print('KNN: Passenger die :(')
def predict_LogReg(lg_model, passenger):
"""Returns LogReg pediction."""
prediction = lg_model.predict(passenger)
if prediction == 1:
print('LogReg: Passenger save!')
else:
print('LogReg: Passenger die :(')
def model_passenger(X):
"""Modeling passenger."""
params = []
for column in X.columns:
param = int(input(f'{column}: '))
params.append(param)
passenger = np.array(params).reshape(1, -1)
return passenger
def print_metrics(knn_model, lg_model, X_test, y_test):
"""Print metric results of KNN and LogReg."""
prediction = knn_model.predict(X_test)
accuracy = mt.accuracy_score(y_test, prediction)
print(f'KNN metric: {100 * accuracy}')
prediction = lg_model.predict(X_test)
accuracy = mt.accuracy_score(y_test, prediction)
print(f'LogReg metric: {100 * accuracy}')
def test_KNN_and_LogReg(X, y):
"""KNN and LogReg accuracy test."""
X_train, X_test, y_train, y_test = cross_validation(X, y)
# Train models
knn_model = train_KNN(X_train, y_train)
lg_model = train_LogReg(X_train, y_train)
print_metrics(knn_model, lg_model, X_test, y_test)
def play_with_own_passenger(X, y):
"""Model user's passenger and predicts."""
# Train models
knn_model = train_KNN(X, y)
lg_model = train_LogReg(X, y)
# Model passenger
passenger = model_passenger(X)
# Predictions
predict_KNN(knn_model, passenger)
predict_LogReg(lg_model, passenger)
def main():
# Read data
dataset = read_data('data/train.csv')
# Data prepare
dataset = data_prepare(dataset)
# Get feature matrix and vector valid answers
X, y = get_x_and_y(dataset)
test_KNN_and_LogReg(X, y)
play_with_own_passenger(X, y)
if __name__ == '__main__':
main() | titanic.py | import pandas as pd
import numpy as np
import warnings
import sklearn.metrics as mt
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
# To avoid warnings
warnings.filterwarnings('ignore')
def read_data(path):
"""Read and return data."""
data = pd.read_csv(path)
return data
def data_prepare(dataset):
"""Puts data in order in a few steps.
1. Delete unused columns
2. Replace NaN's with means and most frequent
3. Replace str values with ints
4. Depersonalization of some data, bringing them to a vector form
Returns prepared dataset.
"""
# Delete unused columns
unused_columns = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'Fare']
data = dataset.drop(unused_columns, axis=1)
# Replace NaN's with means...
feature_list_1 = ['Age']
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
data[feature_list_1] = imputer.fit_transform(data[feature_list_1].astype('float64'))
# ...and most frequent
feature_list_2 = ['Survived', 'Pclass', 'SibSp', 'Parch']
imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
data[feature_list_2] = imputer.fit_transform(data[feature_list_2].astype('float64'))
# Replace str values with ints
label_encoder_sex = LabelEncoder()
data['Sex'] = label_encoder_sex.fit_transform(data['Sex'].astype(str))
label_encoder_embarked = LabelEncoder()
data['Embarked'] = label_encoder_embarked.fit_transform(data['Embarked'].astype(str))
# Depersonalization of some data, bringing them to a vector form
# e.g. for Sex column will be created Sex_0 and Sex_1 columns
categorical_feature_list = ['Sex', 'Embarked', 'Pclass']
for feature in categorical_feature_list:
data[feature] = pd.Categorical(data[feature])
data_dummies = pd.get_dummies(data[feature], prefix=feature)
data = pd.concat([data, data_dummies], axis=1)
data = data.drop(labels=[feature], axis=1)
return data
def get_x_and_y(data):
"""Splits dataset into feature matrix X and vector valid answers y."""
feature_list = ['Age', 'SibSp', 'Parch',
'Sex_0', 'Sex_1', 'Embarked_0',
'Embarked_1', 'Embarked_2',
'Embarked_3', 'Pclass_1.0',
'Pclass_2.0', 'Pclass_3.0']
X = data[feature_list]
y = data[['Survived']]
return X, y
def cross_validation(X, y):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
return X_train, X_test, y_train, y_test
def train_KNN(X, y):
"""Train KNN."""
knn = KNeighborsClassifier(n_neighbors=7)
knn.fit(X, y)
return knn
def train_LogReg(X, y):
"""Train LogReg."""
lg = LogisticRegression()
lg.fit(X, y)
return lg
def predict_KNN(knn_model, passenger):
"""Returns KNN pediction."""
prediction = knn_model.predict(passenger)
if prediction:
print('KNN: Passenger save!')
else:
print('KNN: Passenger die :(')
def predict_LogReg(lg_model, passenger):
"""Returns LogReg pediction."""
prediction = lg_model.predict(passenger)
if prediction == 1:
print('LogReg: Passenger save!')
else:
print('LogReg: Passenger die :(')
def model_passenger(X):
"""Modeling passenger."""
params = []
for column in X.columns:
param = int(input(f'{column}: '))
params.append(param)
passenger = np.array(params).reshape(1, -1)
return passenger
def print_metrics(knn_model, lg_model, X_test, y_test):
"""Print metric results of KNN and LogReg."""
prediction = knn_model.predict(X_test)
accuracy = mt.accuracy_score(y_test, prediction)
print(f'KNN metric: {100 * accuracy}')
prediction = lg_model.predict(X_test)
accuracy = mt.accuracy_score(y_test, prediction)
print(f'LogReg metric: {100 * accuracy}')
def test_KNN_and_LogReg(X, y):
"""KNN and LogReg accuracy test."""
X_train, X_test, y_train, y_test = cross_validation(X, y)
# Train models
knn_model = train_KNN(X_train, y_train)
lg_model = train_LogReg(X_train, y_train)
print_metrics(knn_model, lg_model, X_test, y_test)
def play_with_own_passenger(X, y):
"""Model user's passenger and predicts."""
# Train models
knn_model = train_KNN(X, y)
lg_model = train_LogReg(X, y)
# Model passenger
passenger = model_passenger(X)
# Predictions
predict_KNN(knn_model, passenger)
predict_LogReg(lg_model, passenger)
def main():
# Read data
dataset = read_data('data/train.csv')
# Data prepare
dataset = data_prepare(dataset)
# Get feature matrix and vector valid answers
X, y = get_x_and_y(dataset)
test_KNN_and_LogReg(X, y)
play_with_own_passenger(X, y)
if __name__ == '__main__':
main() | 0.735642 | 0.687663 |
import sys
from PyQt5.QtCore import Qt
from qtpy.QtGui import QDoubleValidator
from qtpy.QtWidgets import (
QCheckBox,
QFormLayout,
QLineEdit,
QVBoxLayout,
QComboBox,
QSlider,
QDialogButtonBox,
QDialog
)
MIN_FS = 100
MAX_FS = 1000
STEP_SIZE = 100
MIN_ALLOWABLE_FS = 0.0001
DECIMAL_PLACES = 4
SUPPORTED_CHANNEL_TYPES = ["", "ecg", "bio", "stim", "eog",
"misc", "seeg", "ecog", "mag",
"eeg", "ref_meg", "grad", "emg", "hbr", "hbo"]
class NpyDialog(QDialog):
def __init__(self, parent):
super().__init__(parent)
# initialize settings:
self.settings = {'ch_type': "misc", 'fs': None, 'standardize': False}
self.setWindowTitle("Parameters")
# Create layout for all items.
outer_form = QVBoxLayout()
# create form for the text box:
top_form = QFormLayout()
# Create a text box for reading the sample rate:
self.fs = QLineEdit()
self.fs.setValidator(
QDoubleValidator(
MIN_ALLOWABLE_FS,
sys.float_info.max,
DECIMAL_PLACES))
top_form.addRow("Sample Rate (Hz):", self.fs)
# initialize slider for fs:
self.fs_slider = QSlider(Qt.Horizontal)
self.fs_slider.setMinimum(MIN_FS)
self.fs_slider.setMaximum(MAX_FS)
self.fs_slider.setValue(MIN_FS)
self.fs_slider.setTickPosition(QSlider.TicksBelow)
self.fs_slider.setTickInterval(STEP_SIZE)
self.fs_slider.setSingleStep(STEP_SIZE)
self.fs_slider.valueChanged.connect(self.value_change)
# initialize dropdown for selecting channel type:
self.ch_type_dropdown = QComboBox()
self.ch_type_dropdown.addItems(SUPPORTED_CHANNEL_TYPES)
self.ch_type_dropdown.activated.connect(self.set_type)
# initialize checkbox for controlling standardization:
self.standardize = QCheckBox("Standardize Data")
# initialize accept/deny buttons:
self.buttonbox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
self.buttonbox.accepted.connect(self.button_accept)
self.buttonbox.rejected.connect(self.reject)
# build dialog window:
outer_form.addLayout(top_form)
outer_form.addWidget(self.fs_slider)
outer_form.addWidget(self.ch_type_dropdown)
outer_form.addWidget(self.standardize)
outer_form.addWidget(self.buttonbox)
self.setLayout(outer_form)
def set_type(self):
"""
sets the channel type based off of the selected item in the dropdown
menu.
"""
self.settings['ch_type'] = self.ch_type_dropdown.currentText()
if self.settings['ch_type'] != "":
self.settings['ch_type'] = "misc"
def value_change(self):
"""
Sets the text bar to match the slider. Is only called when the slider
is used.
"""
self.fs.setText(str(self.fs_slider.value()))
def get_values(self):
"""
gets the settings from the dialog box
"""
return self.settings
def set_values(self):
"""
Takes the settings from the text box and checkbox, and stores them in
their respective settings.
In this case, sets the sample frequency and standardization flag.
"""
fs = self.fs.text()
if fs != "":
fs = float(fs)
self.settings['fs'] = fs
self.settings['standardize'] = self.standardize.isChecked()
def button_accept(self):
"""
function called when dialog is accepted. Sets all values before closing
the dialog window.
"""
self.set_values()
return self.accept() | mnelab/dialogs/npydialog.py | import sys
from PyQt5.QtCore import Qt
from qtpy.QtGui import QDoubleValidator
from qtpy.QtWidgets import (
QCheckBox,
QFormLayout,
QLineEdit,
QVBoxLayout,
QComboBox,
QSlider,
QDialogButtonBox,
QDialog
)
MIN_FS = 100
MAX_FS = 1000
STEP_SIZE = 100
MIN_ALLOWABLE_FS = 0.0001
DECIMAL_PLACES = 4
SUPPORTED_CHANNEL_TYPES = ["", "ecg", "bio", "stim", "eog",
"misc", "seeg", "ecog", "mag",
"eeg", "ref_meg", "grad", "emg", "hbr", "hbo"]
class NpyDialog(QDialog):
    """Modal dialog collecting import parameters for raw .npy data.

    Results are exposed through get_values() as a dict with keys
    'ch_type' (str), 'fs' (float or None) and 'standardize' (bool).
    """
    def __init__(self, parent):
        super().__init__(parent)
        # Defaults returned if the dialog is accepted untouched.
        self.settings = {'ch_type': "misc", 'fs': None, 'standardize': False}
        self.setWindowTitle("Parameters")
        # Create layout for all items.
        outer_form = QVBoxLayout()
        # create form for the text box:
        top_form = QFormLayout()
        # Text box for the sample rate; validator restricts input to
        # positive decimals (>= MIN_ALLOWABLE_FS, DECIMAL_PLACES digits).
        self.fs = QLineEdit()
        self.fs.setValidator(
            QDoubleValidator(
                MIN_ALLOWABLE_FS,
                sys.float_info.max,
                DECIMAL_PLACES))
        top_form.addRow("Sample Rate (Hz):", self.fs)
        # Slider offering a quick coarse choice of sample rate.
        self.fs_slider = QSlider(Qt.Horizontal)
        self.fs_slider.setMinimum(MIN_FS)
        self.fs_slider.setMaximum(MAX_FS)
        self.fs_slider.setValue(MIN_FS)
        self.fs_slider.setTickPosition(QSlider.TicksBelow)
        self.fs_slider.setTickInterval(STEP_SIZE)
        self.fs_slider.setSingleStep(STEP_SIZE)
        self.fs_slider.valueChanged.connect(self.value_change)
        # Dropdown for selecting the channel type.
        self.ch_type_dropdown = QComboBox()
        self.ch_type_dropdown.addItems(SUPPORTED_CHANNEL_TYPES)
        self.ch_type_dropdown.activated.connect(self.set_type)
        # Checkbox controlling standardization of the data.
        self.standardize = QCheckBox("Standardize Data")
        # Accept/cancel buttons.
        self.buttonbox = QDialogButtonBox(QDialogButtonBox.Ok |
                                          QDialogButtonBox.Cancel)
        self.buttonbox.accepted.connect(self.button_accept)
        self.buttonbox.rejected.connect(self.reject)
        # build dialog window:
        outer_form.addLayout(top_form)
        outer_form.addWidget(self.fs_slider)
        outer_form.addWidget(self.ch_type_dropdown)
        outer_form.addWidget(self.standardize)
        outer_form.addWidget(self.buttonbox)
        self.setLayout(outer_form)
    def set_type(self):
        """Store the channel type chosen in the dropdown.

        The blank placeholder entry falls back to "misc".
        """
        self.settings['ch_type'] = self.ch_type_dropdown.currentText()
        # BUG FIX: the original tested `!= ""`, which overwrote every real
        # selection with "misc"; only the empty placeholder (first item of
        # SUPPORTED_CHANNEL_TYPES) should fall back to the default.
        if self.settings['ch_type'] == "":
            self.settings['ch_type'] = "misc"
    def value_change(self):
        """Mirror the slider position into the sample-rate text box.

        Only called when the slider is used.
        """
        self.fs.setText(str(self.fs_slider.value()))
    def get_values(self):
        """Return the settings dict collected by this dialog."""
        return self.settings
    def set_values(self):
        """Read the widgets into self.settings (sample rate, standardize flag).

        An empty sample-rate box keeps 'fs' as None ("not provided").
        """
        fs_text = self.fs.text()
        # BUG FIX: previously an empty box stored "" (a str) in 'fs',
        # although the setting is initialized as float-or-None.
        self.settings['fs'] = float(fs_text) if fs_text != "" else None
        self.settings['standardize'] = self.standardize.isChecked()
    def button_accept(self):
        """OK handler: capture widget values before the dialog closes."""
        self.set_values()
        # accept() is invoked on the following original line.
return self.accept() | 0.533154 | 0.129403 |
from webthing import Property, Thing, Value
from internet_monitor_webthing.connectivity_monitor import ConnectionInfo, ConnectionLog, ConnectionTester
import tornado.ioloop
class InternetConnectivityMonitorWebthing(Thing):
    """Webthing exposing internet-connectivity state as read-only properties.

    Registers connectivity flag, last-update timestamp, public IP, ISP/ASN
    name and the probe configuration, and keeps them updated from a
    background ConnectionTester via the tornado IO loop.
    """
    # regarding capabilities refer https://iot.mozilla.org/schemas
    # there is also another schema registry http://iotschema.org/docs/full.html not used by webthing
    def __init__(self, description: str, connecttest_period: int, connecttest_url: str):
        """Create the thing and start periodic connectivity testing.

        description        -- human readable description of this thing
        connecttest_period -- seconds between connectivity probes
        connecttest_url    -- URL fetched by each probe
        """
        Thing.__init__(
            self,
            'urn:dev:ops:connectivitymonitor-1',
            'Internet Connectivity Monitor',
            ['MultiLevelSensor'],
            description
        )
        self.connection_log = ConnectionLog()
        self.connecttest_period = connecttest_period
        # True while the internet is reachable.
        self.internet_connected = Value(False)
        self.add_property(
            Property(self,
                     'connected',
                     self.internet_connected,
                     metadata={
                         '@type': 'BooleanProperty',
                         'title': 'Internet is connected',
                         "type": "boolean",
                         'description': 'Whether the internet is connected',
                         'readOnly': True,
                     }))
        # ISO 8601 timestamp of the most recent state update.
        self.event_date = Value("")
        self.add_property(
            Property(self,
                     'time',
                     self.event_date,
                     metadata={
                         'title': 'Updated time',
                         "type": "string",
                         'unit': 'datetime',
                         'description': 'The ISO 8601 date time of last state update',
                         'readOnly': True,
                     }))
        # Public (WAN) IP address currently in use.
        self.ip_address = Value("")
        self.add_property(
            Property(self,
                     'ip_address',
                     self.ip_address,
                     metadata={
                         'title': 'Public IP address',
                         'type': 'string',
                         'description': 'The public WAN IP address used for internet connection',
                         'readOnly': True,
                     }))
        # Name of the provider owning the public IP (ASN description).
        self.asn = Value("")
        self.add_property(
            Property(self,
                     'asn',
                     self.asn,
                     metadata={
                         'title': 'Internet service provider',
                         'type': 'string',
                         'description': 'The name of the internet service provider providing the public WAN IP address ',
                         'readOnly': True,
                     }))
        # Probe configuration, exposed read-only for clients.
        self.test_url = Value(connecttest_url)
        self.add_property(
            Property(self,
                     'connection_test_url',
                     self.test_url,
                     metadata={
                         'title': 'Internet connection test url',
                         "type": "string",
                         'description': 'The url to connect',
                         'readOnly': True,
                     }))
        self.testperiod = Value(connecttest_period)
        self.add_property(
            Property(self,
                     'connection_test_period',
                     self.testperiod,
                     metadata={
                         '@type': 'LevelProperty',
                         'title': 'Internet connection test execution period in seconds',
                         'type': 'number',
                         'description': 'The Internet connection test execution period in seconds',
                         'unit': 'sec',
                         'readOnly': True,
                     }))
        # Property updates are marshalled onto this loop (see callbacks below).
        self.ioloop = tornado.ioloop.IOLoop.current()
        self.tester = ConnectionTester(self.connection_log)
        self.tester.listen(self.__connection_state_updated, self.testperiod.get(), self.test_url.get())
    def __connection_state_updated(self, connection_info: ConnectionInfo):
        """Tester callback: hop onto the IO loop before touching properties.

        Presumably invoked from the tester's own thread (hence add_callback)
        -- confirm against ConnectionTester.listen.
        """
        if connection_info is not None:
            self.ioloop.add_callback(self.__update_connected_props, connection_info)
    def __update_connected_props(self, connection_info: ConnectionInfo):
        """Push the latest ConnectionInfo into the webthing properties."""
        self.internet_connected.notify_of_external_update(connection_info.is_connected)
        self.event_date.notify_of_external_update(connection_info.date.isoformat())
        self.ip_address.notify_of_external_update(connection_info.ip_address)
        # NOTE(review): the final 'asn' update follows on the original next
        # line; it truncates ip_info['asn'] to 40 chars and raises KeyError
        # if 'asn' is absent -- confirm ConnectionInfo always provides it.
self.asn.notify_of_external_update(connection_info.ip_info['asn'][:40]) | internet_monitor_webthing/connectivity_monitor_webthing.py | from webthing import Property, Thing, Value
from internet_monitor_webthing.connectivity_monitor import ConnectionInfo, ConnectionLog, ConnectionTester
import tornado.ioloop
class InternetConnectivityMonitorWebthing(Thing):
# regarding capabilities refer https://iot.mozilla.org/schemas
# there is also another schema registry http://iotschema.org/docs/full.html not used by webthing
def __init__(self, description: str, connecttest_period: int, connecttest_url: str):
Thing.__init__(
self,
'urn:dev:ops:connectivitymonitor-1',
'Internet Connectivity Monitor',
['MultiLevelSensor'],
description
)
self.connection_log = ConnectionLog()
self.connecttest_period = connecttest_period
self.internet_connected = Value(False)
self.add_property(
Property(self,
'connected',
self.internet_connected,
metadata={
'@type': 'BooleanProperty',
'title': 'Internet is connected',
"type": "boolean",
'description': 'Whether the internet is connected',
'readOnly': True,
}))
self.event_date = Value("")
self.add_property(
Property(self,
'time',
self.event_date,
metadata={
'title': 'Updated time',
"type": "string",
'unit': 'datetime',
'description': 'The ISO 8601 date time of last state update',
'readOnly': True,
}))
self.ip_address = Value("")
self.add_property(
Property(self,
'ip_address',
self.ip_address,
metadata={
'title': 'Public IP address',
'type': 'string',
'description': 'The public WAN IP address used for internet connection',
'readOnly': True,
}))
self.asn = Value("")
self.add_property(
Property(self,
'asn',
self.asn,
metadata={
'title': 'Internet service provider',
'type': 'string',
'description': 'The name of the internet service provider providing the public WAN IP address ',
'readOnly': True,
}))
self.test_url = Value(connecttest_url)
self.add_property(
Property(self,
'connection_test_url',
self.test_url,
metadata={
'title': 'Internet connection test url',
"type": "string",
'description': 'The url to connect',
'readOnly': True,
}))
self.testperiod = Value(connecttest_period)
self.add_property(
Property(self,
'connection_test_period',
self.testperiod,
metadata={
'@type': 'LevelProperty',
'title': 'Internet connection test execution period in seconds',
'type': 'number',
'description': 'The Internet connection test execution period in seconds',
'unit': 'sec',
'readOnly': True,
}))
self.ioloop = tornado.ioloop.IOLoop.current()
self.tester = ConnectionTester(self.connection_log)
self.tester.listen(self.__connection_state_updated, self.testperiod.get(), self.test_url.get())
def __connection_state_updated(self, connection_info: ConnectionInfo):
if connection_info is not None:
self.ioloop.add_callback(self.__update_connected_props, connection_info)
def __update_connected_props(self, connection_info: ConnectionInfo):
self.internet_connected.notify_of_external_update(connection_info.is_connected)
self.event_date.notify_of_external_update(connection_info.date.isoformat())
self.ip_address.notify_of_external_update(connection_info.ip_address)
self.asn.notify_of_external_update(connection_info.ip_info['asn'][:40]) | 0.667364 | 0.199952 |
from __future__ import print_function
from keras.models import Model, model_from_json
from keras.layers import Dense, Activation, Dropout, Input, Embedding
from keras.layers import CuDNNLSTM as LSTM
from keras.optimizers import RMSprop, Adam
from keras.utils.data_utils import get_file
from keras.callbacks import ModelCheckpoint
import numpy as np
import random
import sys
import re
### Only needed for me, not to block the whole GPU, you don't need this stuff
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.3
set_session(tf.Session(config=config))
### ---end of weird stuff
## helper functions
from nltk.tokenize import word_tokenize # turns text into list of words
def load_vocabulary(file_name):
    """Load and return the JSON-encoded vocabulary stored in *file_name*."""
    import json
    with open(file_name, "rt", encoding="utf-8") as vocab_file:
        return json.load(vocab_file)
def load_model(model_file, weight_file):
    """Rebuild a Keras model from its JSON architecture and HDF5 weights."""
    with open(model_file, "rt", encoding="utf-8") as architecture:
        model = model_from_json(architecture.read())
    model.load_weights(weight_file)
    return model
# Artifacts produced by the training script.
vocab_file="generation-vocab.json"
model_file="generation-model.json"
weight_file="generation-weights.h5"
# NOTE(review): load_vocabulary returns a single json.load() result, yet it
# is unpacked into two values here -- this only works if the JSON file holds
# a two-element list [vocab, ...]; verify against the training script.
vocab, _ = load_vocabulary(vocab_file)
model = load_model(model_file, weight_file)
print("Vocabulary size:", len(vocab))
# id -> character lookup for decoding model predictions.
inversed_vocab = {value: key for key, value in vocab.items()}
print("Inversed vocabulary size:", len(inversed_vocab))
print(vocab, inversed_vocab)
# Hyperparameters; must match the values used at training time.
context_size=50
embedding_size=50
batch_size=150
def sample(preds, temperature=1.0):
    """Draw one index from *preds* after temperature-scaling the distribution.

    Lower temperatures sharpen the distribution (greedier), higher
    temperatures flatten it (more random).
    """
    scaled = np.asarray(preds).astype('float64')
    scaled = np.exp(np.log(scaled) / temperature)
    scaled = scaled / np.sum(scaled)
    draw = np.random.multinomial(1, scaled, 1)
    return np.argmax(draw)
# generate
# Interactive generation loop: read a seed from stdin, then print a 200-char
# continuation at several sampling temperatures.
while True:
    print()
    print('-' * 50)
    text = input("Seed for generation:").strip()
    for diversity in [0.2, 0.5, 1.0, 1.2]:
        print()
        print('----- diversity:', diversity)
        generated = []
        for c in text:
            generated.append(c)
        print('----- Generating with seed: "' + "".join(generated) + '"')
        sys.stdout.write("".join(generated))
        sentence=generated
        # vectorize seed: map each character to its vocabulary id
        # (unknown characters fall back to the <UNKNOWN> id).
        # NOTE(review): a seed longer than context_size overflows
        # generate_X[0,i] below -- assumes the user types < 50 chars.
        generate_X=np.zeros((1,context_size))
        for i,c in enumerate(sentence):
            generate_X[0,i]=vocab.get(c,vocab["<UNKNOWN>"])
        for i in range(200):
            # predict the distribution over the next character
            preds = model.predict(generate_X, verbose=0)[0]
            next_index = sample(preds, diversity)
            next_char = inversed_vocab[next_index]
            generated += [next_char]
            # keep only the last context_size characters as the new context
            sentence=generated[len(generated)-context_size:]
            # vectorize new seed
            generate_X=np.zeros((1,context_size))
            for i,c in enumerate(sentence):
                generate_X[0,i]=vocab.get(c,vocab["<UNKNOWN>"])
            sys.stdout.write(next_char)
            sys.stdout.flush()
        sys.stdout.write("\n")
        sys.stdout.flush()
print() | generate.py |
from __future__ import print_function
from keras.models import Model, model_from_json
from keras.layers import Dense, Activation, Dropout, Input, Embedding
from keras.layers import CuDNNLSTM as LSTM
from keras.optimizers import RMSprop, Adam
from keras.utils.data_utils import get_file
from keras.callbacks import ModelCheckpoint
import numpy as np
import random
import sys
import re
### Only needed for me, not to block the whole GPU, you don't need this stuff
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.3
set_session(tf.Session(config=config))
### ---end of weird stuff
## helper functions
from nltk.tokenize import word_tokenize # turns text into list of words
def load_vocabulary(file_name):
import json
with open(file_name, "rt", encoding="utf-8") as f:
vocab=json.load(f)
return vocab
def load_model(model_file, weight_file):
with open(model_file, "rt", encoding="utf-8") as f:
model=model_from_json(f.read())
model.load_weights(weight_file)
return model
vocab_file="generation-vocab.json"
model_file="generation-model.json"
weight_file="generation-weights.h5"
vocab, _ = load_vocabulary(vocab_file)
model = load_model(model_file, weight_file)
print("Vocabulary size:", len(vocab))
inversed_vocab = {value: key for key, value in vocab.items()}
print("Inversed vocabulary size:", len(inversed_vocab))
print(vocab, inversed_vocab)
context_size=50
embedding_size=50
batch_size=150
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
# generate
while True:
print()
print('-' * 50)
text = input("Seed for generation:").strip()
for diversity in [0.2, 0.5, 1.0, 1.2]:
print()
print('----- diversity:', diversity)
generated = []
for c in text:
generated.append(c)
print('----- Generating with seed: "' + "".join(generated) + '"')
sys.stdout.write("".join(generated))
sentence=generated
# vectorize seed
generate_X=np.zeros((1,context_size))
for i,c in enumerate(sentence):
generate_X[0,i]=vocab.get(c,vocab["<UNKNOWN>"])
for i in range(200):
# predict
preds = model.predict(generate_X, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = inversed_vocab[next_index]
generated += [next_char]
sentence=generated[len(generated)-context_size:]
# vectorize new seed
generate_X=np.zeros((1,context_size))
for i,c in enumerate(sentence):
generate_X[0,i]=vocab.get(c,vocab["<UNKNOWN>"])
sys.stdout.write(next_char)
sys.stdout.flush()
sys.stdout.write("\n")
sys.stdout.flush()
print() | 0.629547 | 0.214794 |
import requests
import os
import re
import pandas as pd
from bs4 import BeautifulSoup
import json
from globals import teams_list
roster_url = 'https://www.espn.com/college-football/team/roster/_/id/'
schedule_url = 'https://www.espn.com/college-football/team/schedule/_/id/'
def scrape_roster(team_id):
    """Scrape the ESPN roster page for *team_id* into a column-keyed dict.

    Returns a dict with keys 'name', 'pos', 'height', 'weight', 'class',
    'birthplace' and 'number', each mapping row-index -> value.  Heights
    are converted to decimal feet, weights stripped of ' lbs', and jersey
    numbers split off the player name.
    """
    roster_res = requests.get(roster_url + team_id)
    # The roster page contains three tables; concatenate them.
    roster_dfs = pd.read_html(roster_res.text)
    roster_df = roster_dfs[0].append(roster_dfs[1], ignore_index=True).append(
        roster_dfs[2], ignore_index=True
    )
    roster_df = roster_df.drop(columns=['Unnamed: 0'])
    roster_dict = roster_df.to_dict()
    # Map ESPN column headers to our snake_case keys.
    col_labels = {
        'Name': 'name',
        'POS': 'pos',
        'HT': 'height',
        'WT': 'weight',
        'Class': 'class',
        'Birthplace': 'birthplace'
    }
    new_dict = {}
    for key in roster_dict:
        new_dict[col_labels[key]] = roster_dict[key]
    # ESPN appends the jersey number to the player name; split it off at
    # the first digit.  No digits -> player has no listed number.
    new_dict['number'] = {}
    for index, value in new_dict['name'].items():
        try:
            number_index = re.search('[0-9]{1,2}', value).span()[0]
        except AttributeError:
            new_dict['number'][index] = None
            continue
        new_dict['number'][index] = value[number_index:]
        new_dict['name'][index] = value[:number_index]
    # Convert height strings like 6'2" into decimal feet.
    for index, value in new_dict['height'].items():
        split_height = value.split('\'')
        feet = split_height[0]
        try:
            feet = int(feet)
        except IndexError:
            # NOTE(review): int() raises ValueError, never IndexError;
            # this branch is unreachable as written.
            new_dict['height'][index] = None
            continue
        except ValueError:
            new_dict['height'][index] = None
            continue
        # NOTE(review): split_height[1] raises an *uncaught* IndexError when
        # the height string has no apostrophe -- confirm ESPN data shape.
        inches = split_height[1].replace('"', '')
        inches = int(inches) / 12
        new_dict['height'][index] = feet + inches
    for index, value in new_dict['weight'].items():
        new_dict['weight'][index] = value.replace(' lbs', '')
    return new_dict
def scrape_schedule(team_id):
    """Scrape the ESPN schedule page for *team_id* into a column-keyed dict.

    Returns a dict with keys 'date', 'opponent', 'result', 'win_loss',
    each mapping row-index -> value, with trailing repeated-header rows
    removed.
    """
    schedule_res = requests.get(schedule_url + team_id)
    schedule_df = pd.read_html(schedule_res.text, header=1)[0]
    schedule_dict = schedule_df.to_dict()
    # Map ESPN column headers to our snake_case keys.
    col_labels = {
        'DATE': 'date',
        'OPPONENT': 'opponent',
        'RESULT': 'result',
        'W-L (CONF)': 'win_loss'
    }
    new_dict = {}
    for key in schedule_dict:
        try:
            new_dict[col_labels[key]] = schedule_dict[key]
        except KeyError:
            # Columns we don't track are ignored.
            pass
    # The table appears to repeat its header mid-table; drop everything
    # from the *last* 'DATE' marker row onwards.
    cutoff_index = None
    for key, value in new_dict['date'].items():
        if value == 'DATE':
            cutoff_index = key
    # NOTE(review): if no 'DATE' marker row exists, cutoff_index stays None
    # and range() below raises TypeError -- confirm this cannot happen.
    for index in range(cutoff_index, len(new_dict['date'])):
        for key in new_dict:
            del new_dict[key][index]
    return new_dict
def scrape_plays(id):
    """Return the flat list of play dicts for one ESPN game id.

    Raises KeyError when the play-by-play payload lacks the expected keys.
    """
    base_url = "http://cdn.espn.com/core/college-football/playbyplay"
    query = {
        'gameId': id,
        'xhr': 1,
        'render': "false",
        'userab': 18
    }
    response = requests.get(base_url, params=query)
    package = json.loads(response.content)['gamepackageJSON']
    # Flatten every drive's plays into a single chronological list.
    return [play
            for drive in package['drives']['previous']
            for play in drive['plays']]
def scrape_stats(team):
    """Scrape play-by-play data for every game linked from *team*'s schedule.

    Returns {game_id: [play, ...]}.  Games whose play-by-play payload is
    missing expected keys are skipped.
    """
    r = requests.get(
        schedule_url + team['id']
    )
    soup = BeautifulSoup(r.text, 'html.parser')
    # The first table on the page is the schedule.
    schedule_table = soup.find_all('table')[0]
    # Collect links that point at individual game pages.
    game_links = []
    for tag in schedule_table.find_all('a'):
        if re.search('game', tag['href']):
            game_links.append(tag['href'])
    team_games = {}
    for i in range(0, len(game_links)):
        link = game_links[i]
        game_id = re.search('gameId/[0-9]+', link).group().replace('gameId/',
                                                                   '')
        try:
            plays = scrape_plays(game_id)
        except KeyError:
            # Presumably a game without play-by-play data yet; skip it.
            continue
        team_games[game_id] = plays
    return team_games
def scrape_team_data(team):
    """Collect roster, schedule and play-by-play stats for one team dict."""
    tid = team['id']
    return scrape_roster(tid), scrape_schedule(tid), scrape_stats(team)
def scrape_all_data():
    """Scrape every team in teams_list.

    Returns three dicts (roster, schedule, stats), each keyed by team id.
    """
    roster, schedule, stats = {}, {}, {}
    for ordinal, team in enumerate(teams_list, start=1):
        print('Scraping team {} data...'.format(ordinal))
        team_id = team['id']
        roster[team_id], schedule[team_id], stats[team_id] = scrape_team_data(team)
    return roster, schedule, stats
# Ensure the output directory exists (idempotent); replaces the manual
# try/except FileExistsError dance with the stdlib one-liner.
os.makedirs('data', exist_ok=True)
def write_file(path, data):
    """Serialize *data* as JSON to *path*.

    Uses json.dump to stream directly to the file; the original built the
    whole string with json.dumps and also called f.close() redundantly
    inside the with-block.
    """
    with open(path, 'w') as f:
        json.dump(data, f)
if __name__ == '__main__':
    # Scrape everything, then persist each dataset as JSON under data/.
    roster, schedule, stats = scrape_all_data()
    print('Writing files...')
    write_file('data/schedule.json', schedule)
    write_file('data/roster.json', roster)
write_file('data/stats.json', stats) | fetch_stats.py | import requests
import os
import re
import pandas as pd
from bs4 import BeautifulSoup
import json
from globals import teams_list
roster_url = 'https://www.espn.com/college-football/team/roster/_/id/'
schedule_url = 'https://www.espn.com/college-football/team/schedule/_/id/'
def scrape_roster(team_id):
roster_res = requests.get(roster_url + team_id)
roster_dfs = pd.read_html(roster_res.text)
roster_df = roster_dfs[0].append(roster_dfs[1], ignore_index=True).append(
roster_dfs[2], ignore_index=True
)
roster_df = roster_df.drop(columns=['Unnamed: 0'])
roster_dict = roster_df.to_dict()
col_labels = {
'Name': 'name',
'POS': 'pos',
'HT': 'height',
'WT': 'weight',
'Class': 'class',
'Birthplace': 'birthplace'
}
new_dict = {}
for key in roster_dict:
new_dict[col_labels[key]] = roster_dict[key]
new_dict['number'] = {}
for index, value in new_dict['name'].items():
try:
number_index = re.search('[0-9]{1,2}', value).span()[0]
except AttributeError:
new_dict['number'][index] = None
continue
new_dict['number'][index] = value[number_index:]
new_dict['name'][index] = value[:number_index]
for index, value in new_dict['height'].items():
split_height = value.split('\'')
feet = split_height[0]
try:
feet = int(feet)
except IndexError:
new_dict['height'][index] = None
continue
except ValueError:
new_dict['height'][index] = None
continue
inches = split_height[1].replace('"', '')
inches = int(inches) / 12
new_dict['height'][index] = feet + inches
for index, value in new_dict['weight'].items():
new_dict['weight'][index] = value.replace(' lbs', '')
return new_dict
def scrape_schedule(team_id):
schedule_res = requests.get(schedule_url + team_id)
schedule_df = pd.read_html(schedule_res.text, header=1)[0]
schedule_dict = schedule_df.to_dict()
col_labels = {
'DATE': 'date',
'OPPONENT': 'opponent',
'RESULT': 'result',
'W-L (CONF)': 'win_loss'
}
new_dict = {}
for key in schedule_dict:
try:
new_dict[col_labels[key]] = schedule_dict[key]
except KeyError:
pass
cutoff_index = None
for key, value in new_dict['date'].items():
if value == 'DATE':
cutoff_index = key
for index in range(cutoff_index, len(new_dict['date'])):
for key in new_dict:
del new_dict[key][index]
return new_dict
def scrape_plays(id):
base_url = "http://cdn.espn.com/core/college-football/playbyplay"
params = {
'gameId': id,
'xhr': 1,
'render': "false",
'userab': 18
}
r = requests.get(base_url, params=params)
game_json = json.loads(r.content)['gamepackageJSON']
drives = game_json['drives']['previous']
plays = []
for drive in drives:
plays.extend(drive['plays'])
return plays
def scrape_stats(team):
r = requests.get(
schedule_url + team['id']
)
soup = BeautifulSoup(r.text, 'html.parser')
schedule_table = soup.find_all('table')[0]
game_links = []
for tag in schedule_table.find_all('a'):
if re.search('game', tag['href']):
game_links.append(tag['href'])
team_games = {}
for i in range(0, len(game_links)):
link = game_links[i]
game_id = re.search('gameId/[0-9]+', link).group().replace('gameId/',
'')
try:
plays = scrape_plays(game_id)
except KeyError:
continue
team_games[game_id] = plays
return team_games
def scrape_team_data(team):
team_roster = scrape_roster(team['id'])
team_schedule = scrape_schedule(team['id'])
team_stats = scrape_stats(team)
return team_roster, team_schedule, team_stats
def scrape_all_data():
roster = {}
schedule = {}
stats = {}
for i in range(0, len(teams_list)):
team = teams_list[i]
print('Scraping team {} data...'.format(i + 1))
team_roster, team_schedule, team_stats = scrape_team_data(team)
roster[team['id']] = team_roster
schedule[team['id']] = team_schedule
stats[team['id']] = team_stats
return roster, schedule, stats
try:
os.mkdir('data')
except FileExistsError:
pass
def write_file(path, data):
with open(path, 'w') as f:
f.write(json.dumps(data))
f.close()
if __name__ == '__main__':
roster, schedule, stats = scrape_all_data()
print('Writing files...')
write_file('data/schedule.json', schedule)
write_file('data/roster.json', roster)
write_file('data/stats.json', stats) | 0.184547 | 0.174551 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from utils.feature_engineer import count_categorical
from utils.feature_engineer import agg_numeric
from utils.feature_engineer import missing_values_table
from utils.feature_engineer import light_gbm
import warnings
warnings.filterwarnings("ignore")
def feature_engineer(train, test, bureau, bureau_balance, credit_card_balance,
                     installments_payments, pos_cash_balance, previous_application):
    """Manually engineer features from the Home-Credit auxiliary tables.

    Aggregates/counts each auxiliary table per client (SK_ID_CURR), merges
    everything onto train and test, drops columns >90% missing and one of
    each pair of columns correlated above 0.8, and writes four CSV
    snapshots as a side effect (train/test "all_raw" and "corrs_removed").

    :param train: main training dataframe (must contain TARGET)
    :param test: main test dataframe
    :param bureau: credit-bureau loans table
    :param bureau_balance: monthly bureau balance table
    :param credit_card_balance: monthly credit-card balance table
    :param installments_payments: installment payment history table
    :param pos_cash_balance: monthly POS/cash balance table
    :param previous_application: previous Home-Credit applications table
    :return: (DataFrame) train with engineered features,
             (DataFrame) test with engineered features
    """
    # Per-client categorical counts and numeric aggregates of each table.
    bureau_counts = count_categorical(bureau, group_var='SK_ID_CURR', df_name='bureau')
    bureau_agg = agg_numeric(bureau.drop(columns=['SK_ID_BUREAU']), group_var='SK_ID_CURR', df_name='bureau')
    bureau_balance_counts = count_categorical(bureau_balance, group_var='SK_ID_BUREAU', df_name='bureau_balance')
    bureau_balance_agg = agg_numeric(bureau_balance, group_var='SK_ID_BUREAU', df_name='bureau_balance')
    credit_card_balance_counts = count_categorical(credit_card_balance,
                                                   group_var='SK_ID_CURR', df_name='credit_card_balance')
    credit_card_balance_agg = agg_numeric(credit_card_balance.drop(columns=['SK_ID_PREV']),
                                          group_var='SK_ID_CURR', df_name='credit_card_balance')
    # Reason: Installments_payments_counts table contains no object value.
    # installments_payments_counts = count_categorical(installments_payments,
    #                                                  group_var='SK_ID_CURR', df_name='installments_payments')
    installments_payments_agg = agg_numeric(installments_payments.drop(columns=['SK_ID_PREV']),
                                            group_var='SK_ID_CURR', df_name='installments_payments')
    pos_cash_balance_counts = count_categorical(pos_cash_balance, group_var='SK_ID_CURR', df_name='pos_cash_balance')
    pos_cash_balance_agg = agg_numeric(pos_cash_balance.drop(columns=['SK_ID_PREV']),
                                       group_var='SK_ID_CURR', df_name='pos_cash_balance')
    previous_application_counts = count_categorical(previous_application,
                                                    group_var='SK_ID_CURR', df_name='previous_application_counts')
    previous_application_agg = agg_numeric(previous_application.drop(columns=['SK_ID_PREV']),
                                           group_var='SK_ID_CURR', df_name='previous_application')
    # Dataframe grouped by the loan
    bureau_by_loan = bureau_balance_agg.merge(bureau_balance_counts,
                                              right_index=True, left_on='SK_ID_BUREAU', how='outer')
    # Merge to include the SK_ID_CURR
    bureau_by_loan = bureau[['SK_ID_BUREAU', 'SK_ID_CURR']].merge(bureau_by_loan, on='SK_ID_BUREAU', how='left')
    # Aggregate the stats for each client
    bureau_balance_by_client = agg_numeric(bureau_by_loan.drop(columns=['SK_ID_BUREAU']),
                                           group_var='SK_ID_CURR', df_name='client')
    original_features = list(train.columns)
    print('Original Number of Features: ', len(original_features))
    # TODO: We can also first deal with pos_cash_balance and credit card balance before merge.
    # NOTE(review): the ten merges below are duplicated verbatim for test;
    # consider extracting a helper in a future change.
    # Merge with the value counts of bureau
    train = train.merge(bureau_counts, on='SK_ID_CURR', how='left')
    # Merge with the stats of bureau
    train = train.merge(bureau_agg, on='SK_ID_CURR', how='left')
    # Merge with the monthly information grouped by client
    train = train.merge(bureau_balance_by_client, on='SK_ID_CURR', how='left')
    # Merge with credit card balance counts
    train = train.merge(credit_card_balance_counts, on='SK_ID_CURR', how='left')
    # Merge with credit card balance agg
    train = train.merge(credit_card_balance_agg, on='SK_ID_CURR', how='left')
    # Merge with installments payments agg
    train = train.merge(installments_payments_agg, on='SK_ID_CURR', how='left')
    # Merge with pos_cash_balance counts
    train = train.merge(pos_cash_balance_counts, on='SK_ID_CURR', how='left')
    # Merge with pos_cash_balance agg
    train = train.merge(pos_cash_balance_agg, on='SK_ID_CURR', how='left')
    # Merge with previous_application counts
    train = train.merge(previous_application_counts, on='SK_ID_CURR', how='left')
    # Merge with previous application agg
    train = train.merge(previous_application_agg, on='SK_ID_CURR', how='left')
    new_features = list(train.columns)
    print('Number of features using previous loans from other institutions data: ', len(new_features))
    # Columns >90% missing in train (dropped later together with test's).
    missing_train = missing_values_table(train)
    missing_train_vars = list(missing_train.index[missing_train['% of Total Values'] > 90])
    # Test: same ten merges as for train above.
    # Merge with the value counts of bureau
    test = test.merge(bureau_counts, on='SK_ID_CURR', how='left')
    # Merge with the stats of bureau
    test = test.merge(bureau_agg, on='SK_ID_CURR', how='left')
    # Merge with the monthly information grouped by client
    test = test.merge(bureau_balance_by_client, on='SK_ID_CURR', how='left')
    # Merge with credit card balance counts
    test = test.merge(credit_card_balance_counts, on='SK_ID_CURR', how='left')
    # Merge with credit card balance agg
    test = test.merge(credit_card_balance_agg, on='SK_ID_CURR', how='left')
    # Merge with installments payments agg
    test = test.merge(installments_payments_agg, on='SK_ID_CURR', how='left')
    # Merge with pos_cash_balance counts
    test = test.merge(pos_cash_balance_counts, on='SK_ID_CURR', how='left')
    # Merge with pos_cash_balance agg
    test = test.merge(pos_cash_balance_agg, on='SK_ID_CURR', how='left')
    # Merge with previous_application counts
    test = test.merge(previous_application_counts, on='SK_ID_CURR', how='left')
    # Merge with previous application agg
    test = test.merge(previous_application_agg, on='SK_ID_CURR', how='left')
    print('Shape of Training Data: ', train.shape)
    print('Shape of Testing Data: ', test.shape)
    train_labels = train['TARGET']
    # Align the dataframes, this will remove the 'TARGET' column
    train, test = train.align(test, join='inner', axis=1)
    train['TARGET'] = train_labels
    print('Training Data Shape: ', train.shape)
    print('Testing Data Shape ', test.shape)
    missing_test = missing_values_table(test)
    missing_test_vars = list(missing_test.index[missing_test['% of Total Values'] > 90])
    # NOTE(review): the bare len() below is a no-op expression (notebook
    # leftover); its value is discarded.
    len(missing_test_vars)
    missing_columns = list(set(missing_test_vars + missing_train_vars))
    print('There are %d columns with more than 90%% missing in either the training or testing data.'
          % len(missing_columns))
    # Drop the missing columns
    train = train.drop(columns=missing_columns)
    test = test.drop(columns=missing_columns)
    # Side effect: snapshot before correlation pruning.
    train.to_csv('train_all_raw.csv', index=False)
    test.to_csv('test_all_raw.csv', index=False)
    # Calculate all correlations in dataframe
    corrs = train.corr()
    corrs = corrs.sort_values('TARGET', ascending=False)
    # Set the threshold
    threshold = 0.8
    # Empty dictionary to hold correlated variables
    above_threshold_vars = {}
    # For each column, record the variables that are above the threshold
    for col in corrs:
        above_threshold_vars[col] = list(corrs.index[corrs[col] > threshold])
    # Track columns to remove and columns already examined
    cols_to_remove = []
    cols_seen = []
    cols_to_remove_paire = []
    # Iterate through columns and correlated columns
    for key, value in above_threshold_vars.items():
        # Keep track of columns already examined
        cols_seen.append(key)
        for x in value:
            if x == key:
                # NOTE(review): bare "next" evaluates the builtin and
                # discards it (a no-op); "continue" was likely intended.
                # Behavior is unaffected since the else branch does the work.
                next
            else:
                # Only want to remove one of each correlated pair
                if x not in cols_seen:
                    cols_to_remove.append(x)
                    cols_to_remove_paire.append(key)
    cols_to_remove = list(set(cols_to_remove))
    print('Number of columns to remove: ', len(cols_to_remove))
    train_corrs_removed = train.drop(columns=cols_to_remove)
    test_corrs_removed = test.drop(columns=cols_to_remove)
    print('Training Corrs Removed Shape: ', train_corrs_removed.shape)
    # NOTE(review): "ShapeL" in the next message is a typo in a runtime
    # string; left unchanged here.
    print('Test Corrs Removed ShapeL ', test_corrs_removed.shape)
    # Side effect: snapshot after correlation pruning; the return follows
    # on the original next line.
    train_corrs_removed.to_csv('train_all_corrs_removed.csv', index=False)
    test_corrs_removed.to_csv('test_all_corrs_removed.csv', index=False)
return train_corrs_removed, test_corrs_removed | scripts/feature_engineer.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from utils.feature_engineer import count_categorical
from utils.feature_engineer import agg_numeric
from utils.feature_engineer import missing_values_table
from utils.feature_engineer import light_gbm
import warnings
warnings.filterwarnings("ignore")
def feature_engineer(train, test, bureau, bureau_balance, credit_card_balance,
installments_payments, pos_cash_balance, previous_application):
"""
This function read all the data from the competition and do manual feature engineer to it.
:param train:
:param test:
:param bureau:
:param bureau_balance:
:param credit_card_balance:
:param installments_payments:
:param pos_cash_balance:
:param previous_application:
:return: (Dataframe) train
(Datafarme) test
"""
bureau_counts = count_categorical(bureau, group_var='SK_ID_CURR', df_name='bureau')
bureau_agg = agg_numeric(bureau.drop(columns=['SK_ID_BUREAU']), group_var='SK_ID_CURR', df_name='bureau')
bureau_balance_counts = count_categorical(bureau_balance, group_var='SK_ID_BUREAU', df_name='bureau_balance')
bureau_balance_agg = agg_numeric(bureau_balance, group_var='SK_ID_BUREAU', df_name='bureau_balance')
credit_card_balance_counts = count_categorical(credit_card_balance,
group_var='SK_ID_CURR', df_name='credit_card_balance')
credit_card_balance_agg = agg_numeric(credit_card_balance.drop(columns=['SK_ID_PREV']),
group_var='SK_ID_CURR', df_name='credit_card_balance')
# Reason: Installments_payments_counts table contains no object value.
# installments_payments_counts = count_categorical(installments_payments,
# group_var='SK_ID_CURR', df_name='installments_payments')
installments_payments_agg = agg_numeric(installments_payments.drop(columns=['SK_ID_PREV']),
group_var='SK_ID_CURR', df_name='installments_payments')
pos_cash_balance_counts = count_categorical(pos_cash_balance, group_var='SK_ID_CURR', df_name='pos_cash_balance')
pos_cash_balance_agg = agg_numeric(pos_cash_balance.drop(columns=['SK_ID_PREV']),
group_var='SK_ID_CURR', df_name='pos_cash_balance')
previous_application_counts = count_categorical(previous_application,
group_var='SK_ID_CURR', df_name='previous_application_counts')
previous_application_agg = agg_numeric(previous_application.drop(columns=['SK_ID_PREV']),
group_var='SK_ID_CURR', df_name='previous_application')
# Dataframe grouped by the loan
bureau_by_loan = bureau_balance_agg.merge(bureau_balance_counts,
right_index=True, left_on='SK_ID_BUREAU', how='outer')
# Merge to include the SK_ID_CURR
bureau_by_loan = bureau[['SK_ID_BUREAU', 'SK_ID_CURR']].merge(bureau_by_loan, on='SK_ID_BUREAU', how='left')
# Aggregate the stats for each client
bureau_balance_by_client = agg_numeric(bureau_by_loan.drop(columns=['SK_ID_BUREAU']),
group_var='SK_ID_CURR', df_name='client')
original_features = list(train.columns)
print('Original Number of Features: ', len(original_features))
# TODO: We can also first deal with pos_cash_balance and credit card balance before merge.
# Merge with the value counts of bureau
train = train.merge(bureau_counts, on='SK_ID_CURR', how='left')
# Merge with the stats of bureau
train = train.merge(bureau_agg, on='SK_ID_CURR', how='left')
# Merge with the monthly information grouped by client
train = train.merge(bureau_balance_by_client, on='SK_ID_CURR', how='left')
# Merge with credit card balance counts
train = train.merge(credit_card_balance_counts, on='SK_ID_CURR', how='left')
# Merge with credit card balance agg
train = train.merge(credit_card_balance_agg, on='SK_ID_CURR', how='left')
# Merge with installments payments agg
train = train.merge(installments_payments_agg, on='SK_ID_CURR', how='left')
# Merge with pos_cash_balance counts
train = train.merge(pos_cash_balance_counts, on='SK_ID_CURR', how='left')
# Merge with pos_cash_balance agg
train = train.merge(pos_cash_balance_agg, on='SK_ID_CURR', how='left')
# Merge with previous_application counts
train = train.merge(previous_application_counts, on='SK_ID_CURR', how='left')
# Merge with previous application agg
train = train.merge(previous_application_agg, on='SK_ID_CURR', how='left')
new_features = list(train.columns)
print('Number of features using previous loans from other institutions data: ', len(new_features))
missing_train = missing_values_table(train)
missing_train_vars = list(missing_train.index[missing_train['% of Total Values'] > 90])
# Test
# Merge with the value counts of bureau
test = test.merge(bureau_counts, on='SK_ID_CURR', how='left')
# Merge with the stats of bureau
test = test.merge(bureau_agg, on='SK_ID_CURR', how='left')
# Merge with the monthly information grouped by client
test = test.merge(bureau_balance_by_client, on='SK_ID_CURR', how='left')
# Merge with credit card balance counts
test = test.merge(credit_card_balance_counts, on='SK_ID_CURR', how='left')
# Merge with credit card balance agg
test = test.merge(credit_card_balance_agg, on='SK_ID_CURR', how='left')
# Merge with installments payments agg
test = test.merge(installments_payments_agg, on='SK_ID_CURR', how='left')
# Merge with pos_cash_balance counts
test = test.merge(pos_cash_balance_counts, on='SK_ID_CURR', how='left')
# Merge with pos_cash_balance agg
test = test.merge(pos_cash_balance_agg, on='SK_ID_CURR', how='left')
# Merge with previous_application counts
test = test.merge(previous_application_counts, on='SK_ID_CURR', how='left')
# Merge with previous application agg
test = test.merge(previous_application_agg, on='SK_ID_CURR', how='left')
print('Shape of Training Data: ', train.shape)
print('Shape of Testing Data: ', test.shape)
train_labels = train['TARGET']
# Align the dataframes, this will remove the 'TARGET' column
train, test = train.align(test, join='inner', axis=1)
train['TARGET'] = train_labels
print('Training Data Shape: ', train.shape)
print('Testing Data Shape ', test.shape)
missing_test = missing_values_table(test)
missing_test_vars = list(missing_test.index[missing_test['% of Total Values'] > 90])
len(missing_test_vars)
missing_columns = list(set(missing_test_vars + missing_train_vars))
print('There are %d columns with more than 90%% missing in either the training or testing data.'
% len(missing_columns))
# Drop the missing columns
train = train.drop(columns=missing_columns)
test = test.drop(columns=missing_columns)
train.to_csv('train_all_raw.csv', index=False)
test.to_csv('test_all_raw.csv', index=False)
# Calculate all correlations in dataframe
corrs = train.corr()
corrs = corrs.sort_values('TARGET', ascending=False)
# Set the threshold
threshold = 0.8
# Empty dictionary to hold correlated variables
above_threshold_vars = {}
# For each column, record the variables that are above the threshold
for col in corrs:
above_threshold_vars[col] = list(corrs.index[corrs[col] > threshold])
# Track columns to remove and columns already examined
cols_to_remove = []
cols_seen = []
cols_to_remove_paire = []
# Iterate through columns and correlated columns
for key, value in above_threshold_vars.items():
# Keep track of columns already examined
cols_seen.append(key)
for x in value:
if x == key:
next
else:
# Only want to remove on in a pair
if x not in cols_seen:
cols_to_remove.append(x)
cols_to_remove_paire.append(key)
cols_to_remove = list(set(cols_to_remove))
print('Number of columns to remove: ', len(cols_to_remove))
train_corrs_removed = train.drop(columns=cols_to_remove)
test_corrs_removed = test.drop(columns=cols_to_remove)
print('Training Corrs Removed Shape: ', train_corrs_removed.shape)
print('Test Corrs Removed ShapeL ', test_corrs_removed.shape)
train_corrs_removed.to_csv('train_all_corrs_removed.csv', index=False)
test_corrs_removed.to_csv('test_all_corrs_removed.csv', index=False)
return train_corrs_removed, test_corrs_removed | 0.542863 | 0.421314 |
import pprint
import re # noqa: F401
import six
class ResourceServerIdentityHeadersJwt(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'certificate': 'list[str]',
'hdr_name': 'str',
'claims': 'list[ResourceServerIdentityHeadersJwtClaims]'
}
attribute_map = {
'certificate': 'certificate',
'hdr_name': 'hdr_name',
'claims': 'claims'
}
def __init__(self, certificate=None, hdr_name='jwt', claims=None): # noqa: E501
"""ResourceServerIdentityHeadersJwt - a model defined in OpenAPI""" # noqa: E501
self._certificate = None
self._hdr_name = None
self._claims = None
self.discriminator = None
if certificate is not None:
self.certificate = certificate
if hdr_name is not None:
self.hdr_name = hdr_name
if claims is not None:
self.claims = claims
@property
def certificate(self):
"""Gets the certificate of this ResourceServerIdentityHeadersJwt. # noqa: E501
PEM based personal certificate files which will be used to sign the JWT. These certificate files should include the private key, a certificate signed with the private key, and the signer certificate or signer certificate chain (if required). # noqa: E501
:return: The certificate of this ResourceServerIdentityHeadersJwt. # noqa: E501
:rtype: list[str]
"""
return self._certificate
@certificate.setter
def certificate(self, certificate):
"""Sets the certificate of this ResourceServerIdentityHeadersJwt.
PEM based personal certificate files which will be used to sign the JWT. These certificate files should include the private key, a certificate signed with the private key, and the signer certificate or signer certificate chain (if required). # noqa: E501
:param certificate: The certificate of this ResourceServerIdentityHeadersJwt. # noqa: E501
:type: list[str]
"""
self._certificate = certificate
@property
def hdr_name(self):
"""Gets the hdr_name of this ResourceServerIdentityHeadersJwt. # noqa: E501
The name of the HTTP header which will contain the generated JWT. # noqa: E501
:return: The hdr_name of this ResourceServerIdentityHeadersJwt. # noqa: E501
:rtype: str
"""
return self._hdr_name
@hdr_name.setter
def hdr_name(self, hdr_name):
"""Sets the hdr_name of this ResourceServerIdentityHeadersJwt.
The name of the HTTP header which will contain the generated JWT. # noqa: E501
:param hdr_name: The hdr_name of this ResourceServerIdentityHeadersJwt. # noqa: E501
:type: str
"""
self._hdr_name = hdr_name
@property
def claims(self):
"""Gets the claims of this ResourceServerIdentityHeadersJwt. # noqa: E501
The claims which are to be added to the JWT. The claim can either be obtained from a literal string, or from the value of a credential attribute. # noqa: E501
:return: The claims of this ResourceServerIdentityHeadersJwt. # noqa: E501
:rtype: list[ResourceServerIdentityHeadersJwtClaims]
"""
return self._claims
@claims.setter
def claims(self, claims):
"""Sets the claims of this ResourceServerIdentityHeadersJwt.
The claims which are to be added to the JWT. The claim can either be obtained from a literal string, or from the value of a credential attribute. # noqa: E501
:param claims: The claims of this ResourceServerIdentityHeadersJwt. # noqa: E501
:type: list[ResourceServerIdentityHeadersJwtClaims]
"""
self._claims = claims
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourceServerIdentityHeadersJwt):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ResourceServerIdentityHeadersJwt):
return True
return self.to_dict() != other.to_dict() | python/packages/ibm_application_gateway/config/resource_server_identity_headers_jwt.py | import pprint
import re # noqa: F401
import six
class ResourceServerIdentityHeadersJwt(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'certificate': 'list[str]',
'hdr_name': 'str',
'claims': 'list[ResourceServerIdentityHeadersJwtClaims]'
}
attribute_map = {
'certificate': 'certificate',
'hdr_name': 'hdr_name',
'claims': 'claims'
}
def __init__(self, certificate=None, hdr_name='jwt', claims=None): # noqa: E501
"""ResourceServerIdentityHeadersJwt - a model defined in OpenAPI""" # noqa: E501
self._certificate = None
self._hdr_name = None
self._claims = None
self.discriminator = None
if certificate is not None:
self.certificate = certificate
if hdr_name is not None:
self.hdr_name = hdr_name
if claims is not None:
self.claims = claims
@property
def certificate(self):
"""Gets the certificate of this ResourceServerIdentityHeadersJwt. # noqa: E501
PEM based personal certificate files which will be used to sign the JWT. These certificate files should include the private key, a certificate signed with the private key, and the signer certificate or signer certificate chain (if required). # noqa: E501
:return: The certificate of this ResourceServerIdentityHeadersJwt. # noqa: E501
:rtype: list[str]
"""
return self._certificate
@certificate.setter
def certificate(self, certificate):
"""Sets the certificate of this ResourceServerIdentityHeadersJwt.
PEM based personal certificate files which will be used to sign the JWT. These certificate files should include the private key, a certificate signed with the private key, and the signer certificate or signer certificate chain (if required). # noqa: E501
:param certificate: The certificate of this ResourceServerIdentityHeadersJwt. # noqa: E501
:type: list[str]
"""
self._certificate = certificate
@property
def hdr_name(self):
"""Gets the hdr_name of this ResourceServerIdentityHeadersJwt. # noqa: E501
The name of the HTTP header which will contain the generated JWT. # noqa: E501
:return: The hdr_name of this ResourceServerIdentityHeadersJwt. # noqa: E501
:rtype: str
"""
return self._hdr_name
@hdr_name.setter
def hdr_name(self, hdr_name):
"""Sets the hdr_name of this ResourceServerIdentityHeadersJwt.
The name of the HTTP header which will contain the generated JWT. # noqa: E501
:param hdr_name: The hdr_name of this ResourceServerIdentityHeadersJwt. # noqa: E501
:type: str
"""
self._hdr_name = hdr_name
@property
def claims(self):
"""Gets the claims of this ResourceServerIdentityHeadersJwt. # noqa: E501
The claims which are to be added to the JWT. The claim can either be obtained from a literal string, or from the value of a credential attribute. # noqa: E501
:return: The claims of this ResourceServerIdentityHeadersJwt. # noqa: E501
:rtype: list[ResourceServerIdentityHeadersJwtClaims]
"""
return self._claims
@claims.setter
def claims(self, claims):
"""Sets the claims of this ResourceServerIdentityHeadersJwt.
The claims which are to be added to the JWT. The claim can either be obtained from a literal string, or from the value of a credential attribute. # noqa: E501
:param claims: The claims of this ResourceServerIdentityHeadersJwt. # noqa: E501
:type: list[ResourceServerIdentityHeadersJwtClaims]
"""
self._claims = claims
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourceServerIdentityHeadersJwt):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ResourceServerIdentityHeadersJwt):
return True
return self.to_dict() != other.to_dict() | 0.778397 | 0.201656 |
import unittest
import os
class TestCodegen(unittest.TestCase):
"""
Test class around generating code from peripheral schemas.
"""
maxDiff = None
def gen_peripheral_template(self, template):
"""
Generates all I2C peripheral files for a given template.
"""
os.system('python3 cyanobyte/codegen.py \
-c \
-o ./tmp/ \
-t templates/' + template + '\
peripherals/ADS1015.yaml \
peripherals/BH1750FVI.yaml \
peripherals/BMP180.yaml \
peripherals/BMP280.yaml \
peripherals/LSM303D.yaml \
peripherals/MCP4725.yaml \
peripherals/MCP9808.yaml \
peripherals/TCS3472.yaml \
peripherals/example.yaml \
> /dev/null')
def gen_peripheral_tag(self, tag):
"""
Generates all I2C peripherals for a given tag.
"""
os.system('python3 cyanobyte/codegen.py \
-c \
-o ./tmp/ \
-t ' + tag + '\
peripherals/ADS1015.yaml \
peripherals/BH1750FVI.yaml \
peripherals/BMP180.yaml \
peripherals/BMP280.yaml \
peripherals/LSM303D.yaml \
peripherals/MCP4725.yaml \
peripherals/MCP9808.yaml \
peripherals/TCS3472.yaml \
peripherals/example.yaml \
> /dev/null')
def compare_files(self, platformName, extension):
"""
Compares the expected and actual file output for a given platform.
"""
peripherals = [
'ADS1015', 'BH1750FVI', 'BMP180', 'BMP280', 'LSM303D',
'MCP4725', 'MCP9808', 'TCS3472', 'Example'
]
test_path = 'test/sampleData'
tmp_path = 'tmp/com/cyanobyte'
for peripheral in peripherals:
full_test_path = os.path.join(
test_path,
platformName,
peripheral + '.' + extension
)
full_tmp_path = os.path.join(
tmp_path,
peripheral + '.' + extension
)
print('Comparing', full_test_path, 'and', full_tmp_path)
with open(full_test_path) as file1:
with open(full_tmp_path) as file2:
file_contents_1 = file1.read()
file_contents_2 = file2.read()
self.assertEqual(
file_contents_1,
file_contents_2,
msg="{0} and {1} are not the same".format(full_test_path, full_tmp_path)
)
def tearDown(self):
print('\n')
def test_arduino(self):
"""
Verify output of Arduino template.
"""
self.gen_peripheral_tag('arduino')
self.compare_files('arduino', 'cpp')
self.compare_files('arduino', 'h')
def test_circuitpython(self):
"""
Verify output of CircuitPython template.
"""
self.gen_peripheral_template('circuitpython.py')
self.compare_files('circuitpython', 'py')
def test_cmsis_svd(self):
"""
Verify output of CMSIS template.
"""
self.gen_peripheral_template('cmsis.svd')
self.compare_files('cmsis-svd', 'svd')
def test_embeddedc(self):
"""
Verify output of Embedded C template.
"""
self.gen_peripheral_tag('embedded')
self.compare_files('embedded-c', 'c')
self.compare_files('embedded-c', 'h')
def test_esp32(self):
"""
Verify output of ESP32 template.
"""
self.gen_peripheral_tag('esp32')
self.compare_files('esp32', 'cpp')
self.compare_files('esp32', 'h')
def test_espruino(self):
"""
Verify output of Espruino template.
"""
self.gen_peripheral_tag('espruino')
self.compare_files('espruino', 'js')
def test_i2c_device(self):
"""
Verify output of Pimoroni i2c-device template.
"""
self.gen_peripheral_tag('i2cdevice')
self.compare_files('i2c-device', 'py')
def test_kubos(self):
"""
Verify output of Kubos template.
"""
self.gen_peripheral_tag('kubos')
self.compare_files('kubos', 'c')
self.compare_files('kubos', 'h')
def test_latex(self):
"""
Verify output of LaTeX datasheet template.
"""
self.gen_peripheral_tag('datasheet')
self.compare_files('datasheet', 'tex')
def test_markdown(self):
"""
Verify output of Markdown web-hosting template.
"""
self.gen_peripheral_tag('doc')
self.compare_files('markdown', 'md')
def test_micropython(self):
"""
Verify output of Micropython template.
"""
self.gen_peripheral_tag('micropython')
self.compare_files('micropython', 'py')
def test_raspberrypi(self):
"""
Verify output of Raspberry Pi template.
"""
self.gen_peripheral_tag('raspberrypi')
self.compare_files('raspberrypi', 'py')
def test_rpi_emboss(self):
"""
Verify output of Raspberry Pi template with Emboss/SPI.
"""
# Generate Peripheral Tag
os.system('python3 cyanobyte/codegen.py \
-c \
--debug \
-o ./tmp/ \
-t templates/raspberrypi-spi-emboss.py\
peripherals/examplespi-emboss.yaml \
> /dev/null')
# Compare files
test_path = 'test/sampleData/raspberrypi/ExampleSpiEmboss.py'
tmp_path = 'tmp/com/cyanobyte/ExampleSpiEmboss.py'
print('Comparing', test_path, 'and', tmp_path)
with open(test_path) as file1:
with open(tmp_path) as file2:
file_contents_1 = file1.read()
file_contents_2 = file2.read()
self.assertEqual(
file_contents_1,
file_contents_2,
msg="{0} and {1} are not the same".format(test_path, tmp_path)
)
if __name__ == '__main__':
unittest.main() | test/test_codegen.py |
import unittest
import os
class TestCodegen(unittest.TestCase):
"""
Test class around generating code from peripheral schemas.
"""
maxDiff = None
def gen_peripheral_template(self, template):
"""
Generates all I2C peripheral files for a given template.
"""
os.system('python3 cyanobyte/codegen.py \
-c \
-o ./tmp/ \
-t templates/' + template + '\
peripherals/ADS1015.yaml \
peripherals/BH1750FVI.yaml \
peripherals/BMP180.yaml \
peripherals/BMP280.yaml \
peripherals/LSM303D.yaml \
peripherals/MCP4725.yaml \
peripherals/MCP9808.yaml \
peripherals/TCS3472.yaml \
peripherals/example.yaml \
> /dev/null')
def gen_peripheral_tag(self, tag):
"""
Generates all I2C peripherals for a given tag.
"""
os.system('python3 cyanobyte/codegen.py \
-c \
-o ./tmp/ \
-t ' + tag + '\
peripherals/ADS1015.yaml \
peripherals/BH1750FVI.yaml \
peripherals/BMP180.yaml \
peripherals/BMP280.yaml \
peripherals/LSM303D.yaml \
peripherals/MCP4725.yaml \
peripherals/MCP9808.yaml \
peripherals/TCS3472.yaml \
peripherals/example.yaml \
> /dev/null')
def compare_files(self, platformName, extension):
"""
Compares the expected and actual file output for a given platform.
"""
peripherals = [
'ADS1015', 'BH1750FVI', 'BMP180', 'BMP280', 'LSM303D',
'MCP4725', 'MCP9808', 'TCS3472', 'Example'
]
test_path = 'test/sampleData'
tmp_path = 'tmp/com/cyanobyte'
for peripheral in peripherals:
full_test_path = os.path.join(
test_path,
platformName,
peripheral + '.' + extension
)
full_tmp_path = os.path.join(
tmp_path,
peripheral + '.' + extension
)
print('Comparing', full_test_path, 'and', full_tmp_path)
with open(full_test_path) as file1:
with open(full_tmp_path) as file2:
file_contents_1 = file1.read()
file_contents_2 = file2.read()
self.assertEqual(
file_contents_1,
file_contents_2,
msg="{0} and {1} are not the same".format(full_test_path, full_tmp_path)
)
def tearDown(self):
print('\n')
def test_arduino(self):
"""
Verify output of Arduino template.
"""
self.gen_peripheral_tag('arduino')
self.compare_files('arduino', 'cpp')
self.compare_files('arduino', 'h')
def test_circuitpython(self):
"""
Verify output of CircuitPython template.
"""
self.gen_peripheral_template('circuitpython.py')
self.compare_files('circuitpython', 'py')
def test_cmsis_svd(self):
"""
Verify output of CMSIS template.
"""
self.gen_peripheral_template('cmsis.svd')
self.compare_files('cmsis-svd', 'svd')
def test_embeddedc(self):
"""
Verify output of Embedded C template.
"""
self.gen_peripheral_tag('embedded')
self.compare_files('embedded-c', 'c')
self.compare_files('embedded-c', 'h')
def test_esp32(self):
"""
Verify output of ESP32 template.
"""
self.gen_peripheral_tag('esp32')
self.compare_files('esp32', 'cpp')
self.compare_files('esp32', 'h')
def test_espruino(self):
"""
Verify output of Espruino template.
"""
self.gen_peripheral_tag('espruino')
self.compare_files('espruino', 'js')
def test_i2c_device(self):
"""
Verify output of Pimoroni i2c-device template.
"""
self.gen_peripheral_tag('i2cdevice')
self.compare_files('i2c-device', 'py')
def test_kubos(self):
"""
Verify output of Kubos template.
"""
self.gen_peripheral_tag('kubos')
self.compare_files('kubos', 'c')
self.compare_files('kubos', 'h')
def test_latex(self):
"""
Verify output of LaTeX datasheet template.
"""
self.gen_peripheral_tag('datasheet')
self.compare_files('datasheet', 'tex')
def test_markdown(self):
"""
Verify output of Markdown web-hosting template.
"""
self.gen_peripheral_tag('doc')
self.compare_files('markdown', 'md')
def test_micropython(self):
"""
Verify output of Micropython template.
"""
self.gen_peripheral_tag('micropython')
self.compare_files('micropython', 'py')
def test_raspberrypi(self):
"""
Verify output of Raspberry Pi template.
"""
self.gen_peripheral_tag('raspberrypi')
self.compare_files('raspberrypi', 'py')
def test_rpi_emboss(self):
"""
Verify output of Raspberry Pi template with Emboss/SPI.
"""
# Generate Peripheral Tag
os.system('python3 cyanobyte/codegen.py \
-c \
--debug \
-o ./tmp/ \
-t templates/raspberrypi-spi-emboss.py\
peripherals/examplespi-emboss.yaml \
> /dev/null')
# Compare files
test_path = 'test/sampleData/raspberrypi/ExampleSpiEmboss.py'
tmp_path = 'tmp/com/cyanobyte/ExampleSpiEmboss.py'
print('Comparing', test_path, 'and', tmp_path)
with open(test_path) as file1:
with open(tmp_path) as file2:
file_contents_1 = file1.read()
file_contents_2 = file2.read()
self.assertEqual(
file_contents_1,
file_contents_2,
msg="{0} and {1} are not the same".format(test_path, tmp_path)
)
if __name__ == '__main__':
unittest.main() | 0.507324 | 0.495484 |
__author__ = 'iambocai'
import sys, urllib2, base64, json, time,socket
step = 60
ip = socket.gethostname()
ts = int(time.time())
keys = ('messages_ready', 'messages_unacknowledged')
rates = ('ack', 'deliver', 'deliver_get', 'publish')
request = urllib2.Request("http://%s:15672/api/queues" %ip)
# see #issue4
base64string = base64.b64encode('guest:guest')
request.add_header("Authorization", "Basic %s" % base64string)
result = urllib2.urlopen(request)
data = json.loads(result.read())
tag = ''
#tag = sys.argv[1].replace('_',',').replace('.','=')
p = []
for queue in data:
# ready and unack
msg_total = 0
for key in keys:
q = {}
q["endpoint"] = ip
q['timestamp'] = ts
q['step'] = step
q['counterType'] = "GAUGE"
q['metric'] = 'rabbitmq.%s' % key
q['tags'] = 'name=%s,%s' % (queue['name'],tag)
q['value'] = int(queue[key])
msg_total += q['value']
p.append(q)
# total
q = {}
q["endpoint"] = ip
q['timestamp'] = ts
q['step'] = step
q['counterType'] = "GAUGE"
q['metric'] = 'rabbitmq.messages_total'
q['tags'] = 'name=%s,%s' % (queue['name'],tag)
q['value'] = msg_total
p.append(q)
# rates
for rate in rates:
q = {}
q["endpoint"] = ip
q['timestamp'] = ts
q['step'] = step
q['counterType'] = "GAUGE"
q['metric'] = 'rabbitmq.%s_rate' % rate
q['tags'] = 'name=%s,%s' % (queue['name'],tag)
try:
q['value'] = int(queue['message_stats']["%s_details" % rate]['rate'])
except:
q['value'] = 0
p.append(q)
print json.dumps(p, indent=4)
method = "POST"
handler = urllib2.HTTPHandler()
opener = urllib2.build_opener(handler)
url = 'http://127.0.0.1:1988/v1/push'
request = urllib2.Request(url, data=json.dumps(p) )
request.add_header("Content-Type",'application/json')
request.get_method = lambda: method
try:
connection = opener.open(request)
except urllib2.HTTPError,e:
connection = e
# check. Substitute with appropriate HTTP code.
if connection.code == 200:
print connection.read()
else:
print '{"err":1,"msg":"%s"}' % connection | falcon-monit-scripts/rabbitmq/rabbitmq-monitor.py |
__author__ = 'iambocai'
import sys, urllib2, base64, json, time,socket
step = 60
ip = socket.gethostname()
ts = int(time.time())
keys = ('messages_ready', 'messages_unacknowledged')
rates = ('ack', 'deliver', 'deliver_get', 'publish')
request = urllib2.Request("http://%s:15672/api/queues" %ip)
# see #issue4
base64string = base64.b64encode('guest:guest')
request.add_header("Authorization", "Basic %s" % base64string)
result = urllib2.urlopen(request)
data = json.loads(result.read())
tag = ''
#tag = sys.argv[1].replace('_',',').replace('.','=')
p = []
for queue in data:
# ready and unack
msg_total = 0
for key in keys:
q = {}
q["endpoint"] = ip
q['timestamp'] = ts
q['step'] = step
q['counterType'] = "GAUGE"
q['metric'] = 'rabbitmq.%s' % key
q['tags'] = 'name=%s,%s' % (queue['name'],tag)
q['value'] = int(queue[key])
msg_total += q['value']
p.append(q)
# total
q = {}
q["endpoint"] = ip
q['timestamp'] = ts
q['step'] = step
q['counterType'] = "GAUGE"
q['metric'] = 'rabbitmq.messages_total'
q['tags'] = 'name=%s,%s' % (queue['name'],tag)
q['value'] = msg_total
p.append(q)
# rates
for rate in rates:
q = {}
q["endpoint"] = ip
q['timestamp'] = ts
q['step'] = step
q['counterType'] = "GAUGE"
q['metric'] = 'rabbitmq.%s_rate' % rate
q['tags'] = 'name=%s,%s' % (queue['name'],tag)
try:
q['value'] = int(queue['message_stats']["%s_details" % rate]['rate'])
except:
q['value'] = 0
p.append(q)
print json.dumps(p, indent=4)
method = "POST"
handler = urllib2.HTTPHandler()
opener = urllib2.build_opener(handler)
url = 'http://127.0.0.1:1988/v1/push'
request = urllib2.Request(url, data=json.dumps(p) )
request.add_header("Content-Type",'application/json')
request.get_method = lambda: method
try:
connection = opener.open(request)
except urllib2.HTTPError,e:
connection = e
# check. Substitute with appropriate HTTP code.
if connection.code == 200:
print connection.read()
else:
print '{"err":1,"msg":"%s"}' % connection | 0.095766 | 0.100702 |
# Editor
import wx
import os
import tempfile
def mktemp():
fd, name = tempfile.mkstemp()
os.close(fd)
return name
class Editor(wx.Frame):
    """A small wxPython text-editor frame with menu bar, toolbar and status bar.

    State:
      self.modify          -- True while the buffer holds unsaved changes
      self.last_name_saved -- path of the file backing the buffer ('' if none)
      self.replace         -- insert/overwrite flag mirrored in the status bar
    """

    def __init__(self, parent, id, title, size=(600, 500)):
        """Build the UI: menus (ids 101-112), toolbar (ids 801-807),
        the text control (id 1000) and the three-field status bar."""
        wx.Frame.__init__(self, parent, id, title, size=size)
        # variables
        self.modify = False
        self.last_name_saved = ''
        self.replace = False
        # setting up menubar
        menubar = wx.MenuBar()
        file = wx.Menu()
        new = wx.MenuItem(file, 101, '&New\tCtrl+N', 'Creates a new document')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_NEW, wx.ART_OTHER, (16,16))
        new.SetBitmap(bmp)
        file.AppendItem(new)
        open = wx.MenuItem(file, 102, '&Open\tCtrl+O', 'Open an existing file')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_OTHER, (16,16))
        open.SetBitmap(bmp)
        file.AppendItem(open)
        file.AppendSeparator()
        save = wx.MenuItem(file, 103, '&Save\tCtrl+S', 'Save the file')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE, wx.ART_OTHER, (16,16))
        save.SetBitmap(bmp)
        file.AppendItem(save)
        saveas = wx.MenuItem(file, 104, 'Save &As...\tShift+Ctrl+S', 'Save the file with a different name')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE_AS, wx.ART_OTHER, (16,16))
        saveas.SetBitmap(bmp)
        file.AppendItem(saveas)
        file.AppendSeparator()
        quit = wx.MenuItem(file, 105, '&Quit\tCtrl+Q', 'Quit the Application')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_QUIT, wx.ART_OTHER, (16,16))
        quit.SetBitmap(bmp)
        file.AppendItem(quit)
        edit = wx.Menu()
        cut = wx.MenuItem(edit, 106, '&Cut\tCtrl+X', 'Cut the Selection')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_CUT, wx.ART_OTHER, (16,16))
        cut.SetBitmap(bmp)
        edit.AppendItem(cut)
        copy = wx.MenuItem(edit, 107, '&Copy\tCtrl+C', 'Copy the Selection')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_COPY, wx.ART_OTHER, (16,16))
        copy.SetBitmap(bmp)
        edit.AppendItem(copy)
        paste = wx.MenuItem(edit, 108, '&Paste\tCtrl+V', 'Paste text from clipboard')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_PASTE, wx.ART_OTHER, (16,16))
        paste.SetBitmap(bmp)
        edit.AppendItem(paste)
        delete = wx.MenuItem(edit, 109, '&Delete', 'Delete the selected text')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_DELETE, wx.ART_OTHER, (16,16))
        delete.SetBitmap(bmp)
        edit.AppendItem(delete)
        edit.AppendSeparator()
        edit.Append(110, 'Select &All\tCtrl+A', 'Select the entire text')
        view = wx.Menu()
        view.Append(111, '&Statusbar', 'Show StatusBar')
        help = wx.Menu()
        about = wx.MenuItem(help, 112, '&About\tF1', 'About Editor')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, wx.ART_OTHER, (16,16))
        about.SetBitmap(bmp)
        help.AppendItem(about)
        menubar.Append(file, '&File')
        menubar.Append(edit, '&Edit')
        menubar.Append(view, '&View')
        menubar.Append(help, '&Help')
        self.SetMenuBar(menubar)
        # menu events (ids match the MenuItem ids above)
        self.Bind(wx.EVT_MENU, self.NewApplication, id=101)
        self.Bind(wx.EVT_MENU, self.OnOpenFile, id=102)
        self.Bind(wx.EVT_MENU, self.OnSaveFile, id=103)
        self.Bind(wx.EVT_MENU, self.OnSaveAsFile, id=104)
        self.Bind(wx.EVT_MENU, self.QuitApplication, id=105)
        self.Bind(wx.EVT_MENU, self.OnCut, id=106)
        self.Bind(wx.EVT_MENU, self.OnCopy, id=107)
        self.Bind(wx.EVT_MENU, self.OnPaste, id=108)
        self.Bind(wx.EVT_MENU, self.OnDelete, id=109)
        self.Bind(wx.EVT_MENU, self.OnSelectAll, id=110)
        self.Bind(wx.EVT_MENU, self.ToggleStatusBar, id=111)
        self.Bind(wx.EVT_MENU, self.OnAbout, id=112)
        # setting up toolbar
        self.toolbar = self.CreateToolBar( wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT | wx.TB_TEXT )
        bmp = wx.ArtProvider.GetBitmap(wx.ART_NEW, wx.ART_OTHER, (16,16))
        self.toolbar.AddSimpleTool(801, bmp, 'New', '')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_OTHER, (16,16))
        self.toolbar.AddSimpleTool(802, bmp, 'Open', '')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE, wx.ART_OTHER, (16,16))
        self.toolbar.AddSimpleTool(803, bmp, 'Save', '')
        self.toolbar.AddSeparator()
        bmp = wx.ArtProvider.GetBitmap(wx.ART_CUT, wx.ART_OTHER, (16,16))
        self.toolbar.AddSimpleTool(804, bmp, 'Cut', '')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_COPY, wx.ART_OTHER, (16,16))
        self.toolbar.AddSimpleTool(805, bmp, 'Copy', '')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_PASTE, wx.ART_OTHER, (16,16))
        self.toolbar.AddSimpleTool(806, bmp, 'Paste', '')
        self.toolbar.AddSeparator()
        bmp = wx.ArtProvider.GetBitmap(wx.ART_QUIT, wx.ART_OTHER, (16,16))
        self.toolbar.AddSimpleTool(807, bmp, 'Exit', '')
        self.toolbar.Realize()
        # toolbar events
        self.Bind(wx.EVT_TOOL, self.NewApplication, id=801)
        self.Bind(wx.EVT_TOOL, self.OnOpenFile, id=802)
        self.Bind(wx.EVT_TOOL, self.OnSaveFile, id=803)
        self.Bind(wx.EVT_TOOL, self.OnCut, id=804)
        self.Bind(wx.EVT_TOOL, self.OnCopy, id=805)
        self.Bind(wx.EVT_TOOL, self.OnPaste, id=806)
        self.Bind(wx.EVT_TOOL, self.QuitApplication, id=807)
        # the editing surface itself
        self.text = wx.TextCtrl(self, 1000, '', size=(-1, -1), style=wx.TE_MULTILINE | wx.TE_PROCESS_ENTER)
        self.text.SetFocus()
        self.text.Bind(wx.EVT_TEXT, self.OnTextChanged, id=1000)
        self.text.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        self.Bind(wx.EVT_CLOSE, self.QuitApplication)
        self.StatusBar()
        self.Centre()
        self.Show(True)

    def NewApplication(self, event):
        """Open a fresh, independent editor window."""
        editor = Editor(None, -1, 'Editor')
        editor.Centre()
        editor.Show()

    def OnOpenFile(self, event):
        """Open a file, first offering to save unsaved changes."""
        file_name = os.path.basename(self.last_name_saved)
        if self.modify:
            dlg = wx.MessageDialog(self, 'Save changes?', '', wx.YES_NO | wx.YES_DEFAULT | wx.CANCEL |
                wx.ICON_QUESTION)
            val = dlg.ShowModal()
            if val == wx.ID_YES:
                self.OnSaveFile(event)
                self.DoOpenFile()
            elif val == wx.ID_CANCEL:
                dlg.Destroy()
            else:
                self.DoOpenFile()
        else:
            self.DoOpenFile()

    def DoOpenFile(self, filename=None, last_name_saved=True):
        """Load *filename* (or ask via a file dialog) into the text control.

        NOTE(review): if the dialog is cancelled, `path` is never assigned and
        the open() below raises UnboundLocalError -- confirm intended handling.
        """
        if filename is None:
            wcd = 'All files (*)|*|Editor files (*.ef)|*.ef|'
            dir = os.getcwd()
            open_dlg = wx.FileDialog(self, message='Choose a file', defaultDir=dir, defaultFile='',
                wildcard=wcd, style=wx.OPEN|wx.CHANGE_DIR)
            if open_dlg.ShowModal() == wx.ID_OK:
                path = open_dlg.GetPath()
        else:
            path = filename
        try:
            file = open(path, 'r')
            text = file.read()
            file.close()
            # clear any previous buffer before writing the new contents
            if self.text.GetLastPosition():
                self.text.Clear()
            self.text.WriteText(text)
            if last_name_saved:
                self.last_name_saved = path
            self.statusbar.SetStatusText('', 1)
            self.modify = False
        except IOError as error:
            dlg = wx.MessageDialog(self, 'Error opening file\n' + str(error))
            dlg.ShowModal()
        except UnicodeDecodeError as error:
            dlg = wx.MessageDialog(self, 'Error opening file\n' + str(error))
            dlg.ShowModal()
        if filename is None:
            open_dlg.Destroy()
        return path

    def DoOpenGeometryAsFile(self, geo):
        """Serialise *geo* to a temporary .xml file and load it into the editor.

        Falls back to 'geo.xml' in the working directory if the temp file
        cannot be created.  Returns the file name on success.
        """
        try:
            filename = mktemp()
            filename = filename.split('.')[0]
            filename = filename + ".xml"
        except:
            filename = "geo.xml"
        geo.save(filename)
        path = filename
        try:
            file = open(path, 'r')
            text = file.read()
            file.close()
            if self.text.GetLastPosition():
                self.text.Clear()
            self.text.WriteText(text)
            self.last_name_saved = path
            self.statusbar.SetStatusText('', 1)
            self.modify = False
            return filename
        except IOError as error:
            dlg = wx.MessageDialog(self, 'Error opening file\n' + str(error))
            dlg.ShowModal()
        except UnicodeDecodeError as error:
            dlg = wx.MessageDialog(self, 'Error opening file\n' + str(error))
            dlg.ShowModal()

    def OnSaveFile(self, event):
        """Save to the known path, or fall back to Save As."""
        if self.last_name_saved:
            try:
                file = open(self.last_name_saved, 'w')
                text = self.text.GetValue()
                file.write(text)
                file.close()
                self.statusbar.SetStatusText(os.path.basename(self.last_name_saved) + ' saved', 0)
                self.modify = False
                self.statusbar.SetStatusText('', 1)
            except IOError as error:
                dlg = wx.MessageDialog(self, 'Error saving file\n' + str(error))
                dlg.ShowModal()
        else:
            self.OnSaveAsFile(event)

    def OnSaveAsFile(self, event):
        """Ask for a destination and save the buffer there.

        NOTE(review): only the basename is stored in last_name_saved here, so
        a later plain Save may write into the current working directory.
        """
        wcd='All files(*)|*|Editor files (*.ef)|*.ef|'
        dir = os.getcwd()
        save_dlg = wx.FileDialog(self, message='Save file as...', defaultDir=dir, defaultFile='',
            wildcard=wcd, style=wx.SAVE | wx.OVERWRITE_PROMPT)
        if save_dlg.ShowModal() == wx.ID_OK:
            path = save_dlg.GetPath()
            try:
                file = open(path, 'w')
                text = self.text.GetValue()
                file.write(text)
                file.close()
                self.last_name_saved = os.path.basename(path)
                self.statusbar.SetStatusText(self.last_name_saved + ' saved', 0)
                self.modify = False
                self.statusbar.SetStatusText('', 1)
            except IOError as error:
                dlg = wx.MessageDialog(self, 'Error saving file\n' + str(error))
                dlg.ShowModal()
        save_dlg.Destroy()

    def OnCut(self, event):
        """Cut the selection to the clipboard."""
        self.text.Cut()

    def OnCopy(self, event):
        """Copy the selection to the clipboard."""
        self.text.Copy()

    def OnPaste(self, event):
        """Insert the clipboard contents at the caret."""
        self.text.Paste()

    def QuitApplication(self, event):
        """Close the window, offering to save unsaved changes first."""
        if self.modify:
            dlg = wx.MessageDialog(self, 'Save before Exit?', '', wx.YES_NO | wx.YES_DEFAULT |
                wx.CANCEL | wx.ICON_QUESTION)
            val = dlg.ShowModal()
            if val == wx.ID_YES:
                self.OnSaveFile(event)
                # only exit when the save actually cleared the dirty flag
                if not self.modify:
                    wx.Exit()
            elif val == wx.ID_CANCEL:
                dlg.Destroy()
            else:
                self.Destroy()
        else:
            self.Destroy()

    def OnDelete(self, event):
        """Remove the currently selected text."""
        frm, to = self.text.GetSelection()
        self.text.Remove(frm, to)

    def OnSelectAll(self, event):
        """Select the whole buffer."""
        self.text.SelectAll()

    def OnTextChanged(self, event):
        """Mark the buffer dirty and show ' modified' in the status bar."""
        self.modify = True
        self.statusbar.SetStatusText(' modified', 1)
        event.Skip()

    def OnKeyDown(self, event):
        """Toggle the INS (overwrite) indicator when Insert is pressed."""
        keycode = event.GetKeyCode()
        if keycode == wx.WXK_INSERT:
            if not self.replace:
                self.statusbar.SetStatusText('INS', 2)
                self.replace = True
            else:
                self.statusbar.SetStatusText('', 2)
                self.replace = False
        event.Skip()

    def ToggleStatusBar(self, event):
        """Show/hide the status bar."""
        if self.statusbar.IsShown():
            self.statusbar.Hide()
        else:
            self.statusbar.Show()

    def StatusBar(self):
        """Create the three-field status bar (file info, dirty flag, INS)."""
        self.statusbar = self.CreateStatusBar()
        self.statusbar.SetFieldsCount(3)
        self.statusbar.SetStatusWidths([-5, -2, -1])

    def OnAbout(self, event):
        """Show the About dialog."""
        dlg = wx.MessageDialog(self, '\tEditor\t\n Another Tutorial\n<NAME> 2005-2006',
            'About Editor', wx.OK | wx.ICON_INFORMATION)
        dlg.ShowModal()
        dlg.Destroy()
#app = wx.App()
#Editor(None, -1, 'Editor')
#app.MainLoop()
# Editor
import wx
import os
import tempfile
def mktemp():
    """Create an empty temporary file on disk and return its path.

    Unlike tempfile.mkstemp(), the open descriptor is closed immediately,
    so the caller only has to deal with the file name.
    """
    descriptor, path = tempfile.mkstemp()
    os.close(descriptor)
    return path
class Editor(wx.Frame):
    """A small wxPython text-editor frame with menu bar, toolbar and status bar.

    State:
      self.modify          -- True while the buffer holds unsaved changes
      self.last_name_saved -- path of the file backing the buffer ('' if none)
      self.replace         -- insert/overwrite flag mirrored in the status bar
    """

    def __init__(self, parent, id, title, size=(600, 500)):
        """Build the UI: menus (ids 101-112), toolbar (ids 801-807),
        the text control (id 1000) and the three-field status bar."""
        wx.Frame.__init__(self, parent, id, title, size=size)
        # variables
        self.modify = False
        self.last_name_saved = ''
        self.replace = False
        # setting up menubar
        menubar = wx.MenuBar()
        file = wx.Menu()
        new = wx.MenuItem(file, 101, '&New\tCtrl+N', 'Creates a new document')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_NEW, wx.ART_OTHER, (16,16))
        new.SetBitmap(bmp)
        file.AppendItem(new)
        open = wx.MenuItem(file, 102, '&Open\tCtrl+O', 'Open an existing file')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_OTHER, (16,16))
        open.SetBitmap(bmp)
        file.AppendItem(open)
        file.AppendSeparator()
        save = wx.MenuItem(file, 103, '&Save\tCtrl+S', 'Save the file')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE, wx.ART_OTHER, (16,16))
        save.SetBitmap(bmp)
        file.AppendItem(save)
        saveas = wx.MenuItem(file, 104, 'Save &As...\tShift+Ctrl+S', 'Save the file with a different name')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE_AS, wx.ART_OTHER, (16,16))
        saveas.SetBitmap(bmp)
        file.AppendItem(saveas)
        file.AppendSeparator()
        quit = wx.MenuItem(file, 105, '&Quit\tCtrl+Q', 'Quit the Application')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_QUIT, wx.ART_OTHER, (16,16))
        quit.SetBitmap(bmp)
        file.AppendItem(quit)
        edit = wx.Menu()
        cut = wx.MenuItem(edit, 106, '&Cut\tCtrl+X', 'Cut the Selection')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_CUT, wx.ART_OTHER, (16,16))
        cut.SetBitmap(bmp)
        edit.AppendItem(cut)
        copy = wx.MenuItem(edit, 107, '&Copy\tCtrl+C', 'Copy the Selection')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_COPY, wx.ART_OTHER, (16,16))
        copy.SetBitmap(bmp)
        edit.AppendItem(copy)
        paste = wx.MenuItem(edit, 108, '&Paste\tCtrl+V', 'Paste text from clipboard')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_PASTE, wx.ART_OTHER, (16,16))
        paste.SetBitmap(bmp)
        edit.AppendItem(paste)
        delete = wx.MenuItem(edit, 109, '&Delete', 'Delete the selected text')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_DELETE, wx.ART_OTHER, (16,16))
        delete.SetBitmap(bmp)
        edit.AppendItem(delete)
        edit.AppendSeparator()
        edit.Append(110, 'Select &All\tCtrl+A', 'Select the entire text')
        view = wx.Menu()
        view.Append(111, '&Statusbar', 'Show StatusBar')
        help = wx.Menu()
        about = wx.MenuItem(help, 112, '&About\tF1', 'About Editor')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, wx.ART_OTHER, (16,16))
        about.SetBitmap(bmp)
        help.AppendItem(about)
        menubar.Append(file, '&File')
        menubar.Append(edit, '&Edit')
        menubar.Append(view, '&View')
        menubar.Append(help, '&Help')
        self.SetMenuBar(menubar)
        # menu events (ids match the MenuItem ids above)
        self.Bind(wx.EVT_MENU, self.NewApplication, id=101)
        self.Bind(wx.EVT_MENU, self.OnOpenFile, id=102)
        self.Bind(wx.EVT_MENU, self.OnSaveFile, id=103)
        self.Bind(wx.EVT_MENU, self.OnSaveAsFile, id=104)
        self.Bind(wx.EVT_MENU, self.QuitApplication, id=105)
        self.Bind(wx.EVT_MENU, self.OnCut, id=106)
        self.Bind(wx.EVT_MENU, self.OnCopy, id=107)
        self.Bind(wx.EVT_MENU, self.OnPaste, id=108)
        self.Bind(wx.EVT_MENU, self.OnDelete, id=109)
        self.Bind(wx.EVT_MENU, self.OnSelectAll, id=110)
        self.Bind(wx.EVT_MENU, self.ToggleStatusBar, id=111)
        self.Bind(wx.EVT_MENU, self.OnAbout, id=112)
        # setting up toolbar
        self.toolbar = self.CreateToolBar( wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT | wx.TB_TEXT )
        bmp = wx.ArtProvider.GetBitmap(wx.ART_NEW, wx.ART_OTHER, (16,16))
        self.toolbar.AddSimpleTool(801, bmp, 'New', '')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_OTHER, (16,16))
        self.toolbar.AddSimpleTool(802, bmp, 'Open', '')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE, wx.ART_OTHER, (16,16))
        self.toolbar.AddSimpleTool(803, bmp, 'Save', '')
        self.toolbar.AddSeparator()
        bmp = wx.ArtProvider.GetBitmap(wx.ART_CUT, wx.ART_OTHER, (16,16))
        self.toolbar.AddSimpleTool(804, bmp, 'Cut', '')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_COPY, wx.ART_OTHER, (16,16))
        self.toolbar.AddSimpleTool(805, bmp, 'Copy', '')
        bmp = wx.ArtProvider.GetBitmap(wx.ART_PASTE, wx.ART_OTHER, (16,16))
        self.toolbar.AddSimpleTool(806, bmp, 'Paste', '')
        self.toolbar.AddSeparator()
        bmp = wx.ArtProvider.GetBitmap(wx.ART_QUIT, wx.ART_OTHER, (16,16))
        self.toolbar.AddSimpleTool(807, bmp, 'Exit', '')
        self.toolbar.Realize()
        # toolbar events
        self.Bind(wx.EVT_TOOL, self.NewApplication, id=801)
        self.Bind(wx.EVT_TOOL, self.OnOpenFile, id=802)
        self.Bind(wx.EVT_TOOL, self.OnSaveFile, id=803)
        self.Bind(wx.EVT_TOOL, self.OnCut, id=804)
        self.Bind(wx.EVT_TOOL, self.OnCopy, id=805)
        self.Bind(wx.EVT_TOOL, self.OnPaste, id=806)
        self.Bind(wx.EVT_TOOL, self.QuitApplication, id=807)
        # the editing surface itself
        self.text = wx.TextCtrl(self, 1000, '', size=(-1, -1), style=wx.TE_MULTILINE | wx.TE_PROCESS_ENTER)
        self.text.SetFocus()
        self.text.Bind(wx.EVT_TEXT, self.OnTextChanged, id=1000)
        self.text.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        self.Bind(wx.EVT_CLOSE, self.QuitApplication)
        self.StatusBar()
        self.Centre()
        self.Show(True)

    def NewApplication(self, event):
        """Open a fresh, independent editor window."""
        editor = Editor(None, -1, 'Editor')
        editor.Centre()
        editor.Show()

    def OnOpenFile(self, event):
        """Open a file, first offering to save unsaved changes."""
        file_name = os.path.basename(self.last_name_saved)
        if self.modify:
            dlg = wx.MessageDialog(self, 'Save changes?', '', wx.YES_NO | wx.YES_DEFAULT | wx.CANCEL |
                wx.ICON_QUESTION)
            val = dlg.ShowModal()
            if val == wx.ID_YES:
                self.OnSaveFile(event)
                self.DoOpenFile()
            elif val == wx.ID_CANCEL:
                dlg.Destroy()
            else:
                self.DoOpenFile()
        else:
            self.DoOpenFile()

    def DoOpenFile(self, filename=None, last_name_saved=True):
        """Load *filename* (or ask via a file dialog) into the text control.

        NOTE(review): if the dialog is cancelled, `path` is never assigned and
        the open() below raises UnboundLocalError -- confirm intended handling.
        """
        if filename is None:
            wcd = 'All files (*)|*|Editor files (*.ef)|*.ef|'
            dir = os.getcwd()
            open_dlg = wx.FileDialog(self, message='Choose a file', defaultDir=dir, defaultFile='',
                wildcard=wcd, style=wx.OPEN|wx.CHANGE_DIR)
            if open_dlg.ShowModal() == wx.ID_OK:
                path = open_dlg.GetPath()
        else:
            path = filename
        try:
            file = open(path, 'r')
            text = file.read()
            file.close()
            # clear any previous buffer before writing the new contents
            if self.text.GetLastPosition():
                self.text.Clear()
            self.text.WriteText(text)
            if last_name_saved:
                self.last_name_saved = path
            self.statusbar.SetStatusText('', 1)
            self.modify = False
        except IOError as error:
            dlg = wx.MessageDialog(self, 'Error opening file\n' + str(error))
            dlg.ShowModal()
        except UnicodeDecodeError as error:
            dlg = wx.MessageDialog(self, 'Error opening file\n' + str(error))
            dlg.ShowModal()
        if filename is None:
            open_dlg.Destroy()
        return path

    def DoOpenGeometryAsFile(self, geo):
        """Serialise *geo* to a temporary .xml file and load it into the editor.

        Falls back to 'geo.xml' in the working directory if the temp file
        cannot be created.  Returns the file name on success.
        """
        try:
            filename = mktemp()
            filename = filename.split('.')[0]
            filename = filename + ".xml"
        except:
            filename = "geo.xml"
        geo.save(filename)
        path = filename
        try:
            file = open(path, 'r')
            text = file.read()
            file.close()
            if self.text.GetLastPosition():
                self.text.Clear()
            self.text.WriteText(text)
            self.last_name_saved = path
            self.statusbar.SetStatusText('', 1)
            self.modify = False
            return filename
        except IOError as error:
            dlg = wx.MessageDialog(self, 'Error opening file\n' + str(error))
            dlg.ShowModal()
        except UnicodeDecodeError as error:
            dlg = wx.MessageDialog(self, 'Error opening file\n' + str(error))
            dlg.ShowModal()

    def OnSaveFile(self, event):
        """Save to the known path, or fall back to Save As."""
        if self.last_name_saved:
            try:
                file = open(self.last_name_saved, 'w')
                text = self.text.GetValue()
                file.write(text)
                file.close()
                self.statusbar.SetStatusText(os.path.basename(self.last_name_saved) + ' saved', 0)
                self.modify = False
                self.statusbar.SetStatusText('', 1)
            except IOError as error:
                dlg = wx.MessageDialog(self, 'Error saving file\n' + str(error))
                dlg.ShowModal()
        else:
            self.OnSaveAsFile(event)

    def OnSaveAsFile(self, event):
        """Ask for a destination and save the buffer there.

        NOTE(review): only the basename is stored in last_name_saved here, so
        a later plain Save may write into the current working directory.
        """
        wcd='All files(*)|*|Editor files (*.ef)|*.ef|'
        dir = os.getcwd()
        save_dlg = wx.FileDialog(self, message='Save file as...', defaultDir=dir, defaultFile='',
            wildcard=wcd, style=wx.SAVE | wx.OVERWRITE_PROMPT)
        if save_dlg.ShowModal() == wx.ID_OK:
            path = save_dlg.GetPath()
            try:
                file = open(path, 'w')
                text = self.text.GetValue()
                file.write(text)
                file.close()
                self.last_name_saved = os.path.basename(path)
                self.statusbar.SetStatusText(self.last_name_saved + ' saved', 0)
                self.modify = False
                self.statusbar.SetStatusText('', 1)
            except IOError as error:
                dlg = wx.MessageDialog(self, 'Error saving file\n' + str(error))
                dlg.ShowModal()
        save_dlg.Destroy()

    def OnCut(self, event):
        """Cut the selection to the clipboard."""
        self.text.Cut()

    def OnCopy(self, event):
        """Copy the selection to the clipboard."""
        self.text.Copy()

    def OnPaste(self, event):
        """Insert the clipboard contents at the caret."""
        self.text.Paste()

    def QuitApplication(self, event):
        """Close the window, offering to save unsaved changes first."""
        if self.modify:
            dlg = wx.MessageDialog(self, 'Save before Exit?', '', wx.YES_NO | wx.YES_DEFAULT |
                wx.CANCEL | wx.ICON_QUESTION)
            val = dlg.ShowModal()
            if val == wx.ID_YES:
                self.OnSaveFile(event)
                # only exit when the save actually cleared the dirty flag
                if not self.modify:
                    wx.Exit()
            elif val == wx.ID_CANCEL:
                dlg.Destroy()
            else:
                self.Destroy()
        else:
            self.Destroy()

    def OnDelete(self, event):
        """Remove the currently selected text."""
        frm, to = self.text.GetSelection()
        self.text.Remove(frm, to)

    def OnSelectAll(self, event):
        """Select the whole buffer."""
        self.text.SelectAll()

    def OnTextChanged(self, event):
        """Mark the buffer dirty and show ' modified' in the status bar."""
        self.modify = True
        self.statusbar.SetStatusText(' modified', 1)
        event.Skip()

    def OnKeyDown(self, event):
        """Toggle the INS (overwrite) indicator when Insert is pressed."""
        keycode = event.GetKeyCode()
        if keycode == wx.WXK_INSERT:
            if not self.replace:
                self.statusbar.SetStatusText('INS', 2)
                self.replace = True
            else:
                self.statusbar.SetStatusText('', 2)
                self.replace = False
        event.Skip()

    def ToggleStatusBar(self, event):
        """Show/hide the status bar."""
        if self.statusbar.IsShown():
            self.statusbar.Hide()
        else:
            self.statusbar.Show()

    def StatusBar(self):
        """Create the three-field status bar (file info, dirty flag, INS)."""
        self.statusbar = self.CreateStatusBar()
        self.statusbar.SetFieldsCount(3)
        self.statusbar.SetStatusWidths([-5, -2, -1])

    def OnAbout(self, event):
        """Show the About dialog."""
        dlg = wx.MessageDialog(self, '\tEditor\t\n Another Tutorial\n<NAME> 2005-2006',
            'About Editor', wx.OK | wx.ICON_INFORMATION)
        dlg.ShowModal()
        dlg.Destroy()
#app = wx.App()
#Editor(None, -1, 'Editor')
#app.MainLoop() | 0.309337 | 0.080574 |
import psycopg2, psycopg2.extras, tokens
from datetime import datetime
from flask import g, Blueprint, request, make_response, jsonify
from flask_login import login_required, current_user
from config import app, Config
auths = Blueprint('auths', __name__)
def generate(user_id):
    """Create a new X-Auth-Token for *user_id* via the tokens module.

    Returns the token string, or None when generation failed.
    """
    token = tokens.generate(user_id, g.db)
    if token:
        app.logger.info('X-Auth-Token generated')
        return token
    else:
        # BUG fix: the original logged str(e), but no exception variable `e`
        # exists in this scope, so the failure path raised a NameError.
        app.logger.error('Generate X-Auth-Token failed')
        return None
def revoke(user_id):
    """Revoke the X-Auth-Token of *user_id*.

    Returns True on success, None on failure.
    """
    if tokens.revoke(user_id, g.db):
        app.logger.info('X-Auth-Token revoked')
        return True
    else:
        # BUG fix: the original logged str(e), but no exception variable `e`
        # exists in this scope, so the failure path raised a NameError.
        app.logger.error('Revoke X-Auth-Token failed')
        return None
@auths.route('/renewal/<token>', methods = ['GET'])
@login_required
def renewal(token):
    """Touch the token's updated_at so it stays inside its validity window.

    Returns 200 with an empty JSON list on success, 500 (after rollback)
    on any database error.
    """
    try:
        with g.db.cursor(cursor_factory = psycopg2.extras.DictCursor) as cursor:
            # only renew a token that belongs to the logged-in user
            cursor.execute("""
                UPDATE minicloud_auths SET updated_at = now()
                FROM minicloud_users AS b
                WHERE b.id = %s AND b.id = user_id AND token = %s
                """, [current_user.id, token])
        g.db.commit()
        return make_response(jsonify([]), 200)
    except Exception as e:
        g.db.rollback()
        return make_response(jsonify([]), 500)
@auths.route('/verify', methods = ['GET'])
def verify():
    """Validate the X-Auth-Token request header.

    Returns 201 when the token exists and its updated_at is at most five
    minutes old, 401 otherwise.  Failures are raised internally and funneled
    through the single except block below.
    """
    token, data = [None, None]
    if 'X-Auth-Token' in request.headers:
        # NOTE(review): this logs the raw token value -- consider redacting.
        app.logger.info('X-Auth-Token: %s' % request.headers['X-Auth-Token'])
        token = request.headers['X-Auth-Token']
    try:
        if not token:
            raise Exception('not given')
        with g.db.cursor(cursor_factory = psycopg2.extras.DictCursor) as cursor:
            cursor.execute("""
                SELECT token, updated_at FROM minicloud_auths
                WHERE token = %s ORDER BY created_at DESC LIMIT 1
                """, [token])
            data = cursor.fetchone()
        if not data or not data['token'] == token:
            raise Exception('%s invalid' % token)
        # Token is not older then 5min (300sec)
        created_at = data['updated_at'].replace(tzinfo=Config.UTCZONE).timestamp()
        current_time = datetime.utcnow().replace(tzinfo=Config.UTCZONE).timestamp()
        if (current_time - created_at) > 300:
            raise Exception('%s expired' % token)
        return ('', 201)
    except Exception as e:
        app.logger.error('X-Auth-Token (%s):' % str(e))
        return ('', 401)
from datetime import datetime
from flask import g, Blueprint, request, make_response, jsonify
from flask_login import login_required, current_user
from config import app, Config
auths = Blueprint('auths', __name__)
def generate(user_id):
    """Create a new X-Auth-Token for *user_id* via the tokens module.

    Returns the token string, or None when generation failed.
    """
    token = tokens.generate(user_id, g.db)
    if token:
        app.logger.info('X-Auth-Token generated')
        return token
    else:
        # BUG fix: the original logged str(e), but no exception variable `e`
        # exists in this scope, so the failure path raised a NameError.
        app.logger.error('Generate X-Auth-Token failed')
        return None
def revoke(user_id):
    """Revoke the X-Auth-Token of *user_id*.

    Returns True on success, None on failure.
    """
    if tokens.revoke(user_id, g.db):
        app.logger.info('X-Auth-Token revoked')
        return True
    else:
        # BUG fix: the original logged str(e), but no exception variable `e`
        # exists in this scope, so the failure path raised a NameError.
        app.logger.error('Revoke X-Auth-Token failed')
        return None
@auths.route('/renewal/<token>', methods = ['GET'])
@login_required
def renewal(token):
    """Touch the token's updated_at so it stays inside its validity window.

    Returns 200 with an empty JSON list on success, 500 (after rollback)
    on any database error.
    """
    try:
        with g.db.cursor(cursor_factory = psycopg2.extras.DictCursor) as cursor:
            # only renew a token that belongs to the logged-in user
            cursor.execute("""
                UPDATE minicloud_auths SET updated_at = now()
                FROM minicloud_users AS b
                WHERE b.id = %s AND b.id = user_id AND token = %s
                """, [current_user.id, token])
        g.db.commit()
        return make_response(jsonify([]), 200)
    except Exception as e:
        g.db.rollback()
        return make_response(jsonify([]), 500)
@auths.route('/verify', methods = ['GET'])
def verify():
    """Validate the X-Auth-Token request header.

    Returns 201 when the token exists and its updated_at is at most five
    minutes old, 401 otherwise.  Failures are raised internally and funneled
    through the single except block below.
    """
    token, data = [None, None]
    if 'X-Auth-Token' in request.headers:
        # NOTE(review): this logs the raw token value -- consider redacting.
        app.logger.info('X-Auth-Token: %s' % request.headers['X-Auth-Token'])
        token = request.headers['X-Auth-Token']
    try:
        if not token:
            raise Exception('not given')
        with g.db.cursor(cursor_factory = psycopg2.extras.DictCursor) as cursor:
            cursor.execute("""
                SELECT token, updated_at FROM minicloud_auths
                WHERE token = %s ORDER BY created_at DESC LIMIT 1
                """, [token])
            data = cursor.fetchone()
        if not data or not data['token'] == token:
            raise Exception('%s invalid' % token)
        # Token is not older then 5min (300sec)
        created_at = data['updated_at'].replace(tzinfo=Config.UTCZONE).timestamp()
        current_time = datetime.utcnow().replace(tzinfo=Config.UTCZONE).timestamp()
        if (current_time - created_at) > 300:
            raise Exception('%s expired' % token)
        return ('', 201)
    except Exception as e:
        app.logger.error('X-Auth-Token (%s):' % str(e))
        return ('', 401)
from django.shortcuts import render, redirect
from django.template import loader
from django.http import HttpResponseRedirect
from django.views import generic
from .models import Code, User
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView, DeleteView
from django.contrib.auth import views, authenticate, login, logout
loggedIn = False
#checks validity of the username requested by client
def validity(uid):
    """Return True when *uid* is not already taken by an existing user."""
    # put in email id checker code
    return not User.objects.filter(username=uid).exists()
#homepage
def index(request):
    """Render the landing page."""
    return render(request,'CodeMate/landing_page.html',{})
#intermediate page for saving a new code
def addObject(request):
    """Persist a new Code paste from the POSTed form, then redirect to it."""
    form = request.POST
    paste = Code(language=form['language'],
                 content=form['content'],
                 expire=form['expire'],
                 slug=form['slug'])
    paste.save()
    return HttpResponseRedirect('/codemate/paste/' + str(form['slug']))
#page that loads the new code creation form
def createView(request):
    """Render the new-paste creation form."""
    return render(request,'CodeMate/code_form.html',{})
#login intermediate. checks for correct username/password pair
def authUser(request):
    """Check the posted username/password pair and log the user in.

    Redirects to the logged-in page on success, to the failure page when the
    user does not exist or the password is wrong.
    """
    username = request.POST['username']
    password = request.POST['password']
    if User.objects.filter(username=username).exists():
        user = User.objects.get(username=username)
        if user.check_password(password):
            print('Credential Verified!')
            login(request, user)
            return redirect('code:loggedIn')
    print('Verification Failed!')
    return redirect('code:loginFailure')
#login page
def logIn(request):
    """Render the login form."""
    return render(request,'CodeMate/login.html',{})
#logged-in version of new code creation page
def loggedIn(request):
    """Render the logged-in variant of the new-paste page."""
    return render(request,'CodeMate/loggedIn.html',{})
#logout intermediate
def logOut(request):
    """Log the current user out and return to the creation page."""
    logout(request)
    return redirect('code:create')
#page that loads on username/password mismatch
def loginFailure(request):
    """Shown when the username/password pair did not match."""
    return render(request,'CodeMate/loginFailure.html',{'error_msg':'Invalid username or password!'})
#signup intermediate. checks for validity of supplied fields
def validUser(request):
    """Validate the signup form; create the user or re-render with an error.

    Checks: unique username, username/password length >= 6, matching
    password confirmation.
    """
    frst_name = request.POST['fname']
    last_name = request.POST['lname']
    username = request.POST['uid']
    # NOTE(review): the source was anonymised ('<PASSWORD>' placeholders, a
    # syntax error as published); the field name and comparison below are
    # reconstructed -- confirm against the signup.html form field names.
    password = request.POST['password']
    passcnfrm = request.POST['passcnfrm']
    if not validity(username):
        return render(request,'CodeMate/signup.html',{'error_msg':'Username already exists!'})
    if len(username) < 6:
        return render(request,'CodeMate/signup.html',{'error_msg':'Username must be atleast 6 characters long!'})
    if len(password) < 6:
        return render(request,'CodeMate/signup.html',{'error_msg':'Password must be atleast 6 characters long!'})
    if not password == passcnfrm:
        return render(request,'CodeMate/signup.html',{'error_msg':'Passwords do no match!'})
    user = User(first_name=frst_name, last_name=last_name, username=username)
    user.set_password(password)
    user.save()
    return redirect('code:logIn')
#signup page
def signup(request):
    """Render the signup form."""
    return render(request,'CodeMate/signup.html',{})
#intermediate page thet loads when 'save' button is clicked. modifies the
#existing code of the object
def modify(request, slug):
    """Overwrite the stored content of the paste identified by *slug*."""
    paste = Code.objects.get(slug=slug)
    paste.content = request.POST['content']
    paste.save()
    return redirect('/codemate/paste/' + str(slug))
#generic view to delete an object. invoked when 'delete' is clicked.
class deleteView(generic.DeleteView):
    """Delete a Code paste and return to the creation page.

    GET is forwarded to POST so a plain 'delete' link works without a
    confirmation form.
    """
    model = Code
    success_url = reverse_lazy('code:create')
    def get(self, request, *args, **kwargs):
        # allow deletion via GET by delegating to the POST handler
        return self.post(request, *args, **kwargs)
#the final page which contains the url and actual code after creation
class details(generic.DetailView):
    """The final page showing a paste's URL and its actual code."""
    model = Code
    template_name = 'CodeMate/details.html'

    def get_queryset(self):
        """Restrict the queryset to the paste addressed by the URL slug."""
        # The original declared 'global loggedIn' here but never read or
        # assigned it -- a no-op, removed.
        return Code.objects.filter(slug=self.kwargs['slug'])
from django.template import loader
from django.http import HttpResponseRedirect
from django.views import generic
from .models import Code, User
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView, DeleteView
from django.contrib.auth import views, authenticate, login, logout
loggedIn = False
#checks validity of the username requested by client
def validity(uid):
    """Return True when *uid* is not already taken by an existing user."""
    # put in email id checker code
    return not User.objects.filter(username=uid).exists()
#homepage
def index(request):
    """Render the landing page."""
    return render(request,'CodeMate/landing_page.html',{})
#intermediate page for saving a new code
def addObject(request):
    """Persist a new Code paste from the POSTed form, then redirect to it."""
    form = request.POST
    paste = Code(language=form['language'],
                 content=form['content'],
                 expire=form['expire'],
                 slug=form['slug'])
    paste.save()
    return HttpResponseRedirect('/codemate/paste/' + str(form['slug']))
#page that loads the new code creation form
def createView(request):
    """Render the new-paste creation form."""
    return render(request,'CodeMate/code_form.html',{})
#login intermediate. checks for correct username/password pair
def authUser(request):
    """Check the posted username/password pair and log the user in.

    Redirects to the logged-in page on success, to the failure page when the
    user does not exist or the password is wrong.
    """
    username = request.POST['username']
    password = request.POST['password']
    if User.objects.filter(username=username).exists():
        user = User.objects.get(username=username)
        if user.check_password(password):
            print('Credential Verified!')
            login(request, user)
            return redirect('code:loggedIn')
    print('Verification Failed!')
    return redirect('code:loginFailure')
#login page
def logIn(request):
    """Render the login form."""
    return render(request,'CodeMate/login.html',{})
#logged-in version of new code creation page
def loggedIn(request):
    """Render the logged-in variant of the new-paste page."""
    return render(request,'CodeMate/loggedIn.html',{})
#logout intermediate
def logOut(request):
    """Log the current user out and return to the creation page."""
    logout(request)
    return redirect('code:create')
#page that loads on username/password mismatch
def loginFailure(request):
    """Shown when the username/password pair did not match."""
    return render(request,'CodeMate/loginFailure.html',{'error_msg':'Invalid username or password!'})
#signup intermediate. checks for validity of supplied fields
def validUser(request):
    """Validate the signup form; create the user or re-render with an error.

    Checks: unique username, username/password length >= 6, matching
    password confirmation.
    """
    frst_name = request.POST['fname']
    last_name = request.POST['lname']
    username = request.POST['uid']
    # NOTE(review): the source was anonymised ('<PASSWORD>' placeholders, a
    # syntax error as published); the field name and comparison below are
    # reconstructed -- confirm against the signup.html form field names.
    password = request.POST['password']
    passcnfrm = request.POST['passcnfrm']
    if not validity(username):
        return render(request,'CodeMate/signup.html',{'error_msg':'Username already exists!'})
    if len(username) < 6:
        return render(request,'CodeMate/signup.html',{'error_msg':'Username must be atleast 6 characters long!'})
    if len(password) < 6:
        return render(request,'CodeMate/signup.html',{'error_msg':'Password must be atleast 6 characters long!'})
    if not password == passcnfrm:
        return render(request,'CodeMate/signup.html',{'error_msg':'Passwords do no match!'})
    user = User(first_name=frst_name, last_name=last_name, username=username)
    user.set_password(password)
    user.save()
    return redirect('code:logIn')
#signup page
def signup(request):
    """Render the signup form."""
    return render(request,'CodeMate/signup.html',{})
#intermediate page thet loads when 'save' button is clicked. modifies the
#existing code of the object
def modify(request, slug):
    """Overwrite the stored content of the paste identified by *slug*."""
    paste = Code.objects.get(slug=slug)
    paste.content = request.POST['content']
    paste.save()
    return redirect('/codemate/paste/' + str(slug))
#generic view to delete an object. invoked when 'delete' is clicked.
class deleteView(generic.DeleteView):
    """Delete a Code paste and return to the creation page.

    GET is forwarded to POST so a plain 'delete' link works without a
    confirmation form.
    """
    model = Code
    success_url = reverse_lazy('code:create')
    def get(self, request, *args, **kwargs):
        # allow deletion via GET by delegating to the POST handler
        return self.post(request, *args, **kwargs)
#the final page which contains the url and actual code after creation
class details(generic.DetailView):
    """The final page showing a paste's URL and its actual code."""
    model = Code
    template_name = 'CodeMate/details.html'

    def get_queryset(self):
        """Restrict the queryset to the paste addressed by the URL slug."""
        # The original declared 'global loggedIn' here but never read or
        # assigned it -- a no-op, removed.
        return Code.objects.filter(slug=self.kwargs['slug'])
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
import tensorflow as tf
import os, cv2
import numpy as np
import pandas as pd
import random, tqdm
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from tensorflow import keras
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import albumentations as album
from patchify import patchify
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Activation, Reshape, Convolution2D, BatchNormalization
class MaxPoolingWithArgmax2D(Layer):
    """Max pooling that also returns the argmax indices (SegNet encoder).

    call() returns [pooled_output, argmax] where argmax holds the flat
    indices of the selected maxima, cast to the Keras float type so the
    tensor can flow through the model like any other output.
    """
    def __init__(self, pool_size=(2, 2), strides=(2, 2), padding="same", **kwargs):
        super(MaxPoolingWithArgmax2D, self).__init__(**kwargs)
        self.padding = padding
        self.pool_size = pool_size
        self.strides = strides

    def call(self, inputs, **kwargs):
        padding = self.padding
        pool_size = self.pool_size
        strides = self.strides
        # only the TensorFlow backend exposes max_pool_with_argmax
        if K.backend() == "tensorflow":
            ksize = [1, pool_size[0], pool_size[1], 1]
            padding = padding.upper()
            strides = [1, strides[0], strides[1], 1]
            output, argmax = tf.nn.max_pool_with_argmax(
                inputs, ksize=ksize, strides=strides, padding=padding
            )
        else:
            errmsg = "{} backend is not supported for layer {}".format(
                K.backend(), type(self).__name__
            )
            raise NotImplementedError(errmsg)
        argmax = K.cast(argmax, K.floatx())
        return [output, argmax]

    def compute_output_shape(self, input_shape):
        # BUG fix: the original hard-coded a (1, 2, 2, 1) ratio, which
        # reported wrong shapes for any non-default stride.  The spatial
        # downsampling factor is the configured stride (default (2, 2),
        # so default behaviour is unchanged).
        ratio = (1, self.strides[0], self.strides[1], 1)
        output_shape = [
            dim // ratio[idx] if dim is not None else None
            for idx, dim in enumerate(input_shape)
        ]
        output_shape = tuple(output_shape)
        # one shape for the pooled values, one for the argmax tensor
        return [output_shape, output_shape]

    def compute_mask(self, inputs, mask=None):
        # neither output carries a Keras mask
        return 2 * [None]

    def get_config(self):
        # Added for consistency with MaxUnpooling2D: makes the layer
        # serialisable via model.save()/load_model().
        config = super(MaxPoolingWithArgmax2D, self).get_config()
        config.update({
            "pool_size": self.pool_size,
            "strides": self.strides,
            "padding": self.padding,
        })
        return config
class MaxUnpooling2D(Layer):
def __init__(self, size=(2, 2), **kwargs):
super(MaxUnpooling2D, self).__init__(**kwargs)
self.size = size
def call(self, inputs, output_shape=None):
updates, mask = inputs[0], inputs[1]
with tf.compat.v1.variable_scope(self.name):
mask = K.cast(mask, 'int32')
input_shape = tf.shape(updates, out_type='int32')
if output_shape is None:
output_shape = (
input_shape[0],
input_shape[1] * self.size[0],
input_shape[2] * self.size[1],
input_shape[3])
ret = tf.scatter_nd(K.expand_dims(K.flatten(mask)),
K.flatten(updates),
[K.prod(output_shape)])
input_shape = updates.shape
out_shape = [-1,
input_shape[1] * self.size[0],
input_shape[2] * self.size[1],
input_shape[3]]
return K.reshape(ret, out_shape)
def get_config(self):
config = super().get_config().copy()
config.update({
'size': self.size
})
return config
def compute_output_shape(self, input_shape):
mask_shape = input_shape[1]
return (
mask_shape[0],
mask_shape[1]*self.size[0],
mask_shape[2]*self.size[1],
mask_shape[3]
)
def segnet(
input_shape,
n_labels,
kernel=3,
pool_size=(2, 2),
activation="sigmoid"):
# encoder
inputs = Input(shape=input_shape)
conv_1 = Convolution2D(64, (kernel, kernel), padding="same")(inputs)
conv_1 = BatchNormalization()(conv_1)
conv_1 = Activation("relu")(conv_1)
conv_2 = Convolution2D(64, (kernel, kernel), padding="same")(conv_1)
conv_2 = BatchNormalization()(conv_2)
conv_2 = Activation("relu")(conv_2)
pool_1, mask_1 = MaxPoolingWithArgmax2D(pool_size)(conv_2)
conv_3 = Convolution2D(128, (kernel, kernel), padding="same")(pool_1)
conv_3 = BatchNormalization()(conv_3)
conv_3 = Activation("relu")(conv_3)
conv_4 = Convolution2D(128, (kernel, kernel), padding="same")(conv_3)
conv_4 = BatchNormalization()(conv_4)
conv_4 = Activation("relu")(conv_4)
pool_2, mask_2 = MaxPoolingWithArgmax2D(pool_size)(conv_4)
conv_5 = Convolution2D(256, (kernel, kernel), padding="same")(pool_2)
conv_5 = BatchNormalization()(conv_5)
conv_5 = Activation("relu")(conv_5)
conv_6 = Convolution2D(256, (kernel, kernel), padding="same")(conv_5)
conv_6 = BatchNormalization()(conv_6)
conv_6 = Activation("relu")(conv_6)
conv_7 = Convolution2D(256, (kernel, kernel), padding="same")(conv_6)
conv_7 = BatchNormalization()(conv_7)
conv_7 = Activation("relu")(conv_7)
pool_3, mask_3 = MaxPoolingWithArgmax2D(pool_size)(conv_7)
conv_8 = Convolution2D(512, (kernel, kernel), padding="same")(pool_3)
conv_8 = BatchNormalization()(conv_8)
conv_8 = Activation("relu")(conv_8)
conv_9 = Convolution2D(512, (kernel, kernel), padding="same")(conv_8)
conv_9 = BatchNormalization()(conv_9)
conv_9 = Activation("relu")(conv_9)
conv_10 = Convolution2D(512, (kernel, kernel), padding="same")(conv_9)
conv_10 = BatchNormalization()(conv_10)
conv_10 = Activation("relu")(conv_10)
pool_4, mask_4 = MaxPoolingWithArgmax2D(pool_size)(conv_10)
conv_11 = Convolution2D(512, (kernel, kernel), padding="same")(pool_4)
conv_11 = BatchNormalization()(conv_11)
conv_11 = Activation("relu")(conv_11)
conv_12 = Convolution2D(512, (kernel, kernel), padding="same")(conv_11)
conv_12 = BatchNormalization()(conv_12)
conv_12 = Activation("relu")(conv_12)
conv_13 = Convolution2D(512, (kernel, kernel), padding="same")(conv_12)
conv_13 = BatchNormalization()(conv_13)
conv_13 = Activation("relu")(conv_13)
pool_5, mask_5 = MaxPoolingWithArgmax2D(pool_size)(conv_13)
print("Build encoder done..")
# decoder
unpool_1 = MaxUnpooling2D(pool_size)([pool_5, mask_5])
conv_14 = Convolution2D(512, (kernel, kernel), padding="same")(unpool_1)
conv_14 = BatchNormalization()(conv_14)
conv_14 = Activation("relu")(conv_14)
conv_15 = Convolution2D(512, (kernel, kernel), padding="same")(conv_14)
conv_15 = BatchNormalization()(conv_15)
conv_15 = Activation("relu")(conv_15)
conv_16 = Convolution2D(512, (kernel, kernel), padding="same")(conv_15)
conv_16 = BatchNormalization()(conv_16)
conv_16 = Activation("relu")(conv_16)
unpool_2 = MaxUnpooling2D(pool_size)([conv_16, mask_4])
conv_17 = Convolution2D(512, (kernel, kernel), padding="same")(unpool_2)
conv_17 = BatchNormalization()(conv_17)
conv_17 = Activation("relu")(conv_17)
conv_18 = Convolution2D(512, (kernel, kernel), padding="same")(conv_17)
conv_18 = BatchNormalization()(conv_18)
conv_18 = Activation("relu")(conv_18)
conv_19 = Convolution2D(256, (kernel, kernel), padding="same")(conv_18)
conv_19 = BatchNormalization()(conv_19)
conv_19 = Activation("relu")(conv_19)
unpool_3 = MaxUnpooling2D(pool_size)([conv_19, mask_3])
conv_20 = Convolution2D(256, (kernel, kernel), padding="same")(unpool_3)
conv_20 = BatchNormalization()(conv_20)
conv_20 = Activation("relu")(conv_20)
conv_21 = Convolution2D(256, (kernel, kernel), padding="same")(conv_20)
conv_21 = BatchNormalization()(conv_21)
conv_21 = Activation("relu")(conv_21)
conv_22 = Convolution2D(128, (kernel, kernel), padding="same")(conv_21)
conv_22 = BatchNormalization()(conv_22)
conv_22 = Activation("relu")(conv_22)
unpool_4 = MaxUnpooling2D(pool_size)([conv_22, mask_2])
conv_23 = Convolution2D(128, (kernel, kernel), padding="same")(unpool_4)
conv_23 = BatchNormalization()(conv_23)
conv_23 = Activation("relu")(conv_23)
conv_24 = Convolution2D(64, (kernel, kernel), padding="same")(conv_23)
conv_24 = BatchNormalization()(conv_24)
conv_24 = Activation("relu")(conv_24)
unpool_5 = MaxUnpooling2D(pool_size)([conv_24, mask_1])
conv_25 = Convolution2D(64, (kernel, kernel), padding="same")(unpool_5)
conv_25 = BatchNormalization()(conv_25)
conv_25 = Activation("relu")(conv_25)
conv_26 = Convolution2D(n_labels, (1, 1), padding="valid")(conv_25)
conv_26 = BatchNormalization()(conv_26)
outputs = Activation(activation)(conv_26)
print("Build decoder done..")
model = Model(inputs=inputs, outputs=outputs, name="SegNet")
return model | src/segnetlayerkeras.py | from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
import tensorflow as tf
import os, cv2
import numpy as np
import pandas as pd
import random, tqdm
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from tensorflow import keras
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import albumentations as album
from patchify import patchify
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Activation, Reshape, Convolution2D, BatchNormalization
class MaxPoolingWithArgmax2D(Layer):
def __init__(self, pool_size=(2, 2), strides=(2, 2), padding="same", **kwargs):
super(MaxPoolingWithArgmax2D, self).__init__(**kwargs)
self.padding = padding
self.pool_size = pool_size
self.strides = strides
def call(self, inputs, **kwargs):
padding = self.padding
pool_size = self.pool_size
strides = self.strides
if K.backend() == "tensorflow":
ksize = [1, pool_size[0], pool_size[1], 1]
padding = padding.upper()
strides = [1, strides[0], strides[1], 1]
output, argmax = tf.nn.max_pool_with_argmax(
inputs, ksize=ksize, strides=strides, padding=padding
)
else:
errmsg = "{} backend is not supported for layer {}".format(
K.backend(), type(self).__name__
)
raise NotImplementedError(errmsg)
argmax = K.cast(argmax, K.floatx())
return [output, argmax]
def compute_output_shape(self, input_shape):
ratio = (1, 2, 2, 1)
output_shape = [
dim // ratio[idx] if dim is not None else None
for idx, dim in enumerate(input_shape)
]
output_shape = tuple(output_shape)
return [output_shape, output_shape]
def compute_mask(self, inputs, mask=None):
return 2 * [None]
class MaxUnpooling2D(Layer):
def __init__(self, size=(2, 2), **kwargs):
super(MaxUnpooling2D, self).__init__(**kwargs)
self.size = size
def call(self, inputs, output_shape=None):
updates, mask = inputs[0], inputs[1]
with tf.compat.v1.variable_scope(self.name):
mask = K.cast(mask, 'int32')
input_shape = tf.shape(updates, out_type='int32')
if output_shape is None:
output_shape = (
input_shape[0],
input_shape[1] * self.size[0],
input_shape[2] * self.size[1],
input_shape[3])
ret = tf.scatter_nd(K.expand_dims(K.flatten(mask)),
K.flatten(updates),
[K.prod(output_shape)])
input_shape = updates.shape
out_shape = [-1,
input_shape[1] * self.size[0],
input_shape[2] * self.size[1],
input_shape[3]]
return K.reshape(ret, out_shape)
def get_config(self):
config = super().get_config().copy()
config.update({
'size': self.size
})
return config
def compute_output_shape(self, input_shape):
mask_shape = input_shape[1]
return (
mask_shape[0],
mask_shape[1]*self.size[0],
mask_shape[2]*self.size[1],
mask_shape[3]
)
def segnet(
input_shape,
n_labels,
kernel=3,
pool_size=(2, 2),
activation="sigmoid"):
# encoder
inputs = Input(shape=input_shape)
conv_1 = Convolution2D(64, (kernel, kernel), padding="same")(inputs)
conv_1 = BatchNormalization()(conv_1)
conv_1 = Activation("relu")(conv_1)
conv_2 = Convolution2D(64, (kernel, kernel), padding="same")(conv_1)
conv_2 = BatchNormalization()(conv_2)
conv_2 = Activation("relu")(conv_2)
pool_1, mask_1 = MaxPoolingWithArgmax2D(pool_size)(conv_2)
conv_3 = Convolution2D(128, (kernel, kernel), padding="same")(pool_1)
conv_3 = BatchNormalization()(conv_3)
conv_3 = Activation("relu")(conv_3)
conv_4 = Convolution2D(128, (kernel, kernel), padding="same")(conv_3)
conv_4 = BatchNormalization()(conv_4)
conv_4 = Activation("relu")(conv_4)
pool_2, mask_2 = MaxPoolingWithArgmax2D(pool_size)(conv_4)
conv_5 = Convolution2D(256, (kernel, kernel), padding="same")(pool_2)
conv_5 = BatchNormalization()(conv_5)
conv_5 = Activation("relu")(conv_5)
conv_6 = Convolution2D(256, (kernel, kernel), padding="same")(conv_5)
conv_6 = BatchNormalization()(conv_6)
conv_6 = Activation("relu")(conv_6)
conv_7 = Convolution2D(256, (kernel, kernel), padding="same")(conv_6)
conv_7 = BatchNormalization()(conv_7)
conv_7 = Activation("relu")(conv_7)
pool_3, mask_3 = MaxPoolingWithArgmax2D(pool_size)(conv_7)
conv_8 = Convolution2D(512, (kernel, kernel), padding="same")(pool_3)
conv_8 = BatchNormalization()(conv_8)
conv_8 = Activation("relu")(conv_8)
conv_9 = Convolution2D(512, (kernel, kernel), padding="same")(conv_8)
conv_9 = BatchNormalization()(conv_9)
conv_9 = Activation("relu")(conv_9)
conv_10 = Convolution2D(512, (kernel, kernel), padding="same")(conv_9)
conv_10 = BatchNormalization()(conv_10)
conv_10 = Activation("relu")(conv_10)
pool_4, mask_4 = MaxPoolingWithArgmax2D(pool_size)(conv_10)
conv_11 = Convolution2D(512, (kernel, kernel), padding="same")(pool_4)
conv_11 = BatchNormalization()(conv_11)
conv_11 = Activation("relu")(conv_11)
conv_12 = Convolution2D(512, (kernel, kernel), padding="same")(conv_11)
conv_12 = BatchNormalization()(conv_12)
conv_12 = Activation("relu")(conv_12)
conv_13 = Convolution2D(512, (kernel, kernel), padding="same")(conv_12)
conv_13 = BatchNormalization()(conv_13)
conv_13 = Activation("relu")(conv_13)
pool_5, mask_5 = MaxPoolingWithArgmax2D(pool_size)(conv_13)
print("Build encoder done..")
# decoder
unpool_1 = MaxUnpooling2D(pool_size)([pool_5, mask_5])
conv_14 = Convolution2D(512, (kernel, kernel), padding="same")(unpool_1)
conv_14 = BatchNormalization()(conv_14)
conv_14 = Activation("relu")(conv_14)
conv_15 = Convolution2D(512, (kernel, kernel), padding="same")(conv_14)
conv_15 = BatchNormalization()(conv_15)
conv_15 = Activation("relu")(conv_15)
conv_16 = Convolution2D(512, (kernel, kernel), padding="same")(conv_15)
conv_16 = BatchNormalization()(conv_16)
conv_16 = Activation("relu")(conv_16)
unpool_2 = MaxUnpooling2D(pool_size)([conv_16, mask_4])
conv_17 = Convolution2D(512, (kernel, kernel), padding="same")(unpool_2)
conv_17 = BatchNormalization()(conv_17)
conv_17 = Activation("relu")(conv_17)
conv_18 = Convolution2D(512, (kernel, kernel), padding="same")(conv_17)
conv_18 = BatchNormalization()(conv_18)
conv_18 = Activation("relu")(conv_18)
conv_19 = Convolution2D(256, (kernel, kernel), padding="same")(conv_18)
conv_19 = BatchNormalization()(conv_19)
conv_19 = Activation("relu")(conv_19)
unpool_3 = MaxUnpooling2D(pool_size)([conv_19, mask_3])
conv_20 = Convolution2D(256, (kernel, kernel), padding="same")(unpool_3)
conv_20 = BatchNormalization()(conv_20)
conv_20 = Activation("relu")(conv_20)
conv_21 = Convolution2D(256, (kernel, kernel), padding="same")(conv_20)
conv_21 = BatchNormalization()(conv_21)
conv_21 = Activation("relu")(conv_21)
conv_22 = Convolution2D(128, (kernel, kernel), padding="same")(conv_21)
conv_22 = BatchNormalization()(conv_22)
conv_22 = Activation("relu")(conv_22)
unpool_4 = MaxUnpooling2D(pool_size)([conv_22, mask_2])
conv_23 = Convolution2D(128, (kernel, kernel), padding="same")(unpool_4)
conv_23 = BatchNormalization()(conv_23)
conv_23 = Activation("relu")(conv_23)
conv_24 = Convolution2D(64, (kernel, kernel), padding="same")(conv_23)
conv_24 = BatchNormalization()(conv_24)
conv_24 = Activation("relu")(conv_24)
unpool_5 = MaxUnpooling2D(pool_size)([conv_24, mask_1])
conv_25 = Convolution2D(64, (kernel, kernel), padding="same")(unpool_5)
conv_25 = BatchNormalization()(conv_25)
conv_25 = Activation("relu")(conv_25)
conv_26 = Convolution2D(n_labels, (1, 1), padding="valid")(conv_25)
conv_26 = BatchNormalization()(conv_26)
outputs = Activation(activation)(conv_26)
print("Build decoder done..")
model = Model(inputs=inputs, outputs=outputs, name="SegNet")
return model | 0.866472 | 0.470737 |
from itertools import combinations, product
from math import comb
from typing import List, Optional
from pymatgen.analysis.interface_reactions import InterfacialReactivity
from pymatgen.core.composition import Element
from pymatgen.entries.computed_entries import ComputedEntry
from tqdm.auto import tqdm
from rxn_network.enumerators.basic import BasicEnumerator
from rxn_network.enumerators.utils import (
apply_calculators,
get_computed_rxn,
get_open_computed_rxn,
)
class MinimizeGibbsEnumerator(BasicEnumerator):
"""
Enumerator for finding all reactions between two reactants that are predicted by
thermodynamics; i.e., they appear when taking the convex hull along a straight
line connecting any two phases in G-x phase space. Identity reactions are
automatically excluded.
"""
def __init__(
self,
precursors: Optional[List[str]] = None,
target: Optional[str] = None,
calculators: Optional[List[str]] = None,
):
"""
Args:
precursors: Optional formulas of precursors.
target: Optional formula of target; only reactions which make this target
will be enumerated.
calculators: Optional list of Calculator object names; see calculators
module for options (e.g., ["ChempotDistanceCalculator])
"""
super().__init__(precursors, target, calculators)
self._build_pd = True
def estimate_max_num_reactions(self, entries: List[ComputedEntry]) -> int:
"""
Estimate the upper bound of the number of possible reactions. This will
correlate with the amount of time it takes to enumerate reactions.
Args:
entries: A list of all entries to consider
Returns: The upper bound on the number of possible reactions
"""
return comb(len(entries), 2) * 2
def _react(self, reactants, products, calculators, pd=None, grand_pd=None):
r = list(reactants)
r0 = r[0]
if len(r) == 1:
r1 = r[0]
else:
r1 = r[1]
return self._react_interface(
r0.composition, r1.composition, pd, grand_pd, calculators=calculators
)
def _get_rxn_iterable(self, combos, open_entries):
return product(combos, [None])
def _react_interface(self, r1, r2, pd, grand_pd=None, calculators=None):
"""Simple API for InterfacialReactivity module from pymatgen."""
chempots = None
if grand_pd:
interface = InterfacialReactivity(
r1,
r2,
grand_pd,
norm=True,
include_no_mixing_energy=False,
pd_non_grand=pd,
use_hull_energy=True,
)
chempots = grand_pd.chempots
else:
interface = InterfacialReactivity(
r1,
r2,
pd,
norm=False,
include_no_mixing_energy=False,
pd_non_grand=None,
use_hull_energy=True,
)
rxns = []
for _, _, _, rxn, _ in interface.get_kinks():
if grand_pd:
rxn = get_open_computed_rxn(rxn, pd.all_entries, chempots)
else:
rxn = get_computed_rxn(rxn, pd.all_entries)
rxn = apply_calculators(rxn, calculators)
rxns.append(rxn)
return rxns
class MinimizeGrandPotentialEnumerator(MinimizeGibbsEnumerator):
"""
Enumerator for finding all reactions between two reactants and an open element
that are predicted by thermo; i.e., they appear when taking the
convex hull along a straight line connecting any two phases in Phi-x
phase space. Identity reactions are excluded.
"""
def __init__(
self,
open_elem: Element,
mu: float,
precursors: Optional[List[str]] = None,
target: Optional[str] = None,
calculators: Optional[List[str]] = None,
):
super().__init__(precursors=precursors, target=target, calculators=calculators)
self.open_elem = Element(open_elem) # type: ignore
self.mu = mu
self.chempots = {self.open_elem: self.mu}
self._build_grand_pd = True
def _react(self, reactants, products, calculators, pd=None, grand_pd=None):
r = list(reactants)
r0 = r[0]
if len(r) == 1:
r1 = r[0]
else:
r1 = r[1]
open_elem = list(grand_pd.chempots.keys())[0]
for reactant in r:
elems = reactant.composition.elements
if len(elems) == 1 and elems[0] == open_elem: # skip if reactant = open_e
return []
return self._react_interface(
r0.composition,
r1.composition,
pd,
grand_pd=grand_pd,
calculators=calculators,
) | src/rxn_network/enumerators/minimize.py | from itertools import combinations, product
from math import comb
from typing import List, Optional
from pymatgen.analysis.interface_reactions import InterfacialReactivity
from pymatgen.core.composition import Element
from pymatgen.entries.computed_entries import ComputedEntry
from tqdm.auto import tqdm
from rxn_network.enumerators.basic import BasicEnumerator
from rxn_network.enumerators.utils import (
apply_calculators,
get_computed_rxn,
get_open_computed_rxn,
)
class MinimizeGibbsEnumerator(BasicEnumerator):
"""
Enumerator for finding all reactions between two reactants that are predicted by
thermodynamics; i.e., they appear when taking the convex hull along a straight
line connecting any two phases in G-x phase space. Identity reactions are
automatically excluded.
"""
def __init__(
self,
precursors: Optional[List[str]] = None,
target: Optional[str] = None,
calculators: Optional[List[str]] = None,
):
"""
Args:
precursors: Optional formulas of precursors.
target: Optional formula of target; only reactions which make this target
will be enumerated.
calculators: Optional list of Calculator object names; see calculators
module for options (e.g., ["ChempotDistanceCalculator])
"""
super().__init__(precursors, target, calculators)
self._build_pd = True
def estimate_max_num_reactions(self, entries: List[ComputedEntry]) -> int:
"""
Estimate the upper bound of the number of possible reactions. This will
correlate with the amount of time it takes to enumerate reactions.
Args:
entries: A list of all entries to consider
Returns: The upper bound on the number of possible reactions
"""
return comb(len(entries), 2) * 2
def _react(self, reactants, products, calculators, pd=None, grand_pd=None):
r = list(reactants)
r0 = r[0]
if len(r) == 1:
r1 = r[0]
else:
r1 = r[1]
return self._react_interface(
r0.composition, r1.composition, pd, grand_pd, calculators=calculators
)
def _get_rxn_iterable(self, combos, open_entries):
return product(combos, [None])
def _react_interface(self, r1, r2, pd, grand_pd=None, calculators=None):
"""Simple API for InterfacialReactivity module from pymatgen."""
chempots = None
if grand_pd:
interface = InterfacialReactivity(
r1,
r2,
grand_pd,
norm=True,
include_no_mixing_energy=False,
pd_non_grand=pd,
use_hull_energy=True,
)
chempots = grand_pd.chempots
else:
interface = InterfacialReactivity(
r1,
r2,
pd,
norm=False,
include_no_mixing_energy=False,
pd_non_grand=None,
use_hull_energy=True,
)
rxns = []
for _, _, _, rxn, _ in interface.get_kinks():
if grand_pd:
rxn = get_open_computed_rxn(rxn, pd.all_entries, chempots)
else:
rxn = get_computed_rxn(rxn, pd.all_entries)
rxn = apply_calculators(rxn, calculators)
rxns.append(rxn)
return rxns
class MinimizeGrandPotentialEnumerator(MinimizeGibbsEnumerator):
"""
Enumerator for finding all reactions between two reactants and an open element
that are predicted by thermo; i.e., they appear when taking the
convex hull along a straight line connecting any two phases in Phi-x
phase space. Identity reactions are excluded.
"""
def __init__(
self,
open_elem: Element,
mu: float,
precursors: Optional[List[str]] = None,
target: Optional[str] = None,
calculators: Optional[List[str]] = None,
):
super().__init__(precursors=precursors, target=target, calculators=calculators)
self.open_elem = Element(open_elem) # type: ignore
self.mu = mu
self.chempots = {self.open_elem: self.mu}
self._build_grand_pd = True
def _react(self, reactants, products, calculators, pd=None, grand_pd=None):
r = list(reactants)
r0 = r[0]
if len(r) == 1:
r1 = r[0]
else:
r1 = r[1]
open_elem = list(grand_pd.chempots.keys())[0]
for reactant in r:
elems = reactant.composition.elements
if len(elems) == 1 and elems[0] == open_elem: # skip if reactant = open_e
return []
return self._react_interface(
r0.composition,
r1.composition,
pd,
grand_pd=grand_pd,
calculators=calculators,
) | 0.934522 | 0.374276 |
class Player():
"""docstring for Player"""
def __init__(self):
super(Player, self).__init__()
self.inventory = []
class Scene():
"""docstring for Scene"""
def __init__(self, intro, keywords, player=None, condition=None, success=None, fail=None):
super(Scene, self).__init__()
self.intro = intro
self.keywords = keywords
self.player = player
self.condition = condition
self.success = success
self.fail = fail
def run(self):
print("\n\n\n\n")
print(self.intro)
print("\n")
if self.player and self.condition:
if self.condition in self.player.inventory:
return self.success
else:
return self.fail
while True:
reponse = input(">").lower()
if reponse in self.keywords:
return reponse
class Giveaway():
"""docstring for ClassName"""
def __init__(self, item, player, callback):
super(Giveaway, self).__init__()
self.item = item
self.player = player
self.callback = callback
def run(self):
self.player.inventory.append(self.item)
return self.callback
player = Player()
scenes = {
'départ': Scene(
intro="Bonjour visiteurs, bienvenue à Antswood.\n\nUne forêt où cohabitent différentes espèces (comme ici une fourmi et une abeille) qui, ensemble, forment un écosystème complexe rempli de personnages, d’actions (et réactions), d’intrigues et de challenges à accomplir.",
keywords=['fourmi','abeille','forêt'],
),
'fourmi':Scene(
intro="Bonjour, je suis fourmi #27903. \n\nNous les fourmis entretenons les arbres et la forêt. Notre objectif: maintenir un certain équilibre dans l’écosystème.",
keywords=['start','abeille','forêt'],
),
'abeille': Scene(
intro="Bonjour, je suis une abeille. Nous nous chargeons de polliniser les fleurs. Notre objectif: trouver des fleurs. Parfois nous y trouvons des graines :)",
keywords=['départ','fourmi','forêt','graines'],
),
'graines': Giveaway('graines',player,'abeille2'),
'abeille2': Scene(
intro="Voici, prenez ces graines. Elles vous surront sûrement plus utiles.",
keywords=['départ','fourmi','forêt'],
),
'forêt': Scene(
intro="Vous vous balladez en forêt, vous vous appercevez que des arbres ont disparu.\nNous pourrions peut-être en planter.",
keywords=['départ','planter'],
),
'planter': Scene(
intro="...",
keywords=['départ'],
player=player,
condition='graines',
success='3ND',
fail='forêt'
),
'3ND': Scene(
intro="Vous avez planté les graines. Bien joué!",
keywords=['départ']
)
}
def main():
scene = "départ"
while True:
scene = scenes[scene].run()
main() | game.py | class Player():
"""docstring for Player"""
def __init__(self):
super(Player, self).__init__()
self.inventory = []
class Scene():
"""docstring for Scene"""
def __init__(self, intro, keywords, player=None, condition=None, success=None, fail=None):
super(Scene, self).__init__()
self.intro = intro
self.keywords = keywords
self.player = player
self.condition = condition
self.success = success
self.fail = fail
def run(self):
print("\n\n\n\n")
print(self.intro)
print("\n")
if self.player and self.condition:
if self.condition in self.player.inventory:
return self.success
else:
return self.fail
while True:
reponse = input(">").lower()
if reponse in self.keywords:
return reponse
class Giveaway():
"""docstring for ClassName"""
def __init__(self, item, player, callback):
super(Giveaway, self).__init__()
self.item = item
self.player = player
self.callback = callback
def run(self):
self.player.inventory.append(self.item)
return self.callback
player = Player()
scenes = {
'départ': Scene(
intro="Bonjour visiteurs, bienvenue à Antswood.\n\nUne forêt où cohabitent différentes espèces (comme ici une fourmi et une abeille) qui, ensemble, forment un écosystème complexe rempli de personnages, d’actions (et réactions), d’intrigues et de challenges à accomplir.",
keywords=['fourmi','abeille','forêt'],
),
'fourmi':Scene(
intro="Bonjour, je suis fourmi #27903. \n\nNous les fourmis entretenons les arbres et la forêt. Notre objectif: maintenir un certain équilibre dans l’écosystème.",
keywords=['start','abeille','forêt'],
),
'abeille': Scene(
intro="Bonjour, je suis une abeille. Nous nous chargeons de polliniser les fleurs. Notre objectif: trouver des fleurs. Parfois nous y trouvons des graines :)",
keywords=['départ','fourmi','forêt','graines'],
),
'graines': Giveaway('graines',player,'abeille2'),
'abeille2': Scene(
intro="Voici, prenez ces graines. Elles vous surront sûrement plus utiles.",
keywords=['départ','fourmi','forêt'],
),
'forêt': Scene(
intro="Vous vous balladez en forêt, vous vous appercevez que des arbres ont disparu.\nNous pourrions peut-être en planter.",
keywords=['départ','planter'],
),
'planter': Scene(
intro="...",
keywords=['départ'],
player=player,
condition='graines',
success='3ND',
fail='forêt'
),
'3ND': Scene(
intro="Vous avez planté les graines. Bien joué!",
keywords=['départ']
)
}
def main():
scene = "départ"
while True:
scene = scenes[scene].run()
main() | 0.480479 | 0.226527 |
import copy
import unittest
import verify_config
class VerifyConfigTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.schema = {"properties": {"ingress": {"type": "object"},
"egress": {"type": "object"},
"routingTable": {"type": "array"}},
"required": ["ingress", "egress", "routingTable"]}
interface = {
"useDHCP": False,
"ping": True,
"mtu": 9000
}
cls.config = {"ingress": interface,
"egress": interface,
"routingTable": [
{
"ingressPort": 50000,
"egressIpAddress": "192.168.0.20",
"egressSrcPort": 60000,
"egressDestPort": 60600
},
{
"ingressPort": 50500,
"egressIpAddress": "192.168.0.21",
"egressSrcPort": 60004,
"egressDestPort": 61004
}
]
}
def test_empty_config_throws_error(self):
self.assertRaises(verify_config.ConfigErrorEmptyFile,
verify_config.VerifyConfig({}).validate,
{})
def test_config_file_longer_than_max_length_throws_error(self):
self.assertRaises(verify_config.ConfigErrorFileSizeTooLarge,
verify_config.VerifyConfig(schema={}, max_config_bytes=10).validate,
config={"ingress": {}, "egress": {}, "routingTable": []})
def test_config_file_matches_schema(self):
verify_config.VerifyConfig(self.schema).validate(self.config)
def test_config_file_that_does_not_match_schema_throws_error(self):
self.assertRaises(verify_config.ConfigErrorFailedSchemaVerification,
verify_config.VerifyConfig(self.schema).validate,
{"ingress": {}})
def test_port_span_exceeds_2048_throws_error(self):
config_port_span_too_large = copy.deepcopy(self.config)
config_port_span_too_large["routingTable"] = [
{
"ingressPort": 40000,
"egressIpAddress": "192.168.0.20",
"egressSrcPort": 50001,
"egressDestPort": 50001
},
{
"ingressPort": 42048,
"egressIpAddress": "192.168.0.21",
"egressSrcPort": 51024,
"egressDestPort": 51024
}
]
self.assertRaisesRegex(verify_config.ConfigErrorInvalidPortSpan,
"Config validation failed: Ingress portSpan must be less than 2048.",
verify_config.VerifyConfig(self.schema).validate,
config_port_span_too_large)
def test_ingress_ports_not_unique_throws_error(self):
config_ports_not_unique = copy.deepcopy(self.config)
config_ports_not_unique["routingTable"] = [
{
"ingressPort": 40000,
"egressIpAddress": "192.168.0.20",
"egressSrcPort": 50001,
"egressDestPort": 50001
},
{
"ingressPort": 40000,
"egressIpAddress": "192.168.0.21",
"egressSrcPort": 51024,
"egressDestPort": 51024
}
]
self.assertRaisesRegex(verify_config.ConfigErrorIngressPortsNotUnique,
"Config validation failed: Ingress ports must be unique.",
verify_config.VerifyConfig(self.schema).validate,
config_ports_not_unique)
if __name__ == '__main__':
unittest.main() | Emulator/verify_config_tests.py |
import copy
import unittest
import verify_config
class VerifyConfigTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.schema = {"properties": {"ingress": {"type": "object"},
"egress": {"type": "object"},
"routingTable": {"type": "array"}},
"required": ["ingress", "egress", "routingTable"]}
interface = {
"useDHCP": False,
"ping": True,
"mtu": 9000
}
cls.config = {"ingress": interface,
"egress": interface,
"routingTable": [
{
"ingressPort": 50000,
"egressIpAddress": "192.168.0.20",
"egressSrcPort": 60000,
"egressDestPort": 60600
},
{
"ingressPort": 50500,
"egressIpAddress": "192.168.0.21",
"egressSrcPort": 60004,
"egressDestPort": 61004
}
]
}
def test_empty_config_throws_error(self):
self.assertRaises(verify_config.ConfigErrorEmptyFile,
verify_config.VerifyConfig({}).validate,
{})
def test_config_file_longer_than_max_length_throws_error(self):
self.assertRaises(verify_config.ConfigErrorFileSizeTooLarge,
verify_config.VerifyConfig(schema={}, max_config_bytes=10).validate,
config={"ingress": {}, "egress": {}, "routingTable": []})
def test_config_file_matches_schema(self):
verify_config.VerifyConfig(self.schema).validate(self.config)
def test_config_file_that_does_not_match_schema_throws_error(self):
self.assertRaises(verify_config.ConfigErrorFailedSchemaVerification,
verify_config.VerifyConfig(self.schema).validate,
{"ingress": {}})
def test_port_span_exceeds_2048_throws_error(self):
config_port_span_too_large = copy.deepcopy(self.config)
config_port_span_too_large["routingTable"] = [
{
"ingressPort": 40000,
"egressIpAddress": "192.168.0.20",
"egressSrcPort": 50001,
"egressDestPort": 50001
},
{
"ingressPort": 42048,
"egressIpAddress": "192.168.0.21",
"egressSrcPort": 51024,
"egressDestPort": 51024
}
]
self.assertRaisesRegex(verify_config.ConfigErrorInvalidPortSpan,
"Config validation failed: Ingress portSpan must be less than 2048.",
verify_config.VerifyConfig(self.schema).validate,
config_port_span_too_large)
def test_ingress_ports_not_unique_throws_error(self):
config_ports_not_unique = copy.deepcopy(self.config)
config_ports_not_unique["routingTable"] = [
{
"ingressPort": 40000,
"egressIpAddress": "192.168.0.20",
"egressSrcPort": 50001,
"egressDestPort": 50001
},
{
"ingressPort": 40000,
"egressIpAddress": "192.168.0.21",
"egressSrcPort": 51024,
"egressDestPort": 51024
}
]
self.assertRaisesRegex(verify_config.ConfigErrorIngressPortsNotUnique,
"Config validation failed: Ingress ports must be unique.",
verify_config.VerifyConfig(self.schema).validate,
config_ports_not_unique)
if __name__ == '__main__':
unittest.main() | 0.562657 | 0.332527 |
import os
import re
import sys
import bz2
import bct
import math
import random
import pickle
import numpy as np
import scipy as sp
import pandas as pd
import dionysus as d
import networkx as nx
import cpnet as cpa
import gensim.utils as gu
import gensim.models as gm
import gensim.matutils as gmat
import gensim.parsing.preprocessing as gpp
import mwparserfromhell as mph
import xml.etree.ElementTree as ET
import sklearn.metrics.pairwise as smp
class Dump:
    """Loads and parses Wikipedia XML dumps.

    Pages are fetched lazily from the multistream bzip2 dump at
    ``path_xml`` using the byte offsets listed in the index ``path_idx``.

    Attributes
    ----------
    idx: dictionary
        ``{'page_name': (byte offset, page id, block size)}``
        Cached. Lazy.
    links: list of strings
        All links of the currently loaded page.
    article_links: list of strings
        Article links only (no 'File:', 'Category:', etc. namespaces).
    years: list of int
        Years in the lead + History section of the loaded page;
        BC denoted as negative values.
    page: mwparserfromhell.wikicode
        Current loaded wiki page.
    path_xml: string
        Path to the zipped XML dump file.
    path_idx: string
        Path to the zipped index file.
    offset_max: int
        Maximum block offset present in the index.
    cache: tuple (int, xml.etree.ElementTree.Element)
        (offset, parsed XML tree) of the current block.
    """
    MAX_YEAR = 2020

    def __init__(self, path_xml, path_idx):
        self._idx = {}
        self._links = []
        self._article_links = []
        self._years = []
        self._page = None
        self.path_xml = path_xml
        self.path_idx = path_idx
        self.offset_max = 0
        self.cache = (0, None)  # (block offset, parsed XML root)

    @property
    def idx(self):
        """Lazily parse the index file into ``{name: (offset, pid, block size)}``."""
        if self._idx:
            return self._idx
        print('Dump: Loading index...')
        with bz2.BZ2File(self.path_idx, 'rb') as file:
            lines = [line for line in file]
        # Walk the index backwards so each entry can derive its block size
        # from the offset of the block that follows it.
        block_end = os.path.getsize(self.path_xml)
        offset_prev = block_end
        for line in reversed(lines):
            offset, pid, name = line.strip().split(b':', 2)
            offset, pid, name = (int(offset), int(pid), name.decode('utf8'))
            block_end = offset_prev if offset < offset_prev else block_end
            self._idx[name] = (offset, pid, block_end - offset)
            offset_prev = offset
        self.offset_max = max([x[0] for x in self._idx.values()])
        print('Dump: Loaded.')
        return self._idx

    @property
    def links(self):
        """All wikilink targets of the current page, normalized:
        section anchors stripped, first word capitalized. Lazy."""
        if self._links:
            return self._links
        elif self.page:
            self._links = [str(x.title) for x in self.page.filter_wikilinks()]
            self._links = [link.split('#')[0] for link in self._links]
            self._links = [link.split(' ') for link in self._links]
            # NOTE(review): str.capitalize() also lowercases the rest of the
            # first word ('USA' -> 'Usa'); wiki titles only uppercase the
            # first character — confirm whether this is intended.
            self._links = [[words[0].capitalize()] + words[1:] for words in self._links]
            self._links = [' '.join(words) for words in self._links]
            return self._links
        else:
            return self._links

    @property
    def article_links(self):
        """Links without a namespace prefix (no ':' in the title). Lazy."""
        if self._article_links:
            return self._article_links
        elif self.links:
            self._article_links = [x for x in self.links if ':' not in x]
            return self._article_links
        else:
            return self._article_links

    @property
    def years(self):
        """Years mentioned in the lead section + History section. Lazy."""
        if self._years:
            return self._years
        elif self.page:
            history = Dump.get_history(self.page)
            top = self.page.get_sections()[0].strip_code()
            self._years = Dump.filter_years(top + history)
            return self._years
        else:
            return self._years

    @property
    def page(self):
        return self._page

    @page.setter
    def page(self, page):
        # Setting a new page invalidates all per-page caches.
        self._page = page
        self._links = []
        self._article_links = []
        self._years = []

    def load_page(self, page_name, filter_top=False):
        """Loads & returns page (``mwparserfromhell.wikicode``)
        named ``page_name`` from the dump file. Returns only the
        top section if ``filter_top``. Follows redirects recursively.
        Returns None if the name is not in the index.
        """
        if page_name not in self.idx.keys():
            self.page = None
            return
        offset, pid, block_size = self.idx[page_name]
        if offset == self.cache[0]:
            root = self.cache[1]
        else:
            xml = Dump.fetch_block(self.path_xml, offset, block_size)
            # All blocks except the last lack the closing root tag.
            xml = b'<mediawiki>' + xml + b'</mediawiki>' * (offset != self.offset_max)
            root = ET.fromstring(xml)
            self.cache = (offset, root)
        text = Dump.search_id(root, pid)
        text = Dump.filter_top_section(text) if filter_top else text
        self.page = mph.parse(text, skip_style_tags=True)
        if self.page and 'REDIRECT' in self.page.strip_code():
            redirect = self.page.filter_wikilinks()[0].title
            return self.load_page(str(redirect))
        else:
            return self.page

    @staticmethod
    def fetch_block(path, offset, block_size):
        """Fetches a block of ``block_size`` (``int``) bytes
        at ``offset`` (``int``) in the zipped dump at
        ``path`` (``string``) and returns the uncompressed bytes.
        """
        with open(path, 'rb') as file:
            file.seek(offset)
            return bz2.decompress(file.read(block_size))

    @staticmethod
    def search_id(root, pid):
        """Returns the text of the page with id ``pid`` (None if absent)."""
        for page in root.iter('page'):
            if pid == int(page.find('id').text):
                return page.find('revision').find('text').text

    @staticmethod
    def filter_top_section(text):
        """Returns the top section of text,
        where the first header has the form ``==Heading==``.
        """
        head = re.search(r'==.*?==', text)
        idx = head.span(0)[0] if head else len(text)
        return text[:idx]

    @staticmethod
    def get_history(page):
        """Returns the text of the History section, or ``""`` if not found."""
        headings = page.filter_headings()
        idx = [i for i, head in enumerate(headings)
               if 'History' in head or 'history' in head]
        if not idx:
            return ""
        sections = page.get_sections(include_headings=True)
        history = str(sections[idx[0] + 1].strip_code())
        return history

    @staticmethod
    def filter_years(text):
        """Filters year mentions from ``text``.

        Matches '<cue word> NNNN [BC|BCE]' and 'Nth century [BC|BCE]'
        (a century maps to its first year, e.g. 3rd century -> 200).
        BC/BCE years are negative; years >= MAX_YEAR are dropped.
        Returns a sorted list of ints.
        """
        months = ['january', 'february', 'march', 'april', 'may', 'june',
                  'july', 'august', 'september', 'october', 'november', 'december']
        prepositions = ['around', 'after', 'at', 'as',
                        'approximately', 'before', 'between', 'by',
                        'during', 'from', 'in', 'near', 'past',
                        'since', 'until', 'within']  # removed: about, on
        conjugations = ['and']
        articles = ['the']
        times = ['early', 'mid', 'late']
        patterns = months + prepositions + conjugations + articles + times
        # BUGFIX: dropped the inline (?i) flag — re.IGNORECASE is already
        # passed, and a mid-pattern global flag is an error on Python 3.11+.
        re_string = r'\b(' + '|'.join(patterns) + r')\b(\s|-)\b([0-9]{3,4})s?\b(?!\sMYA)\s?(BCE|BC)?'
        years = [int(match.group(3)) * (-2 * bool(match.group(4)) + 1)
                 for match in re.finditer(re_string, text, re.IGNORECASE)]
        re_string = r'([0-9]{1,2})(st|nd|rd|th) century\s?(BCE|BC)?'
        # BUGFIX: the sign must come from the optional BC/BCE group(3);
        # group(2) is the ordinal suffix, which is always present and made
        # every century negative.
        centuries = [(int(match.group(1)) * 100 - 100) * (-2 * bool(match.group(3)) + 1)
                     for match in re.finditer(re_string, text, re.IGNORECASE)]
        # BUGFIX: centuries were previously appended twice (once here and
        # again, unfiltered, in the return expression).
        years += centuries
        years = [y for y in years if y < Dump.MAX_YEAR]
        return sorted(years)
class Corpus:
    """Iterable/iterator over the articles of a ``wiki.Dump``.

    Parameters
    ----------
    dump: wiki.Dump
    output: string
        'doc' for a list of preprocessed tokens,
        'tag' for TaggedDocument(doc, [index]),
        'bow' for a bag of words [(int, int)]
    dct: gensim.corpus.Dictionary
        required for the 'bow' output mode
    """

    def __init__(self, dump, output='doc', dct=None):
        self.dump = dump
        self.names = list(self.dump.idx.keys())
        self.output = output
        self.dct = dct

    def __iter__(self):
        self.i = 0
        return self

    def __next__(self):
        if self.i >= len(self.names):
            raise StopIteration
        sys.stdout.write("\rCorpus index: " + str(self.i + 1) +
                         '/' + str(len(self.names)))
        sys.stdout.flush()
        if self.output == 'doc':
            item = self[self.i]
        elif self.output == 'tag':
            item = gm.doc2vec.TaggedDocument(self[self.i], [self.i])
        elif self.output == 'bow':
            item = self.dct.doc2bow(self[self.i])
        self.i += 1
        return item

    def __getitem__(self, index):
        page = self.dump.load_page(self.names[index])
        return gu.simple_preprocess(page.strip_code())
class GraphContainer():
    """Holds a ``networkx.DiGraph`` plus lazily computed index-based views.

    Attributes
    ----------
    graph: networkx.DiGraph
        node name is name of wikipedia page,
        ``year`` attribute indicates year
    numbered: networkx.DiGraph
        node name is an index (see nodes),
        ``year`` is an index (see years), lazy
    nodes: list
        List of node names,
        indexed by node in ``numbered``, lazy
    years: list
        List of distinct years, sorted,
        indexed by ``year`` in ``numbered``, lazy
    nodes_for_year: dict
        ``{int year: [int node_index]}``, lazy
    """

    def __init__(self):
        self.graph = nx.DiGraph()
        self._numbered = None
        self._nodes = []
        self._years = []
        self._nodes_for_year = {}

    @property
    def numbered(self):
        # BUGFIX: compare against None — an empty DiGraph is falsy, so the
        # original truthiness test rebuilt the graph on every access.
        if self._numbered is None:
            self._numbered = nx.DiGraph()
            # Hoisted name->index map replaces repeated O(n) list.index calls.
            index = {name: i for i, name in enumerate(self.nodes)}
            years = self.years
            for node in self.graph.nodes:
                self._numbered.add_node(
                    index[node],
                    year=years.index(self.graph.nodes[node]['year']))
                self._numbered.add_edges_from(
                    [(index[node], index[succ])
                     for succ in self.graph.successors(node)])
        return self._numbered

    @property
    def nodes(self):
        if not self._nodes:
            self._nodes = list(self.graph.nodes)
        return self._nodes

    @property
    def years(self):
        if not self._years:
            years = [self.graph.nodes[n]['year'] for n in self.graph.nodes()]
            self._years = sorted(list(set(years)))
        return self._years

    @property
    def nodes_for_year(self):
        if not self._nodes_for_year:
            # Same node order as ``nodes``; dict lookup replaces list.index.
            index = {name: i for i, name in enumerate(self.graph.nodes)}
            self._nodes_for_year = {year: [index[n] for n in self.graph.nodes
                                           if self.graph.nodes[n]['year'] == year]
                                    for year in self.years}
        return self._nodes_for_year
class PersistentHomology(GraphContainer):
    """Adds persistent-homology computations on top of ``GraphContainer``.

    Attributes
    ----------
    cliques: list of lists
        all cliques of the undirected ``numbered`` graph, lazy
    filtration: dionysus.filtration
        cliques filtered by year, lazy
    persistence: dionysus.reduced_matrix
        lazy
    barcodes: pandas.DataFrame
        lazy
    """

    def __init__(self):
        GraphContainer.__init__(self)
        self._cliques = None
        self._filtration = None
        self._persistence = None
        self._barcodes = None

    @property
    def cliques(self):
        # Compare against None so a legitimately empty clique list is cached
        # instead of recomputed on every access.
        if self._cliques is None:
            self._cliques = list(
                nx.algorithms.clique.enumerate_all_cliques(
                    nx.Graph(self.numbered)))
        return self._cliques

    @property
    def filtration(self):
        # ``is None`` replaces the non-idiomatic ``!= None`` comparison.
        if self._filtration is None:
            self._filtration = d.Filtration()
            nodes_so_far = set()  # set membership: O(1) per clique vertex
            for year in self.years:
                nodes_now = self.nodes_for_year[year]
                nodes_so_far.update(nodes_now)
                # A clique is appended in every year in which all of its
                # vertices exist (matches the original behavior).
                for clique in self.cliques:
                    if all(n in nodes_so_far for n in clique):
                        self._filtration.append(d.Simplex(clique, year))
            self._filtration.sort()
        return self._filtration

    @property
    def persistence(self):
        if self._persistence is None:
            self._persistence = d.homology_persistence(self.filtration)
        return self._persistence

    @property
    def barcodes(self):
        if isinstance(self._barcodes, pd.DataFrame) \
                and len(self._barcodes.index) != 0:
            return self._barcodes
        self._barcodes = PersistentHomology.compute_barcodes(
            self.filtration, self.persistence, self.graph, self.nodes)
        return self._barcodes

    @staticmethod
    def compute_barcodes(f, m, graph, names):
        """Uses dionysus filtration & persistence
        (in reduced matrix form) to compute barcodes.

        Parameters
        ----------
        f: dionysus.Filtration
        m: dionysus.ReducedMatrix
            (see homology_persistence)
        graph: networkx.DiGraph
            node attribute ``year`` is read for birth/death nodes
        names: list of strings
            names of node indices

        Returns
        -------
        barcodes: pandas.DataFrame
            one row per persistence pair: dimension, birth/death years,
            lifetime, the birth/death simplices and involved nodes
        """
        print('wiki.Net: computing barcodes... (skip negatives)')
        node_list = list(graph.nodes)
        barcodes = []
        for i, c in enumerate(m):
            # Negative simplices (paired with an earlier index) are the
            # death partner of an earlier birth; skip them.
            if m.pair(i) < i:
                continue
            sys.stdout.write("\rwiki.Net: barcode {}/{}".format(i + 1, len(m)))
            sys.stdout.flush()
            dim = f[i].dimension()
            birth_year = int(f[i].data)
            birth_simplex = [names[s] for s in f[i]]
            birth_nodes = [n for n in birth_simplex
                           if graph.nodes[n]['year'] == birth_year]
            if m.pair(i) != m.unpaired:
                death_year = int(f[m.pair(i)].data)
                death_simplex = [names[s] for s in f[m.pair(i)]]
                death_nodes = [n for n in death_simplex
                               if graph.nodes[n]['year'] == death_year]
            else:
                # Feature never dies within the filtration.
                death_year = np.inf
                death_simplex = []
                death_nodes = []
            pair = m.pair(i) if m.pair(i) != m.unpaired else np.inf
            chain = m[pair] if pair != np.inf else m[i]
            simp_comp = [f[entry.index] for entry in chain]
            nodes = [node_list[idx] for simplex in simp_comp for idx in simplex]
            barcodes.append([dim, birth_year, death_year, death_year - birth_year,
                             birth_simplex, death_simplex,
                             birth_nodes, death_nodes, list(set(nodes))])
        print('')
        barcodes.sort(key=lambda x: x[0])
        bar_data = pd.DataFrame(data=barcodes,
                                columns=['dim', 'birth', 'death', 'lifetime',
                                         'birth simplex', 'death simplex',
                                         'birth nodes', 'death nodes',
                                         'homology nodes'])
        return bar_data
class Net(PersistentHomology):
    """``Net`` is a wrapper for ``networkx.DiGraph``.
    Uses ``dionysus`` for persistence homology.

    Attributes
    ----------
    tfidf: scipy.sparse.csc.csc_matrix
        sparse column matrix of tfidfs,
        ordered by nodes, also stored in
        ``self.graph.graph['tfidf']``, lazy
    MAX_YEAR: int
        ``year = MAX_YEAR (2020)`` for nodes with parents
        without years
    YEAR_FILLED_DELTA: int
        ``year = year of parents + YEAR_FILLED_DELTA (1)``
    """
    MAX_YEAR = 2020
    YEAR_FILLED_DELTA = 1

    def __init__(self, path_graph='', path_barcodes=''):
        PersistentHomology.__init__(self)
        self._tfidf = None
        if path_graph:
            self.load_graph(path_graph)
        if path_barcodes:
            self.load_barcodes(path_barcodes)

    @property
    def tfidf(self):
        # BUGFIX: ``if self._tfidf:`` raises ValueError for a sparse matrix
        # with more than one stored element; test against None instead.
        if self._tfidf is None and 'tfidf' in self.graph.graph.keys():
            self._tfidf = self.graph.graph['tfidf']
        return self._tfidf

    def build_graph(self, name='', dump=None, nodes=None, depth_goal=1,
                    filter_top=True, remove_isolates=True, add_years=True,
                    fill_empty_years=True, model=None, dct=None,
                    compute_core_periphery=True, compute_communities=True,
                    compute_community_cores=True):
        """Builds ``self.graph`` (``networkx.DiGraph``) from nodes (``list``
        of ``string``) by breadth-first traversal of ``dump``. Set ``model``
        (from ``gensim``) and ``dct`` (``gensim.corpora.Dictionary``) for
        weighted edges.

        Raises
        ------
        AttributeError
            if ``dump`` is not provided
        """
        self.graph = nx.DiGraph()
        self.graph.name = name
        if not dump:
            raise AttributeError('wiki.Net: Provide wiki.Dump object.')
        print('wiki.Net: traversing Wikipedia...')
        Net.bft(self.graph, dump, nodes, depth_goal=depth_goal,
                nodes=nodes, filter_top=filter_top)
        if remove_isolates:
            print('wiki.Net: removing isolates...')
            # BUGFIX: materialize the generator first — removing nodes while
            # iterating a live ``nx.isolates`` view raises RuntimeError.
            self.graph.remove_nodes_from(list(nx.isolates(self.graph)))
        if add_years:
            print('wiki.Net: adding years...')
            for node in self.graph.nodes:
                dump.load_page(node)
                # [] is the "year unknown" sentinel that fill_empty_nodes
                # relies on.
                self.graph.nodes[node]['year'] = dump.years[0] if len(dump.years) > 0 else []
            self.graph.graph['num_years'] = sum(
                [bool(y) for y in nx.get_node_attributes(self.graph, 'year').values()]
            )
        if fill_empty_years:
            print('wiki.Net: filling empty years...')
            # Pass 1: fill only nodes whose parents ALL have years;
            # pass 2: fill from any parent with a year; anything left
            # defaults to MAX_YEAR.
            nodes_filled = True
            while nodes_filled:
                nodes_filled = Net.fill_empty_nodes(self.graph, full_parents=True)
            nodes_filled = True
            while nodes_filled:
                nodes_filled = Net.fill_empty_nodes(self.graph, full_parents=False)
            for node in self.graph.nodes:
                if not self.graph.nodes[node]['year']:
                    self.graph.nodes[node]['year'] = Net.MAX_YEAR
        if model and dct:
            print('wiki.Net: calculating weights...')
            self.graph.graph['tfidf'] = Net.compute_tfidf(self.nodes, dump, model, dct)
            Net.set_weights(self.graph)
        if compute_core_periphery:
            print('wiki.Net: computing core-periphery...')
            Net.assign_core_periphery(self.graph)
        if compute_communities:
            print('wiki.Net: computing communities...')
            Net.assign_communities(self.graph)
        if compute_community_cores:
            print('wiki.Net: computing cores within communities...')
            Net.assign_cores_to_communities(self.graph)

    def load_graph(self, path):
        """Loads ``graph`` from ``path``.
        If ``filename.gexf`` then read as ``gexf``.
        Else, use ``pickle``."""
        if path.split('.')[-1] == 'gexf':
            self.graph = nx.read_gexf(path)
        else:
            self.graph = nx.read_gpickle(path)

    def save_graph(self, path):
        """Saves ``graph`` at ``path``.
        If ``filename.gexf`` then save as ``gexf``.
        Else, use ``pickle``."""
        if path.split('.')[-1] == 'gexf':
            nx.write_gexf(self.graph, path)
        else:
            nx.write_gpickle(self.graph, path)

    def load_barcodes(self, path):
        """Loads ``barcodes`` from ``pickle``."""
        # BUGFIX: close the file handle (was a bare open() with no close).
        with open(path, 'rb') as file:
            self._barcodes = pickle.load(file)

    def save_barcodes(self, path):
        """Saves ``barcodes`` as ``pickle``."""
        with open(path, 'wb') as file:
            pickle.dump(self.barcodes, file)

    def randomize(self, null_type,
                  compute_core_periphery=True, compute_communities=True,
                  compute_community_cores=True):
        """Returns a new ``wiki.Net`` with a randomized
        copy of ``graph``. Set ``null_type`` as one of
        ``'year'``, ``'target'``, ``'source'``:
        'year' shuffles node years; 'target'/'source' rewire each edge
        to a random endpoint while keeping its weight.
        """
        network = Net()
        network.graph = self.graph.copy()
        if null_type == 'year':
            years = list(nx.get_node_attributes(network.graph, 'year').values())
            random.shuffle(years)
            for node in network.graph.nodes:
                network.graph.nodes[node]['year'] = years.pop()
        elif null_type == 'target':
            nodes = list(network.graph.nodes)
            for s, t in self.graph.edges:
                network.graph.remove_edge(s, t)
                nodes.remove(t)  # avoid re-creating the removed edge
                network.graph.add_edge(s, random.choice(nodes),
                                       weight=self.graph[s][t]['weight'])
                nodes.append(t)
        elif null_type == 'source':
            nodes = list(network.graph.nodes)
            for s, t in self.graph.edges:
                network.graph.remove_edge(s, t)
                nodes.remove(s)
                network.graph.add_edge(random.choice(nodes), t,
                                       weight=self.graph[s][t]['weight'])
                nodes.append(s)
        if compute_core_periphery:
            print('wiki.Net: computing core-periphery...')
            Net.assign_core_periphery(network.graph)
        if compute_communities:
            print('wiki.Net: computing communities...')
            Net.assign_communities(network.graph)
        if compute_community_cores:
            print('wiki.Net: computing cores within communities...')
            # BUGFIX: operate on the randomized copy, not on self.graph.
            Net.assign_cores_to_communities(network.graph)
        return network

    @staticmethod
    def fill_empty_nodes(graph, full_parents=True):
        """Fills nodes without ``year`` with the ``year`` of parents.

        Parameters
        ----------
        graph: networkx.DiGraph
        full_parents: bool
            whether to fill empty nodes that have
            all parents with non-empty 'year'

        Returns
        -------
        bool
            whether at least 1 empty node was filled
            (returns immediately after the first fill)
        """
        empty_nodes = [n for n in graph.nodes if not graph.nodes[n]['year']]
        for node in empty_nodes:
            years = [graph.nodes[p]['year'] for p in graph.predecessors(node)]
            if not years:
                continue
            if full_parents:
                if [] not in years:
                    graph.nodes[node]['year'] = max(years) \
                        + Net.YEAR_FILLED_DELTA
                    return True
            else:
                years_filtered = [y for y in years if y]
                if years_filtered:
                    graph.nodes[node]['year'] = max(years_filtered) \
                        + Net.YEAR_FILLED_DELTA
                    return True
        return False

    @staticmethod
    def bft(graph, dump, queue, depth_goal=1, nodes=None, filter_top=True):
        """Breadth-first traversal of hyperlink graph.
        Edges are added as (link, page): from the linked page to the
        page that links to it.

        Parameters
        ----------
        graph: networkx.DiGraph
        dump: wiki.Dump
        queue: list of **strings**
            names of Wikipedia pages (copied, not mutated)
        depth_goal: int
        nodes: list of **strings**
            whitelist of page names; links outside it are ignored
        filter_top: bool
            only parse the top section of each page
        """
        queue = queue.copy()
        depth = 0
        depth_num_items = len(queue)
        depth_inc_pending = False
        print('wiki.Net: depth = ' + str(depth))
        while queue:
            name = queue.pop(0)
            sys.stdout.write("\rwiki.Net: len(queue) = " + str(len(queue)))
            sys.stdout.flush()
            depth_num_items -= 1
            if depth_num_items == 0:
                depth += 1
                print('\nwiki.Net: depth = ' + str(depth))
                depth_inc_pending = True
                if depth == depth_goal:
                    break
            page = dump.load_page(name, filter_top=filter_top)
            if not page:
                continue
            links = [l for l in dump.article_links
                     if Net.filter(name, l, graph, nodes)]
            for link in links:
                graph.add_edge(link, name, weight=1)
                if link not in queue:
                    queue.append(link)
            if depth_inc_pending:
                # Everything currently queued belongs to the next depth.
                depth_num_items = len(queue)
                depth_inc_pending = False

    @staticmethod
    def filter(page, link, graph, nodes=None):
        """Returns False for links outside the whitelist or already added."""
        if nodes and link not in nodes:
            return False
        if (page, link) in graph.edges:
            return False
        return True

    @staticmethod
    def compute_tfidf(nodes, dump, model, dct):
        """Compute tf-idf of pages with titles in ``nodes``.

        Parameters
        ----------
        nodes: list of nodes
        dump: wiki.Dump
        model: gensim.models.tfidfmodel.TfidfModel
        dct: gensim.corpora.Dictionary

        Returns
        -------
        vecs: scipy.sparse.csc.csc_matrix
            one column per node (empty column for missing pages)
        """
        pages = [dump.load_page(page) for page in nodes]
        bows = [model[dct.doc2bow(gu.simple_preprocess(page.strip_code()))]
                if page else []
                for page in pages]
        return gmat.corpus2csc(bows)

    @staticmethod
    def set_weights(graph):
        """Set the weights of graph (``networkx.DiGraph``) as
        the cosine similarity between ``graph.graph['tfidf']``
        vectors of nodes."""
        vecs = graph.graph['tfidf']
        # Hoisted node->column map replaces O(n) list.index per edge.
        index = {node: i for i, node in enumerate(graph.nodes)}
        for n1, n2 in graph.edges:
            v1 = vecs[:, index[n1]].transpose()
            v2 = vecs[:, index[n2]].transpose()
            graph[n1][n2]['weight'] = smp.cosine_similarity(X=v1, Y=v2)[0, 0]

    @staticmethod
    def assign_core_periphery(graph):
        """Compute core-periphery of ``graph`` (``nx.DiGraph``;
        converted to symmetric ``nx.Graph``).
        Assign ``core`` as ``1`` or ``0`` to each node.
        Assign ``coreness`` to ``graph``.
        See ``core_periphery_dir()`` in ``bctpy``.
        """
        # borgatti-everett
        be = bct.core_periphery_dir(nx.convert_matrix.to_numpy_array(graph))
        for i, node in enumerate(graph.nodes):
            graph.nodes[node]['core_be'] = be[0][i]
        graph.graph['coreness_be'] = be[1]
        # rombach
        rb = cpa.Rombach()
        rb.detect(graph)
        if rb.get_coreness() != 0:
            for node, coreness in rb.get_coreness().items():
                graph.nodes[node]['core_rb'] = coreness
            graph.graph['coreness_rb'] = rb.score()[0]
        else:
            for node in graph.nodes:
                graph.nodes[node]['core_rb'] = 0
            graph.graph['coreness_rb'] = 0

    @staticmethod
    def assign_communities(graph):
        """Compute modular communities of ``graph`` (``nx.DiGraph``).
        Assign community number ``community`` to each node.
        Assign ``modularity`` to ``graph``.
        See ``greedy_modularity_communities`` in ``networkx``.
        """
        communities = nx.algorithms.community \
            .greedy_modularity_communities(nx.Graph(graph))
        for node in graph.nodes:
            graph.nodes[node]['community'] = [i for i, c in enumerate(communities)
                                              if node in c][0]
        graph.graph['modularity'] = nx.algorithms.community.quality \
            .modularity(nx.Graph(graph), communities)

    @staticmethod
    def assign_cores_to_communities(graph):
        """Compute core-periphery structure within each community and
        store per-community coreness on ``graph``."""
        num_comm = max([graph.nodes[n]['community'] for n in graph.nodes])
        # BUGFIX: labels run 0..num_comm inclusive; the dicts previously
        # omitted the last label (harmless only because every label is
        # assigned below, but misleading).
        community_coreness_be = {i: 0 for i in range(num_comm + 1)}
        community_coreness_rb = {i: 0 for i in range(num_comm + 1)}
        for i in range(num_comm + 1):
            community = [n for n in graph.nodes if graph.nodes[n]['community'] == i]
            subgraph = graph.subgraph(community).copy()
            matrix = nx.convert_matrix.to_numpy_array(subgraph)
            if (matrix.size > 1) & (np.sum(matrix) > 0):
                # borgatti-everett
                be = bct.core_periphery_dir(matrix)
                # rombach
                rb = cpa.Rombach()
                rb.detect(subgraph)
                # assign
                community_coreness_be[i] = be[1]
                community_coreness_rb[i] = rb.score()[0]
                cp_rb = rb.get_coreness()
                for j, node in enumerate(subgraph.nodes):
                    graph.nodes[node]['community_core_be'] = be[0][j]
                    graph.nodes[node]['community_core_rb'] = cp_rb[node]
            else:
                # Trivial community (single node or no edges): everything
                # is "core" by convention.
                community_coreness_be[i] = 0
                community_coreness_rb[i] = 0
                for j, node in enumerate(subgraph.nodes):
                    graph.nodes[node]['community_core_be'] = 1
                    graph.nodes[node]['community_core_rb'] = 1
        graph.graph['community_coreness_be'] = community_coreness_be
        graph.graph['community_coreness_rb'] = community_coreness_rb
class Model(PersistentHomology):
    """Evolves a graph of tf-idf "seed" vectors from a parent network.

    Attributes
    ----------
    graph: networkx.DiGraph
    graph_parent: networkx.DiGraph
    vectors: scipy.sparse.csc_matrix
    vectors_parent: scipy.sparse.csc_matrix
    seeds: {node string: [scipy.sparse.csc_matrix]}
    thresholds: {node string: [float]}
    year: int
    record: pandas.DataFrame
        record of evolution
    year_start: int
    n_seeds: int
        number of seeds per node
    point, insert, delete: tuple
        See ``mutate()``.
    rvs: lambda n->float
        random values for point mutations & insertions
    dct: gensim.corpora.dictionary
    create: lambda n-> float
        thresholds of cosine similarity with parent
        for node creation
    crossover: float
        threshold of cosine similarity with parent
        for crossing over nodes
    """

    def __init__(self, graph_parent, vectors_parent, year_start, start_nodes,
                 n_seeds, dct, point, insert, delete, rvs,
                 create, crossover=None):
        """
        Parameters
        ----------
        start_nodes: lambda wiki.Model -> list(networkx.Nodes)
        """
        PersistentHomology.__init__(self)
        self.graph_parent = graph_parent
        self.vectors_parent = vectors_parent
        self.year_start = year_start
        self.year = year_start
        self.seeds = {}
        self.thresholds = {}
        self.record = pd.DataFrame()
        nodes = list(graph_parent.nodes)
        self.start_nodes = start_nodes(self)
        self.graph = graph_parent.subgraph(self.start_nodes).copy()
        self.vectors = sp.sparse.hstack([vectors_parent[:, nodes.index(n)]
                                         for n in self.start_nodes])
        self.n_seeds = n_seeds
        self.dct = dct
        self.point = point
        self.insert = insert
        self.delete = delete
        self.rvs = rvs
        self.create = create
        self.crossover = crossover

    def __str__(self):
        return f"Model\tparent: '{self.graph_parent.name}'\n" +\
               f"\tyear_start: {self.year_start}\n" +\
               f"\tstart_nodes: {self.start_nodes}\n" +\
               f"\tn_seeds: {self.n_seeds}\n" +\
               f"\tpoint: ({self.point[0]:.4f}, {self.point[1]:.4f})\n" +\
               f"\tinsert: ({self.insert[0]}, {self.insert[1]:.4f}, {type(self.insert[2])})\n" +\
               f"\tdelete: ({self.delete[0]}, {self.delete[1]:.4f})"

    def __repr__(self):
        return self.__str__()

    def evolve(self, until, record=False):
        """Evolves a graph based on vector representations
        until ``until(self) == True``. One iteration = one year:
        (re)initialize seeds, mutate them, then spawn nodes from
        seeds that drifted far enough from their parent.
        """
        year_start = self.year
        while not until(self):
            sys.stdout.write(f"\r{year_start} > {self.year} " +
                             f"n={self.graph.number_of_nodes()} ")
            sys.stdout.flush()
            self.initialize_seeds()
            self.mutate_seeds()
            self.create_nodes()
            if record:
                self.record = pd.concat([self.record] +
                                        [pd.DataFrame({'Year': self.year,
                                                       'Parent': seed,
                                                       'Seed number': i,
                                                       'Seed vectors': seed_vec}, index=[0])
                                         for seed, seed_vecs in self.seeds.items()
                                         for i, seed_vec in enumerate(seed_vecs)],
                                        ignore_index=True,
                                        sort=False)
            self.year += 1
        print('')

    def initialize_seeds(self):
        """Top each node's seed pool back up to ``n_seeds`` copies of its
        current vector, with matching creation thresholds."""
        nodes = list(self.graph.nodes)
        for i, node in enumerate(nodes):
            if node not in self.seeds.keys():
                self.seeds[node] = []
            if node not in self.thresholds.keys():
                self.thresholds[node] = []
            while len(self.seeds[node]) < self.n_seeds:
                self.seeds[node] += [self.vectors[:, i].copy()]
            while len(self.thresholds[node]) < self.n_seeds:
                self.thresholds[node] += [self.create(1)[0]]

    def mutate_seeds(self):
        """Apply ``Model.mutate`` to every seed of every node."""
        for node, vecs in self.seeds.items():
            self.seeds[node] = [Model.mutate(vec, self.rvs, self.point,
                                             self.insert, self.delete)
                                for vec in vecs]

    def crossover_seeds(self):
        """Cross over pairs of seeds (from different parents) whose cosine
        similarity exceeds the ``crossover`` threshold."""
        nodes = list(self.graph.nodes)
        for i in range(len(nodes)):
            seeds_i = sp.sparse.hstack(self.seeds[nodes[i]])
            for j in range(i + 1, len(nodes)):
                seeds_j = sp.sparse.hstack(self.seeds[nodes[j]])
                similarity = smp.cosine_similarity(seeds_i.transpose(),
                                                   seeds_j.transpose())
                # BUGFIX: the attribute is ``self.crossover``;
                # ``self.threshold`` does not exist (AttributeError).
                for k, l in np.argwhere(similarity > self.crossover):
                    cross = Model.crossover(seeds_i[:, k], seeds_j[:, l])
                    choice = np.random.choice(2)
                    self.seeds[nodes[i]][k] = cross if choice else self.vectors[:, i]
                    self.seeds[nodes[j]][l] = cross if not choice else self.vectors[:, j]

    def create_nodes(self):
        """Turn seeds that drifted below their similarity threshold into
        new nodes; keep the rest for further mutation."""
        nodes = list(self.graph.nodes)
        for i, node in enumerate(nodes):
            parent = self.vectors[:, i]
            seeds = sp.sparse.hstack(self.seeds[node])
            sim_to_parent = smp.cosine_similarity(parent.transpose(), seeds.transpose())
            # BUGFIX: the original popped from self.seeds[node] while
            # enumerating it, which skipped the element after each pop and
            # misaligned seed/threshold indices. Collect survivors instead.
            keep = []
            for j, seed_vec in enumerate(self.seeds[node]):
                if sim_to_parent[0, j] < self.thresholds[node][j]:
                    Model.connect(seed_vec, self.graph, self.vectors, self.dct)
                    self.vectors = sp.sparse.hstack([self.vectors, seed_vec])
                else:
                    keep.append(j)
            self.seeds[node] = [self.seeds[node][j] for j in keep]
            self.thresholds[node] = [self.thresholds[node][j] for j in keep]
        for node in self.graph.nodes:
            if 'year' not in self.graph.nodes[node].keys():
                self.graph.nodes[node]['year'] = self.year

    @staticmethod
    def mutate(x, rvs, point=(0, 0), insert=(0, 0, None), delete=(0, 0)):
        """Mutates vector ``x`` with point mutations,
        insertions, and deletions. Insertions and point
        mutations draw from a random process ``rvs``.

        Parameters
        ----------
        x: scipy.sparse.csc_matrix
        rvs: lambda (n)-> float
            returns ``n`` random weights in [0,1]
        point: tuple (int n, float p)
            n = number of trials, p = probability per trial
        insert: tuple (n, p, iterable s)
            s = set of row indices from which to select;
            if None, select from all rows
        delete: tuple (n, p)

        Returns
        -------
        scipy.sparse.csc_matrix
            new vector (``x`` itself if it has no stored entries)
        """
        data = x.data
        idx = x.indices
        if idx.size == 0:
            return x
        # point mutations
        n_point = np.random.binomial(point[0], point[1])
        i_point = np.random.choice(x.size, size=n_point, replace=False)
        data[i_point] = rvs(n_point)
        # insertion
        n_insert = np.random.binomial(insert[0], insert[1])
        for _ in range(n_insert):
            while True:
                # BUGFIX: test against None — truth value of an array
                # candidate set is ambiguous and would raise.
                insert_idx = np.random.choice(insert[2]) if insert[2] is not None \
                    else np.random.choice(x.shape[0])
                if insert_idx not in idx:
                    break
            idx = np.append(idx, insert_idx)
            data = np.append(data, rvs(1))
        # deletion
        n_delete = np.random.binomial(delete[0], delete[1])
        i_delete = np.random.choice(idx.size, size=n_delete, replace=False)
        idx = np.delete(idx, i_delete)
        data = np.delete(data, i_delete)
        y = sp.sparse.csc_matrix((data, (idx, np.zeros(idx.shape, dtype=int))),
                                 shape=x.shape)
        return y

    @staticmethod
    def connect(seed_vector, graph, vectors, dct, top_words=10, match_n=6):
        """Add a node for ``seed_vector`` and connect it to every existing
        node sharing at least ``match_n`` top words.

        Parameters
        ----------
        seed_vector: scipy.sparse.csc_matrix
        graph: networkx.DiGraph (not optional)
        vectors: scipy.sparse.csc_matrix (not optional)
        dct: gensim.corpora.dictionary (not optional)
        top_words: int
            NOTE(review): currently unused; find_top_words uses its own
            default — confirm whether it should be forwarded.
        match_n: int
            how many top words must overlap to add an edge
        """
        seed_top_words, seed_top_idx = Model.find_top_words(seed_vector, dct)
        seed_name = ' '.join(seed_top_words)
        nodes = list(graph.nodes)
        graph.add_node(seed_name)
        for i, node in enumerate(nodes):
            node_vector = vectors[:, i]
            node_top_words, node_top_idx = Model.find_top_words(node_vector, dct)
            if len(set(seed_top_idx).intersection(set(node_vector.indices))) >= match_n or \
                    len(set(node_top_idx).intersection(set(seed_vector.indices))) >= match_n:
                graph.add_edge(node, seed_name)

    @staticmethod
    def find_top_words(x, dct, top_n=10):
        """Return the ``top_n`` highest-weighted words of ``x`` with
        stopwords removed.

        Parameters
        ----------
        x: scipy.sparse.csc_matrix
        dct: gensim.corpora.dictionary
        top_n: int

        Returns
        -------
        words: list of strings (stopwords removed)
        idx_vector: list of int row indices matching ``words``
        """
        top_idx = np.argsort(x.data)[-top_n:]
        idx = [x.indices[i] for i in top_idx]
        words = [dct[i] for i in idx]
        words_nostop = gpp.remove_stopwords(' '.join(words)).split(' ')
        # NOTE(review): words.index(w) maps duplicates to their first
        # occurrence — confirm duplicate top words cannot occur.
        idx_keep = list(map(lambda w: words.index(w), set(words).intersection(words_nostop)))
        idx_nostop = list(map(idx.__getitem__, idx_keep))
        return words_nostop, idx_nostop

    @staticmethod
    def crossover(v1, v2):
        """Crosses two vectors by combining half of one
        and half of the other.

        Parameters
        ----------
        v1, v2: scipy.sparse.matrix

        Returns
        -------
        v3: scipy.sparse.matrix
        """
        idx1 = np.random.choice(v1.size, size=int(v1.size / 2))
        idx2 = np.random.choice(v2.size, size=int(v2.size / 2))
        data = np.array([v1.data[i] for i in idx1] +
                        [v2.data[i] for i in idx2])
        idx = np.array([v1.indices[i] for i in idx1] +
                       [v2.indices[i] for i in idx2])
        # BUGFIX: removed non-Python residue ("| module/wiki.py | import os")
        # that was fused onto the final return line.
        v3 = sp.sparse.csc_matrix((data, (idx, np.zeros(idx.shape, dtype=int))),
                                  shape=v1.shape)
        return v3
import re
import sys
import bz2
import bct
import math
import random
import pickle
import numpy as np
import scipy as sp
import pandas as pd
import dionysus as d
import networkx as nx
import cpnet as cpa
import gensim.utils as gu
import gensim.models as gm
import gensim.matutils as gmat
import gensim.parsing.preprocessing as gpp
import mwparserfromhell as mph
import xml.etree.ElementTree as ET
import sklearn.metrics.pairwise as smp
class Dump:
"""``Dump`` loads and parses dumps from wikipedia from
``path_xml`` with index ``path_idx``.
Attributes
----------
idx: dictionary
``{'page_name': (byte offset, page id, block size)}``
Cached. Lazy.
links: list of strings
All links.
article_links: list of strings
Article links (not files, categories, etc.)
years: list of int
Years in the History section of a wikipedia page
BC denoted as negative values
page: mwparserfromhell.wikicode
Current loaded wiki page
path_xml: string
Path to the zipped XML dump file.
path_idx: string
Path to the zipped index file.
offset_max: int
Maximum offset. Set as the size of the zipped dump.
cache: xml.etree.ElementTree.Node
Cache of the XML tree in current block
"""
MAX_YEAR = 2020
def __init__(self, path_xml, path_idx):
self._idx = {}
self._links = []
self._article_links = []
self._years = []
self._page = None
self.path_xml = path_xml
self.path_idx = path_idx
self.offset_max = 0
self.cache = (0, None) # offset, cache
@property
def idx(self):
if self._idx:
return self._idx
else:
print('Dump: Loading index...')
with bz2.BZ2File(self.path_idx, 'rb') as file:
lines = [line for line in file]
block_end = os.path.getsize(self.path_xml)
offset_prev = block_end
for line in reversed(lines):
offset, pid, name = line.strip().split(b':', 2)
offset, pid, name = (int(offset), int(pid), name.decode('utf8'))
block_end = offset_prev if offset < offset_prev else block_end
self._idx[name] = (offset, pid, block_end-offset)
offset_prev = offset
self.offset_max = max([x[0] for x in self._idx.values()])
print('Dump: Loaded.')
return self._idx
@property
def links(self):
if self._links:
return self._links
elif self.page:
self._links = [str(x.title) for x in self.page.filter_wikilinks()]
self._links = [link.split('#')[0] for link in self._links]
self._links = [link.split(' ') for link in self._links]
self._links = [[words[0].capitalize()] + words[1:] for words in self._links]
self._links = [' '.join(words) for words in self._links]
return self._links
else:
return self._links
@property
def article_links(self):
if self._article_links:
return self._article_links
elif self.links:
self._article_links = [x for x in self.links if ':' not in x]
return self._article_links
else:
return self._article_links
@property
def years(self):
if self._years:
return self._years
elif self.page:
history = Dump.get_history(self.page)
top = self.page.get_sections()[0].strip_code()
self._years = Dump.filter_years(top + history)
return self._years
else:
return self._years
@property
def page(self):
    """Currently loaded page (``mwparserfromhell.wikicode``) or ``None``."""
    return self._page

@page.setter
def page(self, page):
    # Invalidate all per-page caches whenever a new page is loaded.
    self._page = page
    self._links = []
    self._article_links = []
    self._years = []
def load_page(self, page_name, filter_top=False):
    """Loads & returns page (``mwparserfromhell.wikicode``)
    named ``page_name`` from the dump file. Returns only the
    top section if ``filter_top``.  Follows redirects recursively.
    Returns ``None`` (and sets ``self.page = None``) for unknown names.
    """
    if page_name not in self.idx.keys():
        self.page = None
        return
    offset, pid, block_size = self.idx[page_name]
    if offset == self.cache[0]:
        # Same bz2 block as the previous lookup: reuse the parsed tree.
        root = self.cache[1]
    else:
        xml = Dump.fetch_block(self.path_xml, offset, block_size)
        # Wrap the fragment in a root tag so it parses as a document.
        # The closing tag is appended for every block except the last
        # (presumably the dump's own closing tag lives there — the
        # multiplier drops it when offset == offset_max).
        xml = b'<mediawiki>' + xml + b'</mediawiki>'*(offset != self.offset_max)
        root = ET.fromstring(xml)
        self.cache = (offset, root)
    text = Dump.search_id(root, pid)
    text = Dump.filter_top_section(text) if filter_top else text
    self.page = mph.parse(text, skip_style_tags = True)
    if self.page and 'REDIRECT' in self.page.strip_code():
        # Redirect pages hold their target as the first wikilink.
        redirect = self.page.filter_wikilinks()[0].title
        return self.load_page(str(redirect))
    else:
        return self.page
@staticmethod
def fetch_block(path, offset, block_size):
""" Fetches block of ``block_size`` (``int``) bytes
at ``offset`` (``int``) in the zipped dump at
``path`` (``string``) and returns the uncompressed
text (``string``).
"""
with open(path, 'rb') as file:
file.seek(offset)
return bz2.decompress(file.read(block_size))
@staticmethod
def search_id(root, pid):
"""Returns the text of the page with id ``pid``"""
for page in root.iter('page'):
if pid == int(page.find('id').text):
return page.find('revision').find('text').text
@staticmethod
def filter_top_section(text):
"""Returns the top section of text,
where the first header has the form ``==Heading==``
"""
head = re.search(r'==.*?==', text)
idx = head.span(0)[0] if head else len(text)
return text[:idx] #(text[:idx], text[idx:])
@staticmethod
def get_history(page):
    """Return the text of the first section whose heading contains
    'History' or 'history'.  Returns ``""`` when absent."""
    matching = [i for i, heading in enumerate(page.filter_headings())
                if 'History' in heading or 'history' in heading]
    if not matching:
        return ""
    sections = page.get_sections(include_headings=True)
    return str(sections[matching[0] + 1].strip_code())
@staticmethod
def filter_years(text):
"""Filters the years from text."""
months = ['january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november', 'december']
prepositions = ['around', 'after', 'at', 'as',
'approximately', 'before', 'between', 'by',
'during', 'from', 'in', 'near', 'past',
'since', 'until', 'within'] # removed: about, on
conjugations = ['and']
articles = ['the']
times = ['early', 'mid', 'late']
patterns = months + prepositions + conjugations + articles + times
re_string = r'\b(' + '|'.join(patterns) + r')\b(\s|-)\b([0-9]{3,4})s?\b(?i)(?!\sMYA)\s?(BCE|BC)?'
years = [int(match.group(3)) * (-2*bool(match.group(4))+1)
for match in re.finditer(re_string, text, re.IGNORECASE)]
re_string = r'([0-9]{1,2})(st|nd|rd|th) century\s?(BCE|BC)?'
centuries = [(int(match.group(1)) * 100 - 100) * (-2*bool(match.group(2))+1)
for match in re.finditer(re_string, text, re.IGNORECASE)]
years += centuries
years = [y for y in years if y<Dump.MAX_YEAR]
return sorted(years + centuries)
class Corpus:
    """``Corpus`` is an ``iterable`` & an ``iterator``
    that uses ``Dump`` to iterate through articles.
    Parameters
    ----------
    dump: wiki.Dump
    output: string
        'doc' for array of documents
        'tag' for TaggedDocument(doc, [self.i])
        'bow' for bag of words [(int, int)]
    dct: gensim.corpus.Dictionary
        used to create BoW representation ('bow' output only)
    """
    def __init__(self, dump, output='doc', dct=None):
        self.dump = dump
        # All page names in the dump, in index order.
        self.names = list(self.dump.idx.keys())
        self.output = output
        self.dct = dct
    def __iter__(self):
        # Restart iteration from the first article.
        self.i = 0
        return self
    def __next__(self):
        if self.i < len(self.names):
            # Progress indicator; '\r' overwrites the same console line.
            sys.stdout.write("\rCorpus index: " + str(self.i+1) +
                             '/' + str(len(self.names)))
            sys.stdout.flush()
            if self.output == 'doc':
                doc = self[self.i]
            elif self.output == 'tag':
                doc = gm.doc2vec.TaggedDocument(self[self.i], [self.i])
            elif self.output == 'bow':
                # NOTE(review): 'bow' requires self.dct; any unknown
                # ``output`` value leaves ``doc`` unbound (NameError).
                doc = self.dct.doc2bow(self[self.i])
            self.i += 1
            return doc
        else:
            raise StopIteration
    def __getitem__(self, index):
        # Load page ``index`` and return its preprocessed token list.
        doc = self.dump.load_page(self.names[index])
        return gu.simple_preprocess(doc.strip_code())
class GraphContainer():
    """Container for a hyperlink graph plus lazily-derived views.

    Attributes
    ----------
    graph: networkx.DiGraph
        node name is name of wikipedia page,
        ``year`` attribute indicates year
    numbered: networkx.DiGraph
        node name is an index (see nodes),
        ``year`` is an index (see years), lazy
    nodes: list
        List of node names,
        indexed by node in ``numbered``, lazy
    years: list
        Sorted unique years,
        indexed by ``year`` in ``numbered``, lazy
    nodes_for_year: dict
        ``{int year: [int node_index]}``, lazy
    """
    def __init__(self):
        self.graph = nx.DiGraph()
        self._numbered = None
        self._nodes = []
        self._years = []
        self._nodes_for_year = {}
    @property
    def numbered(self):
        """Integer-relabelled copy of ``graph`` (lazy)."""
        # 'is not None' rather than truthiness: an empty DiGraph is falsy
        # (bool falls back to node count), which would silently defeat
        # the cache and rebuild the graph on every access.
        if self._numbered is not None:
            return self._numbered
        self._numbered = nx.DiGraph()
        # Precompute index maps once instead of calling list.index()
        # inside the loop (accidentally quadratic in graph size).
        node_index = {node: i for i, node in enumerate(self.nodes)}
        year_index = {year: i for i, year in enumerate(self.years)}
        for node in self.graph.nodes:
            self._numbered.add_node(
                node_index[node],
                year=year_index[self.graph.nodes[node]['year']])
            self._numbered.add_edges_from(
                [(node_index[node], node_index[succ])
                 for succ in self.graph.successors(node)])
        return self._numbered
    @property
    def nodes(self):
        """Node names in graph order (lazy)."""
        if not self._nodes:
            self._nodes = list(self.graph.nodes)
        return self._nodes
    @property
    def years(self):
        """Sorted unique ``year`` attribute values (lazy)."""
        if not self._years:
            all_years = [self.graph.nodes[n]['year']
                         for n in self.graph.nodes()]
            self._years = sorted(set(all_years))
        return self._years
    @property
    def nodes_for_year(self):
        """``{year: [node indices with that year]}`` (lazy)."""
        if not self._nodes_for_year:
            node_index = {n: i for i, n in enumerate(self.graph.nodes)}
            self._nodes_for_year = {
                year: [node_index[n] for n in self.graph.nodes
                       if self.graph.nodes[n]['year'] == year]
                for year in self.years}
        return self._nodes_for_year
class PersistentHomology(GraphContainer):
    """GraphContainer extended with persistent-homology computations
    (via ``dionysus``) over the year-ordered filtration of the graph.

    Attributes
    ----------
    cliques: list of lists
        all cliques of the undirected ``numbered`` graph, lazy
    filtration: dionysus.filtration
        cliques entering at the year all their nodes exist, lazy
    persistence: dionysus.reduced_matrix
        lazy
    barcodes: pandas.DataFrame
        lazy
    """
    def __init__(self):
        GraphContainer.__init__(self)
        self._cliques = None
        self._filtration = None
        self._persistence = None
        self._barcodes = None
    @property
    def cliques(self):
        # Cliques of the symmetrized (undirected) numbered graph.
        if self._cliques:
            return self._cliques
        else:
            self._cliques = list(nx.algorithms.clique.\
                                 enumerate_all_cliques(nx.Graph(self.numbered)))
            return self._cliques
    @property
    def filtration(self):
        # NOTE(review): '!= None' used deliberately here (an empty
        # Filtration may be falsy); PEP 8 would prefer 'is not None'.
        if self._filtration != None:
            return self._filtration
        else:
            self._filtration = d.Filtration()
            nodes_so_far = []
            for year in self.years:
                nodes_now = self.nodes_for_year[year]
                nodes_so_far.extend(nodes_now)
                # A clique enters the filtration in the first year by
                # which all of its nodes exist.
                for clique in self.cliques:
                    if all([n in nodes_so_far for n in clique]):
                        self._filtration.append(d.Simplex(clique, year))
            self._filtration.sort()
            return self._filtration
    @property
    def persistence(self):
        if self._persistence:
            return self._persistence
        else:
            self._persistence = d.homology_persistence(self.filtration)
            return self._persistence
    @property
    def barcodes(self):
        # Cached only when a non-empty DataFrame was already computed.
        if isinstance(self._barcodes, pd.DataFrame)\
           and len(self._barcodes.index) != 0:
            return self._barcodes
        else:
            self._barcodes = PersistentHomology.compute_barcodes(
                self.filtration, self.persistence, self.graph, self.nodes)
            return self._barcodes
    @staticmethod
    def compute_barcodes(f, m, graph, names):
        """Uses dionysus filtration & persistence
        (in reduced matrix form) to compute barcodes.
        Parameters
        ----------
        f: dionysus.Filtration
        m: dionysus.ReducedMatrix
            (see homology_persistence)
        graph: networkx.DiGraph
            source graph; supplies node names and 'year' attributes
        names: list of strings
            names of node indices
        Returns
        -------
        barcodes: pandas.DataFrame
            one row per persistence pair: dimension, birth/death years,
            lifetime, birth/death simplices and nodes, homology nodes
        """
        print('wiki.Net: computing barcodes... (skip negatives)')
        node_list = list(graph.nodes)
        barcodes = []
        for i, c in enumerate(m):
            # Skip negative simplices: each pair is reported once, from
            # its birth (positive) index.
            if m.pair(i) < i: continue
            sys.stdout.write("\rwiki.Net: barcode {}/{}".\
                             format(i+1,len(m)))
            sys.stdout.flush()
            dim = f[i].dimension()
            birth_year = int(f[i].data)
            birth_simplex = [names[s] for s in f[i]]
            # Nodes of the birth simplex that actually appeared in the
            # birth year (the simplex may contain older nodes too).
            birth_nodes = [n for n in birth_simplex
                           if graph.nodes[n]['year']==birth_year]
            if m.pair(i) != m.unpaired:
                death_year = int(f[m.pair(i)].data)
                death_simplex = [names[s] for s in f[m.pair(i)]]
                death_nodes = [n for n in death_simplex
                               if graph.nodes[n]['year']==death_year]
            else:
                # Unpaired simplex: the homology class never dies.
                death_year = np.inf
                death_simplex = []
                death_nodes = []
            # The chain of the killing simplex (or of the birth simplex
            # for immortal classes) gives the nodes carrying the class.
            pair = m.pair(i) if m.pair(i) != m.unpaired else np.inf
            chain = m[pair] if pair != np.inf else m[i]
            simp_comp = [f[entry.index] for entry in chain]
            nodes = [node_list[idx] for simplex in simp_comp for idx in simplex]
            barcodes.append([dim, birth_year, death_year, death_year-birth_year,
                             birth_simplex, death_simplex,
                             birth_nodes, death_nodes, list(set(nodes))])
        print('')
        # Order rows by homology dimension.
        barcodes.sort(key=lambda x: x[0])
        bar_data = pd.DataFrame(data=barcodes,
                                columns=['dim', 'birth', 'death', 'lifetime',
                                         'birth simplex', 'death simplex',
                                         'birth nodes', 'death nodes',
                                         'homology nodes'])
        return bar_data
class Net(PersistentHomology):
    """``Net`` is a wrapper for ``networkx.DiGraph``.
    Uses ``dionysus`` for persistence homology.
    Attributes
    ----------
    tfidf: scipy.sparse.csc.csc_matrix
        sparse column matrix of tfidfs,
        ordered by nodes, also stored in
        ```self.graph.graph['tfidf']```, lazy
    MAX_YEAR: int
        ``year = MAX_YEAR (2020)`` for nodes with parents
        without years
    YEAR_FILLED_DELTA: int
        ``year = year of parents + YEAR_FILLED_DELTA (1)``
    """
    MAX_YEAR = 2020
    YEAR_FILLED_DELTA = 1
    def __init__(self, path_graph='', path_barcodes=''):
        """Optionally load a saved graph and/or barcodes from disk."""
        PersistentHomology.__init__(self)
        self._tfidf = None
        if path_graph:
            self.load_graph(path_graph)
        if path_barcodes:
            self.load_barcodes(path_barcodes)
    @property
    def tfidf(self):
        """Cached tf-idf matrix, read from ``graph.graph['tfidf']``."""
        # 'is not None': truth-testing a scipy sparse matrix raises
        # ValueError for matrices with more than one element, so the
        # previous ``if self._tfidf:`` broke on the second access.
        if self._tfidf is not None:
            return self._tfidf
        elif 'tfidf' in self.graph.graph.keys():
            self._tfidf = self.graph.graph['tfidf']
            return self._tfidf
    def build_graph(self, name='', dump=None, nodes=None, depth_goal=1,
                    filter_top=True, remove_isolates=True, add_years=True,
                    fill_empty_years=True, model=None, dct=None,
                    compute_core_periphery=True, compute_communities=True,
                    compute_community_cores=True):
        """ Builds ``self.graph`` (``networkx.Graph``) from nodes (``list``
        of ``string``). Set ``model`` (from ``gensim``) and ``dct``
        (``gensim.corpora.Dictionary``) for weighted edges.
        """
        self.graph = nx.DiGraph()
        self.graph.name = name
        if not dump:
            raise AttributeError('wiki.Net: Provide wiki.Dump object.')
        print('wiki.Net: traversing Wikipedia...')
        Net.bft(self.graph, dump, nodes, depth_goal=depth_goal,
                nodes=nodes, filter_top=filter_top)
        if remove_isolates:
            print('wiki.Net: removing isolates...')
            # Materialize first: nx.isolates returns a generator over the
            # graph, and removing nodes while it iterates raises a
            # RuntimeError (dictionary changed size during iteration).
            self.graph.remove_nodes_from(list(nx.isolates(self.graph)))
        if add_years:
            print('wiki.Net: adding years...')
            for node in self.graph.nodes:
                dump.load_page(node)
                # Empty list marks "year unknown" for later filling.
                self.graph.nodes[node]['year'] = dump.years[0] if len(dump.years)>0 else []
            self.graph.graph['num_years'] = sum(
                [bool(y) for y in nx.get_node_attributes(self.graph, 'year').values()]
            )
        if fill_empty_years:
            print('wiki.Net: filling empty years...')
            # First pass: only fill nodes whose parents all have years;
            # second pass: fill from any dated parent; finally default
            # the rest to MAX_YEAR.
            nodes_filled = True
            while nodes_filled:
                nodes_filled = Net.fill_empty_nodes(self.graph, full_parents=True)
            nodes_filled = True
            while nodes_filled:
                nodes_filled = Net.fill_empty_nodes(self.graph, full_parents=False)
            for node in self.graph.nodes:
                if not self.graph.nodes[node]['year']:
                    self.graph.nodes[node]['year'] = Net.MAX_YEAR
        if model and dct:
            print('wiki.Net: calculating weights...')
            self.graph.graph['tfidf'] = Net.compute_tfidf(self.nodes, dump, model, dct)
            Net.set_weights(self.graph)
        if compute_core_periphery:
            print('wiki.Net: computing core-periphery...')
            Net.assign_core_periphery(self.graph)
        if compute_communities:
            print('wiki.Net: computing communities...')
            Net.assign_communities(self.graph)
        if compute_community_cores:
            print('wiki.Net: computing cores within communities...')
            Net.assign_cores_to_communities(self.graph)
    def load_graph(self, path):
        """Loads ``graph`` from ``path``.
        If ``filename.gexf`` then read as ``gexf``.
        Else, use ``pickle``."""
        if path.split('.')[-1]=='gexf':
            self.graph = nx.read_gexf(path)
        else:
            self.graph = nx.read_gpickle(path)
    def save_graph(self, path):
        """Saves ``graph`` at ``path``.
        If ``filename.gexf`` then save as ``gexf``.
        Else, use ``pickle``."""
        if path.split('.')[-1]=='gexf':
            nx.write_gexf(self.graph, path)
        else:
            nx.write_gpickle(self.graph, path)
    def load_barcodes(self, path):
        """Loads ``barcodes`` from ``pickle``."""
        self._barcodes = pickle.load(open(path, 'rb'))
    def save_barcodes(self, path):
        """Saves ``barcodes`` as ``pickle``."""
        pickle.dump(self.barcodes, open(path, 'wb'))
    def randomize(self, null_type,
                  compute_core_periphery=True, compute_communities=True,
                  compute_community_cores=True):
        """Returns a new ``wiki.Net`` with a randomized
        copy of ``graph``. Set ``null_type`` as one of
        ``'year'``, ``'target'``, ``'source'``.
        """
        network = Net()
        network.graph = self.graph.copy()
        if null_type == 'year':
            # Shuffle the year attribute across nodes.
            years = list(nx.get_node_attributes(network.graph, 'year').values())
            random.shuffle(years)
            for node in network.graph.nodes:
                network.graph.nodes[node]['year'] = years.pop()
        elif null_type == 'target':
            # Rewire each edge to a random target (excluding the old one),
            # preserving the edge's weight.
            nodes = list(network.graph.nodes)
            for s, t in self.graph.edges:
                network.graph.remove_edge(s, t)
                nodes.remove(t)
                network.graph.add_edge(s, random.choice(nodes),
                                       weight=self.graph[s][t]['weight'])
                nodes.append(t)
        elif null_type == 'source':
            # Rewire each edge to a random source (excluding the old one).
            nodes = list(network.graph.nodes)
            for s, t in self.graph.edges:
                network.graph.remove_edge(s, t)
                nodes.remove(s)
                network.graph.add_edge(random.choice(nodes), t,
                                       weight=self.graph[s][t]['weight'])
                nodes.append(s)
        if compute_core_periphery:
            print('wiki.Net: computing core-periphery...')
            Net.assign_core_periphery(network.graph)
        if compute_communities:
            print('wiki.Net: computing communities...')
            Net.assign_communities(network.graph)
        if compute_community_cores:
            print('wiki.Net: computing cores within communities...')
            # Fixed: previously operated on self.graph (the original),
            # leaving the randomized copy without community cores.
            Net.assign_cores_to_communities(network.graph)
        return network
    @staticmethod
    def fill_empty_nodes(graph, full_parents=True):
        """ Fills nodes without ``year`` with the ``year`` of parents
        Parameters
        ----------
        graph: networkx.DiGraph
        full_parents: bool
            whether to fill empty nodes that have
            all parents with non-empty 'year'
        Returns
        -------
        bool
            whether at least 1 empty node was filled
        """
        empty_nodes = [n for n in graph.nodes if not graph.nodes[n]['year']]
        for node in empty_nodes:
            years = [graph.nodes[p]['year'] for p in graph.predecessors(node)]
            if not years:
                continue
            if full_parents:
                # Fill only when every parent has a year.
                if [] not in years:
                    graph.nodes[node]['year'] = max(years) \
                                                + Net.YEAR_FILLED_DELTA
                    return True
            else:
                # Fill from whichever parents do have a year.
                years_filtered = [y for y in years if y]
                if years_filtered:
                    graph.nodes[node]['year'] = max(years_filtered) \
                                                + Net.YEAR_FILLED_DELTA
                    return True
        return False
    @staticmethod
    def bft(graph, dump, queue, depth_goal=1, nodes=None, filter_top=True):
        """Breadth-first traversal of hyperlink graph.
        Parameters
        ----------
        graph: networkx.DiGraph
        dump: wiki.Dump
        queue: list of **strings**
            names of Wikipedia pages
        depth_goal: int
        nodes: list of **strings**
            names of Wikipedia pages; when given, traversal is
            restricted to links within this set (see ``Net.filter``)
        filter_top: bool
        """
        queue = queue.copy()
        depth = 0
        depth_num_items = len(queue)
        depth_inc_pending = False
        print('wiki.Net: depth = ' + str(depth))
        while queue:
            name = queue.pop(0)
            sys.stdout.write("\rwiki.Net: len(queue) = " + str(len(queue)))
            sys.stdout.flush()
            depth_num_items -= 1
            if depth_num_items == 0:
                # Finished the current BFS level.
                depth += 1
                print('\nwiki.Net: depth = ' + str(depth))
                depth_inc_pending = True
                if depth == depth_goal: break
            page = dump.load_page(name, filter_top=filter_top)
            if not page: continue
            links = [l for l in dump.article_links
                     if Net.filter(name, l, graph, nodes)]
            for link in links:
                # Edge direction: link -> page (the linked page "feeds"
                # the page that cites it).
                graph.add_edge(link, name, weight=1)
                if link not in queue:
                    queue.append(link)
            if depth_inc_pending:
                depth_num_items = len(queue)
                depth_inc_pending = False
    @staticmethod
    def filter(page, link, graph, nodes=None):
        """Filter out links: outside ``nodes`` (when given) or
        already present as an edge."""
        if nodes and link not in nodes:
            return False
        if (page, link) in graph.edges:
            return False
        return True
    @staticmethod
    def compute_tfidf(nodes, dump, model, dct):
        """Compute tf-idf of pages with titles in ``nodes``.
        Parameters
        ----------
        nodes: list of nodes
        dump: wiki.Dump
        model: gensim.modes.tfidfmodel.TfidfModel
        dct: gensim.corpora.Dictionary
        Returns
        -------
        vecs: scipy.sparse.csc.csc_matrix
        """
        pages = [dump.load_page(page) for page in nodes]
        bows = [model[dct.doc2bow(gu.simple_preprocess(page.strip_code()))]
                if page else []
                for page in pages]
        return gmat.corpus2csc(bows)
    @staticmethod
    def set_weights(graph):
        """Set the weights of graph (``networkx.DiGraph``) as
        the cosine similarity between ``graph.graph['tfidf']``
        vectors of nodes."""
        vecs = graph.graph['tfidf']
        for n1, n2 in graph.edges:
            v1 = vecs[:,list(graph.nodes).index(n1)].transpose()
            v2 = vecs[:,list(graph.nodes).index(n2)].transpose()
            graph[n1][n2]['weight'] = smp.cosine_similarity(X=v1, Y=v2)[0,0]
    @staticmethod
    def assign_core_periphery(graph):
        """ Compute core-periphery of ``graph`` (``nx.DiGraph``;
        converted to symmetric ``nx.Graph``).
        Assign ``core`` as ``1`` or ``0`` to each node.
        Assign ``coreness`` to ``graph``.
        See ``core_periphery_dir()`` in ``bctpy``.
        """
        # borgatti-everett
        be = bct.core_periphery_dir(nx.convert_matrix.to_numpy_array(graph))
        for i, node in enumerate(graph.nodes):
            graph.nodes[node]['core_be'] = be[0][i]
        graph.graph['coreness_be'] = be[1]
        # rombach
        rb = cpa.Rombach()
        rb.detect(graph)
        if rb.get_coreness() != 0:
            for node, coreness in rb.get_coreness().items():
                graph.nodes[node]['core_rb'] = coreness
            graph.graph['coreness_rb'] = rb.score()[0]
        else:
            # Detection failed: mark everything peripheral.
            for node in graph.nodes:
                graph.nodes[node]['core_rb'] = 0
            graph.graph['coreness_rb'] = 0
    @staticmethod
    def assign_communities(graph):
        """ Compute modular communities of ``graph`` (``nx.DiGraph``).
        Assign community number ``community`` to each node.
        Assign ``modularity`` to ``graph``.
        See ``greedy_modularity_communities`` in ``networkx``.
        """
        communities = nx.algorithms.community\
                        .greedy_modularity_communities(nx.Graph(graph))
        for node in graph.nodes:
            graph.nodes[node]['community'] = [i for i,c in enumerate(communities)
                                              if node in c][0]
        graph.graph['modularity'] = nx.algorithms.community.quality\
                                      .modularity(nx.Graph(graph),
                                                  communities)
    @staticmethod
    def assign_cores_to_communities(graph):
        """Compute core-periphery structure within each community and
        record per-community coreness scores on ``graph``."""
        num_comm = max([graph.nodes[n]['community'] for n in graph.nodes])
        community_coreness_be = {i: 0 for i in range(num_comm)}
        community_coreness_rb = {i: 0 for i in range(num_comm)}
        for i in range(num_comm+1):
            community = [n for n in graph.nodes if graph.nodes[n]['community']==i]
            subgraph = graph.subgraph(community).copy()
            matrix = nx.convert_matrix.to_numpy_array(subgraph)
            if (matrix.size>1) & (np.sum(matrix)>0):
                # borgatti-everett
                be = bct.core_periphery_dir(matrix)
                # rombach
                rb = cpa.Rombach()
                rb.detect(subgraph)
                # assign
                community_coreness_be[i] = be[1]
                community_coreness_rb[i] = rb.score()[0]
                cp_rb = rb.get_coreness()
                for j, node in enumerate(subgraph.nodes):
                    graph.nodes[node]['community_core_be'] = be[0][j]
                    graph.nodes[node]['community_core_rb'] = cp_rb[node]
            else:
                # Degenerate community (single node or no edges).
                community_coreness_be[i] = 0
                community_coreness_rb[i] = 0
                for j, node in enumerate(subgraph.nodes):
                    graph.nodes[node]['community_core_be'] = 1
                    graph.nodes[node]['community_core_rb'] = 1
        graph.graph['community_coreness_be'] = community_coreness_be
        graph.graph['community_coreness_rb'] = community_coreness_rb
class Model(PersistentHomology):
    """
    Attributes
    ----------
    graph: networkx.DiGraph
    graph_parent: networkx.DiGraph
    vectors: scipy.sparse.csc_matrix
    vectors_parent: scipy.sparse.csc_matrix
    seeds: {node string: [scipy.sparse.csc_matrix]}
    thresholds: {node string: [float]}
    year: int
    record: pandas.DataFrame
        record of evolution
    year_start: int
    n_seeds: int
        number of seeds per node
    point, insert, delete: tuple
        See ``mutate()``.
    rvs: lambda n->float
        random values for point mutations & insertions
    dct: gensim.corpora.dictionary
    create: lambda n-> float
        thresholds of cosine similarity with parent
        for node creation
    crossover: float
        threshold of cosine similarity with parent
        for crossing over nodes
    """
    def __init__(self, graph_parent, vectors_parent, year_start, start_nodes,
                 n_seeds, dct, point, insert, delete, rvs,
                 create, crossover=None):
        """
        Parameters
        ----------
        start_nodes: lambda wiki.Model -> list(networkx.Nodes)
        """
        PersistentHomology.__init__(self)
        self.graph_parent = graph_parent
        self.vectors_parent = vectors_parent
        self.year_start = year_start
        self.year = year_start
        self.seeds = {}
        self.thresholds = {}
        self.record = pd.DataFrame()
        nodes = list(graph_parent.nodes)
        self.start_nodes = start_nodes(self)
        self.graph = graph_parent.subgraph(self.start_nodes).copy()
        # Column order of self.vectors tracks self.graph's node order.
        self.vectors = sp.sparse.hstack([vectors_parent[:,nodes.index(n)]
                                         for n in self.start_nodes])
        self.n_seeds = n_seeds
        self.dct = dct
        self.point = point
        self.insert = insert
        self.delete = delete
        self.rvs = rvs
        self.create = create
        self.crossover = crossover
    def __str__(self):
        return f"Model\tparent: '{self.graph_parent.name}'\n" +\
               f"\tyear_start: {self.year_start}\n" +\
               f"\tstart_nodes: {self.start_nodes}\n" +\
               f"\tn_seeds: {self.n_seeds}\n" +\
               f"\tpoint: ({self.point[0]:.4f}, {self.point[1]:.4f})\n" +\
               f"\tinsert: ({self.insert[0]}, {self.insert[1]:.4f}, {type(self.insert[2])})\n" +\
               f"\tdelete: ({self.delete[0]}, {self.delete[1]:.4f})"
    def __repr__(self):
        return self.__str__()
    def evolve(self, until, record=False):
        """ Evolves a graph based on vector representations
        until `until (lambda wiki.Model) == True`
        """
        year_start = self.year
        while not until(self):
            sys.stdout.write(f"\r{year_start} > {self.year} "+\
                             f"n={self.graph.number_of_nodes()} ")
            sys.stdout.flush()
            self.initialize_seeds()
            self.mutate_seeds()
            self.create_nodes()
            if record:
                # Snapshot every live seed for this year.
                self.record = pd.concat([self.record] + \
                                        [pd.DataFrame({'Year': self.year,
                                                       'Parent': seed,
                                                       'Seed number': i,
                                                       'Seed vectors': seed_vec}, index=[0])
                                         for seed, seed_vecs in self.seeds.items()
                                         for i, seed_vec in enumerate(seed_vecs)],
                                        ignore_index=True,
                                        sort=False)
            self.year += 1
        print('')
    def initialize_seeds(self):
        """Top up every node to ``n_seeds`` seed vectors (copies of the
        node's own vector) and matching creation thresholds."""
        nodes = list(self.graph.nodes)
        for i, node in enumerate(nodes):
            if node not in self.seeds.keys():
                self.seeds[node] = []
            if node not in self.thresholds.keys():
                self.thresholds[node] = []
            while len(self.seeds[node]) < self.n_seeds:
                self.seeds[node] += [self.vectors[:,i].copy()]
            while len(self.thresholds[node]) < self.n_seeds:
                self.thresholds[node] += [self.create(1)[0]]
    def mutate_seeds(self):
        """Apply point/insert/delete mutations to every seed."""
        for node, vecs in self.seeds.items():
            self.seeds[node] = [Model.mutate(vec, self.rvs, self.point,
                                             self.insert, self.delete)
                                for vec in vecs]
    def crossover_seeds(self):
        """Cross over seed pairs (across different nodes) whose cosine
        similarity exceeds ``self.crossover``."""
        nodes = list(self.graph.nodes)
        for i in range(len(nodes)):
            seeds_i = sp.sparse.hstack(self.seeds[nodes[i]])
            for j in range(i+1,len(nodes)):
                seeds_j = sp.sparse.hstack(self.seeds[nodes[j]])
                similarity = smp.cosine_similarity(seeds_i.transpose(),
                                                   seeds_j.transpose())
                # Fixed: previously compared against nonexistent
                # ``self.threshold`` (AttributeError); the crossover
                # similarity threshold is stored in ``self.crossover``.
                for k,l in np.argwhere(similarity>self.crossover):
                    cross = Model.crossover(seeds_i[:,k], seeds_j[:,l])
                    choice = np.random.choice(2)
                    self.seeds[nodes[i]][k] = cross if choice else self.vectors[:,i]
                    self.seeds[nodes[j]][l] = cross if not choice else self.vectors[:,j]
    def create_nodes(self):
        """Turn seeds that drifted far enough from their parent
        (cosine similarity below their threshold) into new nodes."""
        nodes = list(self.graph.nodes)
        for i, node in enumerate(nodes):
            parent = self.vectors[:,i]
            seeds = sp.sparse.hstack(self.seeds[node])
            sim_to_parent = smp.cosine_similarity(parent.transpose(), seeds.transpose())
            # Fixed: previously popped from the seed/threshold lists
            # while enumerating them, which skipped the element after
            # every removal and misaligned seed/threshold indices.
            keep_seeds = []
            keep_thresholds = []
            for j, seed_vec in enumerate(self.seeds[node]):
                if sim_to_parent[0,j] < self.thresholds[node][j]:
                    Model.connect(seed_vec, self.graph, self.vectors, self.dct)
                    self.vectors = sp.sparse.hstack([self.vectors, seed_vec])
                else:
                    keep_seeds.append(seed_vec)
                    keep_thresholds.append(self.thresholds[node][j])
            self.seeds[node] = keep_seeds
            self.thresholds[node] = keep_thresholds
        for node in self.graph.nodes:
            if 'year' not in self.graph.nodes[node].keys():
                self.graph.nodes[node]['year'] = self.year
    @staticmethod
    def mutate(x, rvs, point=(0,0), insert=(0,0,None), delete=(0,0)):
        """ Mutates vector ``x`` with point mutations,
        insertions, and deletions. Insertions and point
        mutations draw from a random process ``rvs``.
        Parameters
        ----------
        x: scipy.sparse.csc_matrix
        rvs: lambda (n)-> float
            returns ``n`` random weights in [0,1]
        point: tuple (int n, float p)
            n = number of trials, p = probability per trial
        insert: tuple (n, p, iterable s)
            s = set of elements from which to select
            if None, select from all zero elements
        delete: tuple (n, p)
        Returns
        -------
        y: scipy.sparse.csc_matrix
            mutated copy (``x`` itself is returned unchanged when empty)
        """
        data = x.data
        idx = x.indices
        if idx.size==0:
            return x
        # point mutation: re-draw the weight of existing entries
        n_point = np.random.binomial(point[0], point[1])
        i_point = np.random.choice(x.size, size=n_point, replace=False)
        data[i_point] = rvs(n_point)
        # insertion
        n_insert = np.random.binomial(insert[0], insert[1])
        for _ in range(n_insert):
            # Rejection-sample an index not already present.
            while True:
                insert_idx = np.random.choice(insert[2]) if insert[2]\
                             else np.random.choice(x.shape[0])
                if insert_idx not in idx: break
            idx = np.append(idx, insert_idx)
            data = np.append(data, rvs(1))
        # deletion
        n_delete = np.random.binomial(delete[0], delete[1])
        i_delete = np.random.choice(idx.size, size=n_delete, replace=False)
        idx = np.delete(idx, i_delete)
        data = np.delete(data, i_delete)
        y = sp.sparse.csc_matrix((data, (idx, np.zeros(idx.shape, dtype=int))),
                                 shape=x.shape)
        return y
    @staticmethod
    def connect(seed_vector, graph, vectors, dct, top_words=10, match_n=6):
        """Add the seed as a new node (named by its top words) and link
        it to every existing node sharing enough top words.
        Parameters
        ----------
        seed_vector: scipy.sparse.csc_matrix
        graph: networkx.DiGraph (not optional)
        vectors: scipy.sparse.csc_matrix (not optional)
        dct: gensim.corpora.dictionary (not optional)
        top_words: int (default=10)
        match_n: int
            minimum top-word overlap required to draw an edge
        """
        seed_top_words, seed_top_idx = Model.find_top_words(seed_vector, dct)
        seed_name = ' '.join(seed_top_words)
        nodes = list(graph.nodes)
        graph.add_node(seed_name)
        for i, node in enumerate(nodes):
            node_vector = vectors[:,i]
            node_top_words, node_top_idx = Model.find_top_words(node_vector, dct)
            if len(set(seed_top_idx).intersection(set(node_vector.indices))) >= match_n or\
               len(set(node_top_idx).intersection(set(seed_vector.indices))) >= match_n:
                graph.add_edge(node, seed_name)
    @staticmethod
    def find_top_words(x, dct, top_n=10):
        """Return the ``top_n`` highest-weighted words of ``x`` with
        stopwords removed.
        Parameters
        ----------
        x: scipy.sparse.csc_matrix
        dct: gensim.corpora.dictionary
        top_n: int
        Returns
        -------
        words: list of strings (stopwords removed)
        idx_vector: list of int, dictionary indices of those words
        """
        top_idx = np.argsort(x.data)[-top_n:]
        idx = [x.indices[i] for i in top_idx]
        words = [dct[i] for i in idx]
        words_nostop = gpp.remove_stopwords(' '.join(words)).split(' ')
        idx_keep = list(map(lambda x: words.index(x), set(words).intersection(words_nostop)))
        idx_nostop = list(map(idx.__getitem__, idx_keep))
        return words_nostop, idx_nostop
    @staticmethod
    def crossover(v1, v2):
        """ Crosses two vectors by combining half of one
        and half of the other.
        Parameters
        ----------
        v1, v2: scipy.sparse.matrix
        Returns
        -------
        v3: scipy.sparse.matrix
        """
        idx1 = np.random.choice(v1.size, size=int(v1.size/2))
        idx2 = np.random.choice(v2.size, size=int(v2.size/2))
        data = np.array([v1.data[i] for i in idx1] +
                        [v2.data[i] for i in idx2])
        idx = np.array([v1.indices[i] for i in idx1] +
                       [v2.indices[i] for i in idx2])
        v3 = sp.sparse.csc_matrix((data, (idx, np.zeros(idx.shape, dtype=int))),
                                  shape=v1.shape)
        return v3
from civicboom.tests import *
from civicboom.lib.database.get_cached import get_member
from civicboom.model.payment import *
from civicboom.model.meta import Session
from sqlalchemy import or_, and_, not_, null
import copy, datetime, urlparse
class TestPaymentController(TestController):
"""
Tests for Payment Controller
"""
payment_account_ids = {}
def test_payment_all_ok(self):
    """End-to-end billing lifecycle: sign-up for individual and
    organisation accounts, regrades, overdue-payment emails and status
    transitions (waiting -> failed -> ok), manual part-payments, and the
    next billing cycle's invoice/overdue notifications.  Server time is
    advanced with ``server_datetime`` to drive the invoice tasks."""
    self.server_datetime('now')
    # Run invoice tasks to get any stray emails out of the way
    self.run_task('run_invoice_tasks')
    # Setup payment account numbers dict
    self.payment_account_ids = {}
    # Setup two accounts, ind and org & test invalid form entries for both!
    self.part_sign_up('test_payment_ind', 'ind')
    self.log_out()
    self.part_sign_up('test_payment_org', 'org')
    # Regrade org to free so as to get it out of the way...
    self.part_regrade('test_payment_org', 'free')
    self.part_regrade('test_payment_org', 'corp')
    self.part_regrade('test_payment_org', 'free')
    # Check 0 invoices still billed for free acct
    assert len(get_member('test_payment_org').payment_account.invoices.filter(Invoice.status=='billed').all()) == 0
    # Shift date to check for two day payment fail
    self.server_datetime(datetime.datetime.now() + datetime.timedelta(days=1))
    # Should not send email as two days max unpaid
    emails = getNumEmails()
    self.run_task('run_invoice_tasks')
    assert emails == getNumEmails()
    self.part_check_member_status('test_payment_ind', 'waiting')
    self.server_datetime(datetime.datetime.now() + datetime.timedelta(days=2))
    # Should send email as two days max unpaid
    emails = getNumEmails()
    self.run_task('run_invoice_tasks')
    self.assertEqual(getNumEmails(), emails + 2) # 2 as admin is sent an email too
    self.part_check_member_status('test_payment_ind', 'failed')
    # Get outstanding invoice balance, ensure > 0 and manually pay 5 towards
    outstanding = self.part_get_invoice('test_payment_ind').total_due
    assert outstanding > 0
    self.part_pay_invoice_manual(self.part_get_invoice('test_payment_ind'), 5)
    # Check outstanding invoice balance changed, ensure > 0 and manually pay the rest
    assert self.part_get_invoice('test_payment_ind').total_due == outstanding - 5
    assert self.part_get_invoice('test_payment_ind').total_due > 0
    self.part_pay_invoice_manual(self.part_get_invoice('test_payment_ind'), outstanding - 5)
    # Check no outstanding balance on invoice, run invoice tasks to change invoice status
    assert self.part_get_invoice('test_payment_ind').total_due == 0
    self.run_task('run_invoice_tasks')
    # Check invoice marked as paid & account is returned to "ok"
    assert self.part_get_invoice('test_payment_ind').status == 'paid'
    self.part_check_member_status('test_payment_ind', 'ok')
    # Move to next month & check invoice created email
    emails = getNumEmails()
    invoices = len(get_member('test_payment_ind').payment_account.invoices.all())
    self.server_datetime(datetime.datetime.now() + datetime.timedelta(days=27))
    self.run_task('run_invoice_tasks')
    self.assertEqual(getNumEmails(), emails + 2) # 2 as admin is sent an email too
    self.assertEqual(len(get_member('test_payment_ind').payment_account.invoices.all()), invoices + 1)
    self.assertIn('invoiced', getLastEmail().content_text)
    # Move to invoice due date and test for invoice due email
    emails = getNumEmails()
    due_date = get_member('test_payment_ind').payment_account.invoices.filter(Invoice.status=='billed').one().due_date
    self.server_datetime(datetime.datetime.combine(due_date, datetime.time(0,0,0)))
    self.run_task('run_invoice_tasks')
    self.assertEqual(getNumEmails(), emails + 2) # 2 as admin is sent an email too
    self.assertIn('overdue', getLastEmail().content_text)
    # Reset the mocked server clock so later tests see real time.
    self.server_datetime('now')
def test_payment_ok_paypal(self):
    """Pay a billed invoice via the (sandboxed) PayPal Express flow:
    ``payment_begin`` must redirect to the PayPal sandbox with a test
    token, and ``payment_return`` with that token must settle the
    invoice and restore the member's status to 'ok'."""
    self.server_datetime('now')
    self.run_task('run_invoice_tasks')
    self.part_sign_up('test_payment_paypal', 'org')
    self.run_task('run_invoice_tasks')
    self.part_check_member_status('test_payment_paypal', 'waiting')
    outstanding = self.part_get_invoice('test_payment_paypal').total_due
    # Get the invoice and call payment_begin for paypal
    invoice = self.part_get_invoice('test_payment_paypal')
    response = self.app.get(
        url(controller='payment_actions', id=invoice.payment_account.id, action='payment_begin', invoice_id=invoice.id, service="paypal_express"),
        status=302
    )
    # Check correct response
    assert 'sandbox.paypal.com' in response
    # Grab 302'd url and token query parameter
    redirect_location = response.response.headers['location']
    redirect_token = urlparse.parse_qs(urlparse.urlparse(redirect_location).query)['token'][0]
    # Check we got a "TESTTOKEN" back (actual token would be "TESTTOKEN-0" for e.g.)
    assert 'TESTTOKEN' in redirect_token
    # Get the invoice and call payment_return for paypal
    invoice = self.part_get_invoice('test_payment_paypal')
    response = self.app.get(
        url(controller='payment_actions', id=invoice.payment_account.id, action='payment_return', invoice_id=invoice.id, service="paypal_express", token=redirect_token, PayerID='rar'),
        status=302
    )
    self.run_task('run_invoice_tasks')
    invoice = self.part_get_invoice('test_payment_paypal')
    # Debug output for diagnosing settlement failures.
    print invoice, invoice.status, invoice.transactions[0].status, invoice.paid_total, invoice.total, invoice.total_due
    assert invoice.status == 'paid'
    self.part_check_member_status('test_payment_paypal', 'ok')
    # Reset the mocked server clock so later tests see real time.
    self.server_datetime('now')
def test_payment_ok_paypal_recurring(self):
self.server_datetime('now')
self.run_task('run_invoice_tasks')
self.part_sign_up('test_payment_paypal_rec', 'org')
self.run_task('run_invoice_tasks')
self.part_check_member_status('test_payment_paypal_rec', 'waiting')
outstanding = self.part_get_invoice('test_payment_paypal_rec').total_due
# Get the invoice and call payment_begin for paypal
invoice = self.part_get_invoice('test_payment_paypal_rec')
response = self.app.get(
url(controller='payment_actions', id=invoice.payment_account.id, action='payment_begin', invoice_id=invoice.id, service="paypal_express", recurring="True"),
status=302
)
# Check correct response
assert 'sandbox.paypal.com' in response
# Grab 302'd url and token query parameter
redirect_location = response.response.headers['location']
redirect_token = urlparse.parse_qs(urlparse.urlparse(redirect_location).query)['token'][0]
# Check we got a "TESTTOKEN" back (actual token would be "TESTTOKEN-0" for e.g.)
assert 'TESTTOKEN' in redirect_token
# Get the invoice and call payment_return for paypal
invoice = self.part_get_invoice('test_payment_paypal_rec')
response = self.app.get(
url(controller='payment_actions', id=invoice.payment_account.id, action='payment_return', invoice_id=invoice.id, service="paypal_express", token=redirect_token, PayerID='rar'),
status=302
)
# Check recurring billing account has been created
bacct = self.part_get_billing_account('test_payment_paypal_rec')
bacct_id = bacct.id
self.assertNotEqual(bacct, None)
self.assertIn('paypal_recurring', bacct.provider)
self.run_task('run_invoice_tasks')
invoice = self.part_get_invoice('test_payment_paypal_rec')
print invoice, invoice.status, invoice.transactions[0].status, invoice.paid_total, invoice.total, invoice.total_due
assert invoice.status == 'paid'
self.part_check_member_status('test_payment_paypal_rec', 'ok')
self.server_datetime('now')
# Move to next month & generate invoice so we can test (what greg believes to be) paypal recurring behaviour
self.server_datetime(datetime.datetime.now() + datetime.timedelta(days=27))
self.run_task('run_invoice_tasks')
invoice = self.part_get_invoice('test_payment_paypal_rec')
invoice_id = invoice.id
print invoice.due_date, invoice, invoice.status
self.assertEqual(invoice.status, 'billed')
self.run_task('run_billing_account_tasks')
transaction = Session.query(BillingTransaction).filter(and_(BillingTransaction.invoice_id==None,BillingTransaction.billing_account_id==bacct_id)).one()
self.assertIn('BP-TESTTOKEN', transaction.reference)
trans_id = transaction.id
self.run_task('run_match_billing_transactions')
transaction = Session.query(BillingTransaction).get(trans_id)
self.assertIsNotNone(transaction)
self.assertEqual(transaction.invoice_id, invoice_id)
self.assertIsNone(transaction.billing_account_id)
#self.run_task('')
# Cancel billing account
response = self.app.post(
url(controller='payment_actions', id=self.payment_account_ids['test_payment_paypal_rec'], action='billing_account_deactivate', format='json'),
params={
'_authentication_token': self.auth_token,
'billing_account_id' : bacct_id,
},
status=200
)
self.server_datetime('now')
def test_payment_cancel_paypal(self):
self.server_datetime('now')
self.run_task('run_invoice_tasks')
self.part_sign_up('test_payment_pp_cancel', 'org')
self.run_task('run_invoice_tasks')
self.part_check_member_status('test_payment_pp_cancel', 'waiting')
outstanding = self.part_get_invoice('test_payment_pp_cancel').total_due
# Get the invoice and call payment_begin for paypal
invoice = self.part_get_invoice('test_payment_pp_cancel')
response = self.app.get(
url(controller='payment_actions', id=invoice.payment_account.id, action='payment_begin', invoice_id=invoice.id, service="paypal_express"),
status=302
)
# Check correct response
assert 'sandbox.paypal.com' in response
# Grab 302'd url and token query parameter
redirect_location = response.response.headers['location']
redirect_token = urlparse.parse_qs(urlparse.urlparse(redirect_location).query)['token'][0]
# Check we got a "TESTTOKEN" back (actual token would be "<PASSWORD>" for e.g.)
assert 'TESTTOKEN' in redirect_token
# Get the invoice and call payment_return for paypal
invoice = self.part_get_invoice('test_payment_pp_cancel')
response = self.app.get(
url(controller='payment_actions', id=invoice.payment_account.id, action='payment_cancel', invoice_id=invoice.id, service="paypal_express", token=redirect_token, PayerID='rar'),
status=302
)
self.run_task('run_invoice_tasks')
invoice = self.part_get_invoice('test_payment_pp_cancel')
print invoice, invoice.status, invoice.transactions[0].status, invoice.paid_total, invoice.total, invoice.total_due
assert invoice.status == 'billed'
assert invoice.transactions[0].status == 'cancelled'
self.part_check_member_status('test_payment_pp_cancel', 'waiting')
self.server_datetime('now')
def test_all_billing_tasks(self):
    """Smoke test: the combined billing task runs without error."""
    self.server_datetime('now')
    self.run_task('run_billing_tasks')
#===========================================================================
# Subroutines used for the tests above
#===========================================================================
def part_get_invoice(self, username, offset=0):
    """
    Return username's invoice at `offset`, newest first (offset 0 == latest).
    """
    return get_member(username).payment_account.invoices.order_by(Invoice.id.desc()).all()[offset]
def part_pay_invoice_manual(self, invoice, amount=None):
    """
    Create a completed manual transaction against `invoice`.

    Pays `amount`, or the full outstanding balance when amount is None.
    The transaction is persisted through the invoice relationship when
    the session commits.
    """
    txn = BillingTransaction()
    txn.invoice = invoice  # associates txn with the session via the relationship
    txn.status = 'complete'
    txn.amount = amount or invoice.total_due
    txn.provider = 'manual_test'
    txn.reference = 'test'
    Session.commit()
def part_check_member_status(self, username, status):
    """
    Assert username's billing status equals `status`, and that the member's
    account type matches ('failed' accounts are downgraded to 'free').
    """
    member = get_member(username)
    self.assertEqual(member.payment_account.billing_status, status)
    pac_type = 'free' if status == 'failed' else member.payment_account.type
    self.assertEqual(member.account_type, pac_type)
def part_sign_up(self, username, name_type="ind", type="plus"):
    """
    Sign up to the site and create a payment account.

    Exercises form validation for each required field, checks the first
    invoice is generated and billed, and exercises member add/remove on
    the new account.  Leaves the account in 'waiting' billing status.
    """
    self.sign_up_as(username)
    # A brand-new member should have no payment account yet
    member = get_member(username)
    assert member.payment_account == None
    # Index should redirect when there is no payment account
    response = self.app.get(
        '/payments',
        status=302
    )
    params = {
        '_authentication_token': self.auth_token,
        'name_type'       : name_type,
        'org_name'        : ("%s's Organisation Name" % username) if name_type == 'org' else '',
        'ind_name'        : ("%s's Individual Name" % username) if name_type == 'ind' else '',
        'address_1'       : 'Address 1',
        'address_2'       : 'Address 2',
        'address_town'    : 'Town',
        'address_county'  : 'County',
        'address_postal'  : 'PO5 7AL',
        'address_country' : 'GB',
        'plan_%s' % type  : 'blah',
    }
    # Blanking each required field in turn must be rejected with a 400
    for field in ['name_type', '%s_name' % name_type, 'address_1', 'address_town', 'address_country', 'address_postal']:
        params_invalid = copy.deepcopy(params)
        params_invalid[field] = ''
        response = self.app.post(
            url('payments', format="json"),
            params=params_invalid,
            status=400
        )
    # Create the payment account with the full, valid form
    response = self.app.post(
        url('payments', format='json'),
        params=params,
        status=200
    )
    # Remember the new payment account id for later requests
    member = get_member(username)
    self.payment_account_ids[username] = member.payment_account.id
    # Account should have been upgraded to the requested plan
    assert member.payment_account.type == type
    # Check the name was set on the account (also exercises index)
    response = self.app.get(
        url('payments', format='json'),
        status=200
    )
    assert (("%s's Individual Name" % username) if name_type == 'ind' else ("%s's Organisation Name" % username)) in response
    # Edit page should render
    response = self.app.get(
        url('edit_payment', id=self.payment_account_ids[username]),
        status=200
    )
    # Change a field via PUT
    params['_authentication_token'] = self.auth_token
    params['address_postal'] = 'PO5_7AL_TESTING'
    response = self.app.put(
        url(controller='payments', id=self.payment_account_ids[username], action="update", format='json'),
        params=params,
        status=200
    )
    # Check the field changed (also exercises show)
    response = self.app.get(
        url('payments', id=self.payment_account_ids[username], format='json'),
        status=200
    )
    assert 'PO5_7AL_TESTING' in response
    # Exactly one invoice should exist, billed, with a positive balance
    member = get_member(username)
    invoices = member.payment_account.invoices
    assert len(invoices.all()) == 1
    assert invoices[0].status == 'billed'
    assert invoices[0].total_due > 0
    # Invoice page should render
    response = self.app.get(
        url(controller='payment_actions', id=self.payment_account_ids[username], action='invoice', invoice_id=invoices[0].id),
        status=200
    )
    assert 'Invoice' in response
    # Should not send email as redirected to invoice already, plus account waiting sends overdue email :(
    emails = getNumEmails()
    self.run_task('run_invoice_tasks')
    assert emails == getNumEmails()
    # Billing status should be 'waiting' (the invoice is already due!)
    member = get_member(username)
    assert member.payment_account.billing_status == 'waiting'
    # Member add/remove: removing the account owner must fail...
    self.sign_up_as('%s_add_me' % username)
    self.log_in_as(username)
    response = self.app.post(
        url(controller='payment_actions', id=self.payment_account_ids[username], action='member_remove', format='json'),
        params={
            '_authentication_token': self.auth_token,
            'username': username,
        },
        status=400
    )
    # ...while adding then removing a second member must succeed
    members_len = len(get_member(username).payment_account.members)
    response = self.app.post(
        url(controller='payment_actions', id=self.payment_account_ids[username], action='member_add', format='json'),
        params={
            '_authentication_token': self.auth_token,
            'username': '%s_add_me' % username,
        },
        status=200
    )
    assert len(get_member(username).payment_account.members) == members_len + 1
    response = self.app.post(
        url(controller='payment_actions', id=self.payment_account_ids[username], action='member_remove', format='json'),
        params={
            '_authentication_token': self.auth_token,
            'username': '%s_add_me' % username,
        },
        status=200
    )
    assert len(get_member(username).payment_account.members) == members_len
def part_regrade(self, username, type="free"):
    """
    Regrade username's payment account to `type` (default 'free').

    A second regrade to the same type must be rejected with a 400.
    """
    response = self.app.post(
        url(controller='payment_actions', action='regrade', id=self.payment_account_ids[username], format="json"),
        params=dict(
            _authentication_token=self.auth_token,
            new_type=type
        ),
        status=200)
    # Regrading to the current type is invalid
    response = self.app.post(
        url(controller='payment_actions', action='regrade', id=self.payment_account_ids[username], format="json"),
        params=dict(
            _authentication_token=self.auth_token,
            new_type=type
        ),
        status=400)
    assert get_member(username).payment_account.type == type
def part_get_billing_account(self, username):
    """
    Return the user's first billing account.
    """
    return get_member(username).payment_account.billing_accounts[0]

from civicboom.tests import *
from civicboom.lib.database.get_cached import get_member
from civicboom.model.payment import *
from civicboom.model.meta import Session
from sqlalchemy import or_, and_, not_, null
import copy, datetime, urlparse
class TestPaymentController(TestController):
    """
    Tests for Payment Controller
    """
    # Maps username -> payment account id; populated by part_sign_up()
    payment_account_ids = {}
def test_payment_all_ok(self):
    """
    End-to-end billing lifecycle: sign up, regrade, let the invoice go
    overdue (account fails), pay it manually in two parts, then check the
    monthly invoice and due-date emails.
    """
    self.server_datetime('now')
    # Run invoice tasks to get any stray emails out of the way
    self.run_task('run_invoice_tasks')
    # Reset the username -> payment account id map
    self.payment_account_ids = {}
    # Setup two accounts, ind and org & test invalid form entries for both!
    self.part_sign_up('test_payment_ind', 'ind')
    self.log_out()
    self.part_sign_up('test_payment_org', 'org')
    # Regrade org to free so as to get it out of the way...
    self.part_regrade('test_payment_org', 'free')
    self.part_regrade('test_payment_org', 'corp')
    self.part_regrade('test_payment_org', 'free')
    # Free accounts should have no billed invoices outstanding
    assert len(get_member('test_payment_org').payment_account.invoices.filter(Invoice.status=='billed').all()) == 0
    # Shift date one day: still inside the grace period, so no email
    self.server_datetime(datetime.datetime.now() + datetime.timedelta(days=1))
    emails = getNumEmails()
    self.run_task('run_invoice_tasks')
    assert emails == getNumEmails()
    self.part_check_member_status('test_payment_ind', 'waiting')
    # Two more days: past the grace period, overdue email sent, account failed
    self.server_datetime(datetime.datetime.now() + datetime.timedelta(days=2))
    emails = getNumEmails()
    self.run_task('run_invoice_tasks')
    self.assertEqual(getNumEmails(), emails + 2)  # 2 as admin is sent an email too
    self.part_check_member_status('test_payment_ind', 'failed')
    # Get outstanding invoice balance, ensure > 0 and manually pay 5 towards it
    outstanding = self.part_get_invoice('test_payment_ind').total_due
    assert outstanding > 0
    self.part_pay_invoice_manual(self.part_get_invoice('test_payment_ind'), 5)
    # Balance should have dropped by 5; pay off the remainder
    assert self.part_get_invoice('test_payment_ind').total_due == outstanding - 5
    assert self.part_get_invoice('test_payment_ind').total_due > 0
    self.part_pay_invoice_manual(self.part_get_invoice('test_payment_ind'), outstanding - 5)
    # Nothing outstanding; run invoice tasks to update the invoice status
    assert self.part_get_invoice('test_payment_ind').total_due == 0
    self.run_task('run_invoice_tasks')
    # Check invoice marked as paid & account is returned to "ok"
    assert self.part_get_invoice('test_payment_ind').status == 'paid'
    self.part_check_member_status('test_payment_ind', 'ok')
    # Move to next month & check the "invoice created" email
    emails = getNumEmails()
    invoices = len(get_member('test_payment_ind').payment_account.invoices.all())
    self.server_datetime(datetime.datetime.now() + datetime.timedelta(days=27))
    self.run_task('run_invoice_tasks')
    self.assertEqual(getNumEmails(), emails + 2)  # 2 as admin is sent an email too
    self.assertEqual(len(get_member('test_payment_ind').payment_account.invoices.all()), invoices + 1)
    self.assertIn('invoiced', getLastEmail().content_text)
    # Move to the invoice due date and check the "overdue" email
    emails = getNumEmails()
    due_date = get_member('test_payment_ind').payment_account.invoices.filter(Invoice.status=='billed').one().due_date
    self.server_datetime(datetime.datetime.combine(due_date, datetime.time(0, 0, 0)))
    self.run_task('run_invoice_tasks')
    self.assertEqual(getNumEmails(), emails + 2)  # 2 as admin is sent an email too
    self.assertIn('overdue', getLastEmail().content_text)
    self.server_datetime('now')
def test_payment_ok_paypal(self):
self.server_datetime('now')
self.run_task('run_invoice_tasks')
self.part_sign_up('test_payment_paypal', 'org')
self.run_task('run_invoice_tasks')
self.part_check_member_status('test_payment_paypal', 'waiting')
outstanding = self.part_get_invoice('test_payment_paypal').total_due
# Get the invoice and call payment_begin for paypal
invoice = self.part_get_invoice('test_payment_paypal')
response = self.app.get(
url(controller='payment_actions', id=invoice.payment_account.id, action='payment_begin', invoice_id=invoice.id, service="paypal_express"),
status=302
)
# Check correct response
assert 'sandbox.paypal.com' in response
# Grab 302'd url and token query parameter
redirect_location = response.response.headers['location']
redirect_token = urlparse.parse_qs(urlparse.urlparse(redirect_location).query)['token'][0]
# Check we got a "TESTTOKEN" back (actual token would be "TESTTOKEN-0" for e.g.)
assert 'TESTTOKEN' in redirect_token
# Get the invoice and call payment_return for paypal
invoice = self.part_get_invoice('test_payment_paypal')
response = self.app.get(
url(controller='payment_actions', id=invoice.payment_account.id, action='payment_return', invoice_id=invoice.id, service="paypal_express", token=redirect_token, PayerID='rar'),
status=302
)
self.run_task('run_invoice_tasks')
invoice = self.part_get_invoice('test_payment_paypal')
print invoice, invoice.status, invoice.transactions[0].status, invoice.paid_total, invoice.total, invoice.total_due
assert invoice.status == 'paid'
self.part_check_member_status('test_payment_paypal', 'ok')
self.server_datetime('now')
def test_payment_ok_paypal_recurring(self):
self.server_datetime('now')
self.run_task('run_invoice_tasks')
self.part_sign_up('test_payment_paypal_rec', 'org')
self.run_task('run_invoice_tasks')
self.part_check_member_status('test_payment_paypal_rec', 'waiting')
outstanding = self.part_get_invoice('test_payment_paypal_rec').total_due
# Get the invoice and call payment_begin for paypal
invoice = self.part_get_invoice('test_payment_paypal_rec')
response = self.app.get(
url(controller='payment_actions', id=invoice.payment_account.id, action='payment_begin', invoice_id=invoice.id, service="paypal_express", recurring="True"),
status=302
)
# Check correct response
assert 'sandbox.paypal.com' in response
# Grab 302'd url and token query parameter
redirect_location = response.response.headers['location']
redirect_token = urlparse.parse_qs(urlparse.urlparse(redirect_location).query)['token'][0]
# Check we got a "TESTTOKEN" back (actual token would be "TESTTOKEN-0" for e.g.)
assert 'TESTTOKEN' in redirect_token
# Get the invoice and call payment_return for paypal
invoice = self.part_get_invoice('test_payment_paypal_rec')
response = self.app.get(
url(controller='payment_actions', id=invoice.payment_account.id, action='payment_return', invoice_id=invoice.id, service="paypal_express", token=redirect_token, PayerID='rar'),
status=302
)
# Check recurring billing account has been created
bacct = self.part_get_billing_account('test_payment_paypal_rec')
bacct_id = bacct.id
self.assertNotEqual(bacct, None)
self.assertIn('paypal_recurring', bacct.provider)
self.run_task('run_invoice_tasks')
invoice = self.part_get_invoice('test_payment_paypal_rec')
print invoice, invoice.status, invoice.transactions[0].status, invoice.paid_total, invoice.total, invoice.total_due
assert invoice.status == 'paid'
self.part_check_member_status('test_payment_paypal_rec', 'ok')
self.server_datetime('now')
# Move to next month & generate invoice so we can test (what greg believes to be) paypal recurring behaviour
self.server_datetime(datetime.datetime.now() + datetime.timedelta(days=27))
self.run_task('run_invoice_tasks')
invoice = self.part_get_invoice('test_payment_paypal_rec')
invoice_id = invoice.id
print invoice.due_date, invoice, invoice.status
self.assertEqual(invoice.status, 'billed')
self.run_task('run_billing_account_tasks')
transaction = Session.query(BillingTransaction).filter(and_(BillingTransaction.invoice_id==None,BillingTransaction.billing_account_id==bacct_id)).one()
self.assertIn('BP-TESTTOKEN', transaction.reference)
trans_id = transaction.id
self.run_task('run_match_billing_transactions')
transaction = Session.query(BillingTransaction).get(trans_id)
self.assertIsNotNone(transaction)
self.assertEqual(transaction.invoice_id, invoice_id)
self.assertIsNone(transaction.billing_account_id)
#self.run_task('')
# Cancel billing account
response = self.app.post(
url(controller='payment_actions', id=self.payment_account_ids['test_payment_paypal_rec'], action='billing_account_deactivate', format='json'),
params={
'_authentication_token': self.auth_token,
'billing_account_id' : bacct_id,
},
status=200
)
self.server_datetime('now')
def test_payment_cancel_paypal(self):
self.server_datetime('now')
self.run_task('run_invoice_tasks')
self.part_sign_up('test_payment_pp_cancel', 'org')
self.run_task('run_invoice_tasks')
self.part_check_member_status('test_payment_pp_cancel', 'waiting')
outstanding = self.part_get_invoice('test_payment_pp_cancel').total_due
# Get the invoice and call payment_begin for paypal
invoice = self.part_get_invoice('test_payment_pp_cancel')
response = self.app.get(
url(controller='payment_actions', id=invoice.payment_account.id, action='payment_begin', invoice_id=invoice.id, service="paypal_express"),
status=302
)
# Check correct response
assert 'sandbox.paypal.com' in response
# Grab 302'd url and token query parameter
redirect_location = response.response.headers['location']
redirect_token = urlparse.parse_qs(urlparse.urlparse(redirect_location).query)['token'][0]
# Check we got a "TESTTOKEN" back (actual token would be "<PASSWORD>" for e.g.)
assert 'TESTTOKEN' in redirect_token
# Get the invoice and call payment_return for paypal
invoice = self.part_get_invoice('test_payment_pp_cancel')
response = self.app.get(
url(controller='payment_actions', id=invoice.payment_account.id, action='payment_cancel', invoice_id=invoice.id, service="paypal_express", token=redirect_token, PayerID='rar'),
status=302
)
self.run_task('run_invoice_tasks')
invoice = self.part_get_invoice('test_payment_pp_cancel')
print invoice, invoice.status, invoice.transactions[0].status, invoice.paid_total, invoice.total, invoice.total_due
assert invoice.status == 'billed'
assert invoice.transactions[0].status == 'cancelled'
self.part_check_member_status('test_payment_pp_cancel', 'waiting')
self.server_datetime('now')
def test_all_billing_tasks(self):
    """Smoke test: the combined billing task runs without error."""
    self.server_datetime('now')
    self.run_task('run_billing_tasks')
#===========================================================================
# Subroutines used for the tests above
#===========================================================================
def part_get_invoice(self, username, offset=0):
    """
    Return username's invoice at `offset`, newest first (offset 0 == latest).
    """
    return get_member(username).payment_account.invoices.order_by(Invoice.id.desc()).all()[offset]
def part_pay_invoice_manual(self, invoice, amount=None):
    """
    Create a completed manual transaction against `invoice`.

    Pays `amount`, or the full outstanding balance when amount is None.
    The transaction is persisted through the invoice relationship when
    the session commits.
    """
    txn = BillingTransaction()
    txn.invoice = invoice  # associates txn with the session via the relationship
    txn.status = 'complete'
    txn.amount = amount or invoice.total_due
    txn.provider = 'manual_test'
    txn.reference = 'test'
    Session.commit()
def part_check_member_status(self, username, status):
    """
    Assert username's billing status equals `status`, and that the member's
    account type matches ('failed' accounts are downgraded to 'free').
    """
    member = get_member(username)
    self.assertEqual(member.payment_account.billing_status, status)
    pac_type = 'free' if status == 'failed' else member.payment_account.type
    self.assertEqual(member.account_type, pac_type)
def part_sign_up(self, username, name_type="ind", type="plus"):
    """
    Sign up to the site and create a payment account.

    Exercises form validation for each required field, checks the first
    invoice is generated and billed, and exercises member add/remove on
    the new account.  Leaves the account in 'waiting' billing status.
    """
    self.sign_up_as(username)
    # A brand-new member should have no payment account yet
    member = get_member(username)
    assert member.payment_account == None
    # Index should redirect when there is no payment account
    response = self.app.get(
        '/payments',
        status=302
    )
    params = {
        '_authentication_token': self.auth_token,
        'name_type'       : name_type,
        'org_name'        : ("%s's Organisation Name" % username) if name_type == 'org' else '',
        'ind_name'        : ("%s's Individual Name" % username) if name_type == 'ind' else '',
        'address_1'       : 'Address 1',
        'address_2'       : 'Address 2',
        'address_town'    : 'Town',
        'address_county'  : 'County',
        'address_postal'  : 'PO5 7AL',
        'address_country' : 'GB',
        'plan_%s' % type  : 'blah',
    }
    # Blanking each required field in turn must be rejected with a 400
    for field in ['name_type', '%s_name' % name_type, 'address_1', 'address_town', 'address_country', 'address_postal']:
        params_invalid = copy.deepcopy(params)
        params_invalid[field] = ''
        response = self.app.post(
            url('payments', format="json"),
            params=params_invalid,
            status=400
        )
    # Create the payment account with the full, valid form
    response = self.app.post(
        url('payments', format='json'),
        params=params,
        status=200
    )
    # Remember the new payment account id for later requests
    member = get_member(username)
    self.payment_account_ids[username] = member.payment_account.id
    # Account should have been upgraded to the requested plan
    assert member.payment_account.type == type
    # Check the name was set on the account (also exercises index)
    response = self.app.get(
        url('payments', format='json'),
        status=200
    )
    assert (("%s's Individual Name" % username) if name_type == 'ind' else ("%s's Organisation Name" % username)) in response
    # Edit page should render
    response = self.app.get(
        url('edit_payment', id=self.payment_account_ids[username]),
        status=200
    )
    # Change a field via PUT
    params['_authentication_token'] = self.auth_token
    params['address_postal'] = 'PO5_7AL_TESTING'
    response = self.app.put(
        url(controller='payments', id=self.payment_account_ids[username], action="update", format='json'),
        params=params,
        status=200
    )
    # Check the field changed (also exercises show)
    response = self.app.get(
        url('payments', id=self.payment_account_ids[username], format='json'),
        status=200
    )
    assert 'PO5_7AL_TESTING' in response
    # Exactly one invoice should exist, billed, with a positive balance
    member = get_member(username)
    invoices = member.payment_account.invoices
    assert len(invoices.all()) == 1
    assert invoices[0].status == 'billed'
    assert invoices[0].total_due > 0
    # Invoice page should render
    response = self.app.get(
        url(controller='payment_actions', id=self.payment_account_ids[username], action='invoice', invoice_id=invoices[0].id),
        status=200
    )
    assert 'Invoice' in response
    # Should not send email as redirected to invoice already, plus account waiting sends overdue email :(
    emails = getNumEmails()
    self.run_task('run_invoice_tasks')
    assert emails == getNumEmails()
    # Billing status should be 'waiting' (the invoice is already due!)
    member = get_member(username)
    assert member.payment_account.billing_status == 'waiting'
    # Member add/remove: removing the account owner must fail...
    self.sign_up_as('%s_add_me' % username)
    self.log_in_as(username)
    response = self.app.post(
        url(controller='payment_actions', id=self.payment_account_ids[username], action='member_remove', format='json'),
        params={
            '_authentication_token': self.auth_token,
            'username': username,
        },
        status=400
    )
    # ...while adding then removing a second member must succeed
    members_len = len(get_member(username).payment_account.members)
    response = self.app.post(
        url(controller='payment_actions', id=self.payment_account_ids[username], action='member_add', format='json'),
        params={
            '_authentication_token': self.auth_token,
            'username': '%s_add_me' % username,
        },
        status=200
    )
    assert len(get_member(username).payment_account.members) == members_len + 1
    response = self.app.post(
        url(controller='payment_actions', id=self.payment_account_ids[username], action='member_remove', format='json'),
        params={
            '_authentication_token': self.auth_token,
            'username': '%s_add_me' % username,
        },
        status=200
    )
    assert len(get_member(username).payment_account.members) == members_len
def part_regrade(self, username, type="free"):
    """
    Regrade username's payment account to `type` (default 'free').

    A second regrade to the same type must be rejected with a 400.
    """
    response = self.app.post(
        url(controller='payment_actions', action='regrade', id=self.payment_account_ids[username], format="json"),
        params=dict(
            _authentication_token=self.auth_token,
            new_type=type
        ),
        status=200)
    # Regrading to the current type is invalid
    response = self.app.post(
        url(controller='payment_actions', action='regrade', id=self.payment_account_ids[username], format="json"),
        params=dict(
            _authentication_token=self.auth_token,
            new_type=type
        ),
        status=400)
    assert get_member(username).payment_account.type == type
def part_get_billing_account(self, username):
    """
    Return the user's first billing account.
    """
    return get_member(username).payment_account.billing_accounts[0]
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn.preprocessing import StandardScaler
class DualPerceptron:
    """
    Dual-form perceptron with a 'linear' or 'gaussian' (RBF) kernel.

    Loads a whitespace-delimited data file in which the last column is the
    label and the remaining columns are features.
    """

    def __init__(self, kernel, path):
        self.kernel = kernel  # 'linear' or 'gaussian'
        self.x, self.y = DualPerceptron.get_data(path)

    @staticmethod
    def get_data(path):
        """Read `path` and return (normalized features, labels)."""
        df = pd.read_csv(path, delim_whitespace=True, header=None).values
        data = df[:, :-1]
        label = df[:, -1]
        data = DualPerceptron.normalize(data)
        return data, label

    @staticmethod
    def normalize(data):
        """Standardize features to zero mean / unit variance."""
        std = StandardScaler()
        # BUG FIX: the original discarded the result of std.transform(data)
        # and returned the raw, un-normalized array.
        return std.fit_transform(data)

    def mistakes(self, w):
        """Return a boolean mask of training points misclassified under dual weights `w`."""
        temp = np.sum(self.k_vals(self.x) * w, axis=1)
        result = (self.y * temp) <= 0
        result = result.reshape(result.shape[0])
        return result

    def k_vals(self, test):
        """Return the kernel values between each `test` point and the training set."""
        if self.kernel == 'gaussian':
            vals = np.array([self.gaussian(test_point) for test_point in test])
        elif self.kernel == 'linear':
            vals = self.x.dot(test.T)
        else:
            # BUG FIX: unknown kernels previously raised UnboundLocalError
            raise ValueError('unknown kernel: %r' % (self.kernel,))
        return vals

    def fit(self):
        """
        Run the dual perceptron until no training point is misclassified.

        Returns (and stores as self.w) the learned dual weights; the
        original version discarded them and looped on a stale mistake mask.
        """
        w = np.zeros(self.x.shape[0])
        iters = 0
        mistake_mask = self.mistakes(w)
        while mistake_mask.any():
            w[mistake_mask] += self.y[mistake_mask].flatten()
            mistake_mask = self.mistakes(w)  # recompute AFTER the update
            print('Iteration: {} | Misclassified Points: {}'.format(iters, int(mistake_mask.sum())))
            iters += 1
        self.w = w
        return w

    def gaussian(self, point, sigma=2):
        """Gaussian (RBF) kernel between `point` and every training point."""
        return np.exp(-1 * (np.sum((self.x - point)**2, axis=1) / (sigma**2)))
# EXECUTION
# Pick a kernel / dataset combination to run:
# dp = DualPerceptron(kernel='linear', path='./data/twoSpirals.txt')
# dp = DualPerceptron(kernel='gaussian', path='./data/twoSpirals.txt')
if __name__ == '__main__':
    # BUG FIX: `dp` was used without ever being created (every constructor
    # call was commented out), so running this module crashed with NameError.
    dp = DualPerceptron(kernel='gaussian', path='./data/perceptronData.txt')
    dp.fit()

import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn.preprocessing import StandardScaler
class DualPerceptron:
    """
    Dual-form perceptron with a 'linear' or 'gaussian' (RBF) kernel.

    Loads a whitespace-delimited data file in which the last column is the
    label and the remaining columns are features.
    """

    def __init__(self, kernel, path):
        self.kernel = kernel  # 'linear' or 'gaussian'
        self.x, self.y = DualPerceptron.get_data(path)

    @staticmethod
    def get_data(path):
        """Read `path` and return (normalized features, labels)."""
        df = pd.read_csv(path, delim_whitespace=True, header=None).values
        data = df[:, :-1]
        label = df[:, -1]
        data = DualPerceptron.normalize(data)
        return data, label

    @staticmethod
    def normalize(data):
        """Standardize features to zero mean / unit variance."""
        std = StandardScaler()
        # BUG FIX: the original discarded the result of std.transform(data)
        # and returned the raw, un-normalized array.
        return std.fit_transform(data)

    def mistakes(self, w):
        """Return a boolean mask of training points misclassified under dual weights `w`."""
        temp = np.sum(self.k_vals(self.x) * w, axis=1)
        result = (self.y * temp) <= 0
        result = result.reshape(result.shape[0])
        return result

    def k_vals(self, test):
        """Return the kernel values between each `test` point and the training set."""
        if self.kernel == 'gaussian':
            vals = np.array([self.gaussian(test_point) for test_point in test])
        elif self.kernel == 'linear':
            vals = self.x.dot(test.T)
        else:
            # BUG FIX: unknown kernels previously raised UnboundLocalError
            raise ValueError('unknown kernel: %r' % (self.kernel,))
        return vals

    def fit(self):
        """
        Run the dual perceptron until no training point is misclassified.

        Returns (and stores as self.w) the learned dual weights; the
        original version discarded them and looped on a stale mistake mask.
        """
        w = np.zeros(self.x.shape[0])
        iters = 0
        mistake_mask = self.mistakes(w)
        while mistake_mask.any():
            w[mistake_mask] += self.y[mistake_mask].flatten()
            mistake_mask = self.mistakes(w)  # recompute AFTER the update
            print('Iteration: {} | Misclassified Points: {}'.format(iters, int(mistake_mask.sum())))
            iters += 1
        self.w = w
        return w

    def gaussian(self, point, sigma=2):
        """Gaussian (RBF) kernel between `point` and every training point."""
        return np.exp(-1 * (np.sum((self.x - point)**2, axis=1) / (sigma**2)))
# EXECUTION
# Pick a kernel / dataset combination to run:
# dp = DualPerceptron(kernel='linear', path='./data/twoSpirals.txt')
# dp = DualPerceptron(kernel='gaussian', path='./data/twoSpirals.txt')
if __name__ == '__main__':
    # BUG FIX: `dp` was used without ever being created (every constructor
    # call was commented out), so running this module crashed with NameError.
    dp = DualPerceptron(kernel='gaussian', path='./data/perceptronData.txt')
    dp.fit()
from aiogram import Bot, types
from aiogram.contrib.middlewares.logging import LoggingMiddleware
from aiogram.dispatcher import Dispatcher
from aiogram.utils.executor import start_webhook
from sqlalchemy import select
from sqlalchemy.orm import Session
from api_parsing.utils import (calculate_time, get_json_from_api,
get_schedule_message)
from db_connection import Groups, Users, create_engine_connection
from telegram_bot.keyboard import schedule_keyboard
from telegram_bot.messages import (HELP_MESSAGES, LOGGER_MESSAGES)
from telegram_bot.settings import (API_URL, TELEGRAM_TOKEN, WEBAPP_HOST,
WEBAPP_PORT, WEBHOOK_PATH,
WEBHOOK_URL, logger)
# Bot and dispatcher, wired with aiogram's request-logging middleware.
bot = Bot(token=TELEGRAM_TOKEN)
dispatcher = Dispatcher(bot)
dispatcher.middleware.setup(LoggingMiddleware())
# Single module-level DB engine/session shared by all handlers below.
# NOTE(review): one shared Session across async handlers assumes no
# concurrent access — confirm this is safe for the deployment.
db_engine = create_engine_connection()
session = Session(db_engine)
@dispatcher.message_handler(commands=['start'])
async def command_start(message: types.Message):
    """Handle /start: send the welcome text with the schedule keyboard."""
    await message.answer(text=HELP_MESSAGES['start'],
                         reply_markup=schedule_keyboard)
@dispatcher.message_handler(commands=['help'])
async def command_help(message: types.Message):
    """Handle /help: send the help text with the schedule keyboard."""
    await message.answer(text=HELP_MESSAGES['help'],
                         reply_markup=schedule_keyboard)
@dispatcher.message_handler(commands=['set_group'])
async def command_get_schedule(message: types.Message):
    """Handle /set_group: explain how to register a group.

    NOTE(review): the function name suggests /get_schedule but it is bound
    to /set_group — presumably historical; confirm before renaming.
    """
    await message.answer(text=HELP_MESSAGES['set_group'],
                         reply_markup=schedule_keyboard)
@dispatcher.message_handler(commands=['group'])
async def command_group(message: types.Message):
    """Handle /group: report the group registered for this chat, if any."""
    user = session.scalars(select(Users).filter(
        Users.chat_id == message.chat.id)).first()
    if user is None:
        await message.answer(HELP_MESSAGES['user_not_exist'],
                             reply_markup=schedule_keyboard)
    else:
        group = session.scalars(select(Groups).filter(
            Groups.group_id == user.group_id)).first()
        await message.answer(text=f'Ваша группа {group.group_name}',
                             reply_markup=schedule_keyboard)
@dispatcher.message_handler(commands=['get_schedule'])
async def send_schedule(message: types.Message):
    # /get_schedule: look up the chat's registered group and send today's
    # timetable fetched from the external schedule API.
    user = session.scalars(select(Users).filter(
        Users.chat_id == message.chat.id)).first()
    if user is None:
        await message.answer(HELP_MESSAGES['user_not_exist'],
                             reply_markup=schedule_keyboard)
    else:
        # calculate_time() is consumed as a dict with the day's start/end
        # bounds under 'start_time_str'/'end_time_str'.
        time = calculate_time()
        response_json = await get_json_from_api(
            url=API_URL,
            group_id=user.group_id,
            begin_date=time.get('start_time_str'),
            end_date=time.get('end_time_str'))
        if response_json:
            schedule_message = await get_schedule_message(response_json)
            await message.answer(text=schedule_message,
                                 reply_markup=schedule_keyboard)
        else:
            # Empty/falsy API response is treated as "no lessons today".
            await message.answer(text='На сегодня пар нет.',
                                 reply_markup=schedule_keyboard)
@dispatcher.message_handler(
    regexp=r"^[А-Я|а-я]*-\d{2,3}[А-Я|а-я]?-\d{2}")
async def command_set_group(message: types.Message):
    # Free text matching the group-name pattern (e.g. "АБВ-123А-45"):
    # register this chat under that group, or switch an existing registration.
    group_name = message.text
    group = session.scalars(select(Groups).filter(
        Groups.group_name == group_name)).first()
    if group is None:
        # Pattern matched but the group does not exist in the DB.
        logger.error(LOGGER_MESSAGES['invalid_group'])
        await message.answer(text=LOGGER_MESSAGES['invalid_group'],
                             reply_markup=schedule_keyboard)
    else:
        user = session.scalars(select(Users).filter(
            Users.chat_id == message.chat.id)).first()
        if user is None:
            # First registration for this chat.
            new_user = Users(chat_id=message.chat.id,
                             group_id=group.group_id,
                             )
            session.add(new_user)
            session.commit()
            await message.answer(text='Ваша группа записана',
                                 reply_markup=schedule_keyboard)
        else:
            # Chat already registered: point it at the new group.
            user.group_id = group.group_id
            session.commit()
            await message.answer(text='Ваша группа изменена',
                                 reply_markup=schedule_keyboard)
@dispatcher.message_handler()
async def cant_talk(message: types.Message):
    """Catch-all handler: unknown slash command or free-form chatter."""
    looks_like_command = message.text.startswith('/')
    if looks_like_command:
        reply = HELP_MESSAGES['no_command']
    else:
        reply = HELP_MESSAGES['cant_talk']
    await message.answer(reply, reply_markup=schedule_keyboard)
async def on_startup(dispatcher):
    # Register the webhook with Telegram, discarding updates queued while
    # the bot was down.
    await bot.set_webhook(WEBHOOK_URL, drop_pending_updates=True)
    logger.debug(LOGGER_MESSAGES['webhook_set'])
async def on_shutdown(dispatcher):
    # Release resources: close the shared DB session and deregister the
    # webhook so Telegram stops delivering updates.
    session.close()
    await bot.delete_webhook()
    logger.debug(LOGGER_MESSAGES['shutdown'])
def main():
    """Start the bot in webhook mode.

    Fix: ``on_shutdown`` was defined but never registered, so the DB session
    was never closed and the webhook never deleted on exit; it is now passed
    to ``start_webhook``.
    """
    start_webhook(
        dispatcher=dispatcher,
        webhook_path=WEBHOOK_PATH,
        skip_updates=True,
        on_startup=on_startup,
        on_shutdown=on_shutdown,
        host=WEBAPP_HOST,
        port=WEBAPP_PORT,
    )
if __name__ == '__main__':
main() | telegram_bot/bot_main.py | from aiogram import Bot, types
from aiogram.contrib.middlewares.logging import LoggingMiddleware
from aiogram.dispatcher import Dispatcher
from aiogram.utils.executor import start_webhook
from sqlalchemy import select
from sqlalchemy.orm import Session
from api_parsing.utils import (calculate_time, get_json_from_api,
get_schedule_message)
from db_connection import Groups, Users, create_engine_connection
from telegram_bot.keyboard import schedule_keyboard
from telegram_bot.messages import (HELP_MESSAGES, LOGGER_MESSAGES)
from telegram_bot.settings import (API_URL, TELEGRAM_TOKEN, WEBAPP_HOST,
WEBAPP_PORT, WEBHOOK_PATH,
WEBHOOK_URL, logger)
bot = Bot(token=TELEGRAM_TOKEN)
dispatcher = Dispatcher(bot)
dispatcher.middleware.setup(LoggingMiddleware())
db_engine = create_engine_connection()
session = Session(db_engine)
@dispatcher.message_handler(commands=['start'])
async def command_start(message: types.Message):
await message.answer(text=HELP_MESSAGES['start'],
reply_markup=schedule_keyboard)
@dispatcher.message_handler(commands=['help'])
async def command_help(message: types.Message):
await message.answer(text=HELP_MESSAGES['help'],
reply_markup=schedule_keyboard)
@dispatcher.message_handler(commands=['set_group'])
async def command_get_schedule(message: types.Message):
await message.answer(text=HELP_MESSAGES['set_group'],
reply_markup=schedule_keyboard)
@dispatcher.message_handler(commands=['group'])
async def command_group(message: types.Message):
user = session.scalars(select(Users).filter(
Users.chat_id == message.chat.id)).first()
if user is None:
await message.answer(HELP_MESSAGES['user_not_exist'],
reply_markup=schedule_keyboard)
else:
group = session.scalars(select(Groups).filter(
Groups.group_id == user.group_id)).first()
await message.answer(text=f'Ваша группа {group.group_name}',
reply_markup=schedule_keyboard)
@dispatcher.message_handler(commands=['get_schedule'])
async def send_schedule(message: types.Message):
user = session.scalars(select(Users).filter(
Users.chat_id == message.chat.id)).first()
if user is None:
await message.answer(HELP_MESSAGES['user_not_exist'],
reply_markup=schedule_keyboard)
else:
time = calculate_time()
response_json = await get_json_from_api(
url=API_URL,
group_id=user.group_id,
begin_date=time.get('start_time_str'),
end_date=time.get('end_time_str'))
if response_json:
schedule_message = await get_schedule_message(response_json)
await message.answer(text=schedule_message,
reply_markup=schedule_keyboard)
else:
await message.answer(text='На сегодня пар нет.',
reply_markup=schedule_keyboard)
@dispatcher.message_handler(
regexp=r"^[А-Я|а-я]*-\d{2,3}[А-Я|а-я]?-\d{2}")
async def command_set_group(message: types.Message):
group_name = message.text
group = session.scalars(select(Groups).filter(
Groups.group_name == group_name)).first()
if group is None:
logger.error(LOGGER_MESSAGES['invalid_group'])
await message.answer(text=LOGGER_MESSAGES['invalid_group'],
reply_markup=schedule_keyboard)
else:
user = session.scalars(select(Users).filter(
Users.chat_id == message.chat.id)).first()
if user is None:
new_user = Users(chat_id=message.chat.id,
group_id=group.group_id,
)
session.add(new_user)
session.commit()
await message.answer(text='Ваша группа записана',
reply_markup=schedule_keyboard)
else:
user.group_id = group.group_id
session.commit()
await message.answer(text='Ваша группа изменена',
reply_markup=schedule_keyboard)
@dispatcher.message_handler()
async def cant_talk(message: types.Message):
if message.text.startswith('/'):
await message.answer(HELP_MESSAGES['no_command'],
reply_markup=schedule_keyboard)
else:
await message.answer(HELP_MESSAGES['cant_talk'],
reply_markup=schedule_keyboard)
async def on_startup(dispatcher):
await bot.set_webhook(WEBHOOK_URL, drop_pending_updates=True)
logger.debug(LOGGER_MESSAGES['webhook_set'])
async def on_shutdown(dispatcher):
session.close()
await bot.delete_webhook()
logger.debug(LOGGER_MESSAGES['shutdown'])
def main():
start_webhook(
dispatcher=dispatcher,
webhook_path=WEBHOOK_PATH,
skip_updates=True,
on_startup=on_startup,
host=WEBAPP_HOST,
port=WEBAPP_PORT,
)
if __name__ == '__main__':
main() | 0.314787 | 0.096365 |
from DB_Connection import db
import smtplib
import pyttsx3
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from beautifultable import BeautifulTable
from Narrator import Narrator
from Login import User_Name, PIN_Confirmlg
import datetime as T
# Today's date, formatted dd/mm/yyyy, used on receipts and narrated messages.
y = T.datetime.now()
x = y.strftime("%d/%m/%Y")
# Load the logged-in user's row.
# Fix: the previous string-concatenated SELECT was SQL-injectable via the
# username/PIN; use a parameterized query instead.
# Assumes a %s-paramstyle DB-API driver (e.g. pymysql) — TODO confirm.
f = db.cursor()
f.execute("SELECT * FROM `userinfo` WHERE username=%s AND pin=%s",
          (User_Name, PIN_Confirmlg))
fd = f.fetchall()
for i in fd:
    # Column order: id, name, father name, DOB, email, username, pin,
    # previous/deposit/withdrawal/current amounts.
    ID = i[0]
    Name = i[1]
    F_Name = i[2]
    DOB = i[3]
    Email = i[4]
    User_Name = i[5]
    PIN_Confirm = i[6]
    Previous_Amount = i[7]
    Deposit_Amount = i[8]
    Withdrawal_Amount = i[9]
    Current_Amount = i[10]
class Functions:
    def Narrator(self, command):
        # Speak *command* aloud via the local text-to-speech engine (blocks
        # until playback finishes).
        engine = pyttsx3.init()
        voices = engine.getProperty('voices')
        rate = engine.getProperty('rate')  # NOTE(review): fetched but unused
        engine.setProperty('voice', voices[0].id)
        engine.setProperty('rate', 175)
        engine.say(command)
        engine.runAndWait()
def profile(self):
Table = BeautifulTable()
Table.columns.header = ["*Fawad ATM Management System*\nUser Profile"]
Table.rows.append([Name])
Table.rows.append([F_Name])
Table.rows.append([DOB])
Table.rows.append(["FATMMS54622JUN20" + str(ID)])
Table.rows.header = [" Name ", " Father Name ", " Birth Date ", " IBAN "]
Table.set_style(BeautifulTable.STYLE_BOX_ROUNDED)
Table.columns.padding_right[0] = 16
Table.columns.padding_left[0] = 16
print(Table)
self.footer()
def user_credential(self):
Table = BeautifulTable()
Table.columns.header = ["*Fawad ATM Management System*\nUser Credential"]
Table.rows.append([User_Name])
Table.rows.append([PIN_Confirm])
Table.rows.append(["FATMMS54622JUN20" + str(ID)])
Table.rows.header = [" User Name ", " PIN Code ", " IBAN "]
Table.set_style(BeautifulTable.STYLE_BOX_ROUNDED)
Table.columns.padding_right[0] = 17
Table.columns.padding_left[0] = 16
print(Table)
self.footer()
def cash_deposit(self):
Narrator("Enter Your Money Amount to Deposit in Your Account : ")
Deposit_Cash = int(input())
Current_Amount = Deposit_Cash + Previous_Amount
Narrator(
f"\nPleasure To Inform You That {Deposit_Cash} Rupees has been Deposited Successfully in Your Account on " + x + " And Your Record will be Updated in 24 Hrs")
f = db.cursor()
f.execute(
"UPDATE `userinfo` SET current_amount='" + str(Current_Amount) + "' WHERE username='" + User_Name + "'")
f.execute("UPDATE `userinfo` SET deposit_amount='" + str(Deposit_Cash) + "' WHERE username='" + User_Name + "'")
db.commit()
def cash_withdrawal(self):
if Current_Amount < 100:
Narrator("Sorry You Cannot Withdrawal Money Because You Don't have Enough Money In Your Account ")
else:
Narrator("\nEnter Your Money Amount You Want To Withdrawal From Your Account You Have " + str(
Current_Amount) + " Rupees In Your Account ")
Withdrawal_Cash = int(input())
Current_Amount1 = Current_Amount - Withdrawal_Cash
Narrator(
f"You Have Successfully Withdrawal Amount Of {Withdrawal_Cash} Rupees From Your Account on " + x + " And Your Record will be Updated in 24 Hrs")
f = db.cursor()
f.execute(
"UPDATE `userinfo` SET current_amount='" + str(
Current_Amount1) + "' WHERE username='" + User_Name + "'")
f.execute("UPDATE `userinfo` SET withdrawal_amount ='" + str(
Withdrawal_Cash) + "' WHERE username='" + User_Name + "'")
db.commit()
    def balance_enquiry(self):
        # Print a two-part balance report: a title card followed by a row of
        # amount columns, then the footer banner.
        Table = BeautifulTable()
        Table.columns.header = ["*Fawad ATM Management System*\nBalance Enquiry"]
        Table.rows.append([" " * 30])  # spacer row so the title box has width
        Table.set_style(BeautifulTable.STYLE_BOX_ROUNDED)
        Table.columns.padding_right[0] = 16
        Table.columns.padding_left[0] = 16
        table = BeautifulTable()
        table.columns.header = ["Previous\nAmount", "Cash\nDeposite", "Cash\nWithdrawl", "Current\nAmount", "Date"]
        table.rows.append([Previous_Amount, Deposit_Amount, Withdrawal_Amount, Current_Amount, x])
        table.set_style(BeautifulTable.STYLE_BOX_ROUNDED)
        table.columns.padding_right['Previous\nAmount'] = 5
        table.columns.padding_right['Cash\nDeposite'] = 5
        table.columns.padding_right['Cash\nWithdrawl'] = 5
        table.columns.padding_right['Current\nAmount'] = 5
        table.columns.padding_right['Date'] = 7
        print(Table)
        print("|" + "**" * 39 + "|")  # divider between title and amounts
        print(table)
        self.footer()
    def receipt(self):
        # Print a full receipt: identity card on top, amount summary below,
        # then the footer banner.
        Table = BeautifulTable()
        Table.columns.header = ["*Fawad ATM Management System*\nAccount Summary/Receipt"]
        Table.rows.append([Name])
        Table.rows.append([F_Name])
        Table.rows.append([DOB])
        Table.rows.append(["FATMMS54622JUN20" + str(ID)])  # synthetic IBAN
        Table.rows.header = [" Name ", " Father Name ", " Birth Date ", " IBAN "]
        Table.set_style(BeautifulTable.STYLE_BOX_ROUNDED)
        Table.columns.padding_right[0] = 16
        Table.columns.padding_left[0] = 16
        table = BeautifulTable()
        table.columns.header = ["Previous\nAmount", "Cash\nDeposite", "Cash\nWithdrawl", "Current\nAmount", "Date"]
        table.rows.append([Previous_Amount, Deposit_Amount, Withdrawal_Amount, Current_Amount, x])
        table.set_style(BeautifulTable.STYLE_BOX_ROUNDED)
        table.columns.padding_right['Previous\nAmount'] = 5
        table.columns.padding_right['Cash\nDeposite'] = 5
        table.columns.padding_right['Cash\nWithdrawl'] = 5
        table.columns.padding_right['Current\nAmount'] = 5
        table.columns.padding_right['Date'] = 7
        print(Table)
        print("|" + "**" * 39 + "|")  # divider between the two tables
        print(table)
        self.footer()
def change_pin(self):
while True:
old_PIN = input("Enter Your Old PIN Code : ")
if old_PIN == PIN_Confirm:
New_PIN = input("\nEnter Your New PIN Code : ")
while True:
New_PIN_Confirm = input("Enter Your New PIN Again : ")
if New_PIN == New_PIN_Confirm:
f = db.cursor()
f.execute(
"UPDATE `userinfo` SET pin='" + New_PIN_Confirm + "' WHERE username='" + User_Name + "'")
db.commit()
print("\nYour PIN Changed Successfully ")
break
else:
print("\nPIN New PIN Match Enter Your PIN Again ")
break
else:
print("\nPIN Not Match Enter Your PIN Again")
def delete_account(self):
print("\nDo You Really Want to Delete Your Account (Y / N) : ")
while True:
Key = input().upper()
if Key == "Y":
f = db.cursor()
f.execute("DELETE FROM `userinfo` WHERE username='" + User_Name + "'")
db.commit()
print("\nYour Account has been Deleted Sucessfully ")
break
elif Key == "N":
print("\nYou Have Canceled While Deleting Your Account")
break
else:
print("\nInvalid Key Press (Y or N) : ")
def footer(self):
ftr = ["|" + "mm" * 39 + "|",
"|_________________Copyright@ 2020 Fawad. All Rights Reserved________________v2_|",
"*~~~~~~~--------------~~First Pro Python Programm Ever~~----------------~~~~~~~*\n\n"]
for f in ftr:
print(f)
    def send_email(self):
        # Email the new-account summary to the user as an HTML table over
        # SMTP-over-SSL (Gmail).
        # NOTE(review): the table cells and headers use i[4]/i[5]/i[6]
        # (email/username/pin) under "User Name"/"PIN"/"Amount", and the mail
        # is delivered via Bcc to i[7] (the previous-amount column per the
        # row unpacking above) — the recipient address is presumably meant to
        # be the email column i[4]; confirm the intended indices.
        message = f"""<html> <body> <h1 style="text-align: center;color: red">Fawad ATM Management
        System</h1><br><br> <h4 style="text-align: center;">Hey! Dear Most Strongly Welcome To FATMMS<br> Your
        Account has been Created Successfully <br> Your Account Summary is </h4> <table style="width: 100%;"> <tr
        style="background-color: green;color:white"> <th style="border: 2px solid #dddddd;text-align: center;padding:
        8px;">Name</th> <th style="border: 2px solid #dddddd;text-align: center;padding: 8px;">F.Name</th> <th
        style="border: 2px solid #dddddd;text-align: center;padding: 8px;">User Name</th> <th style="border: 2px
        solid #dddddd;text-align: center;padding: 8px;">PIN</th> <th style="border: 2px solid #dddddd;text-align:
        center;padding: 8px;">Amount</th> </tr>
        <tr>
        <td style="border: 2px solid #dddddd;text-align: center;padding: 8px;"><b>{i[1]}</b></td>
        <td style="border: 2px solid #dddddd;text-align: center;padding: 8px;"><b>{i[2]}</b></td>
        <td style="border: 2px solid #dddddd;text-align: center;padding: 8px;"><b>{i[4]}</b></td>
        <td style="border: 2px solid #dddddd;text-align: center;padding: 8px;"><b>{i[5]}</b></td>
        <td style="border: 2px solid #dddddd;text-align: center;padding: 8px;"><b>{i[6]}</b></td>
        </tr>
        </table>
        <p style="position:fixed;bottom:0;width: 100%;background-color:black;color:yellow;text-align:center">Copyright@ 2020 Fawad. All Rights Reserved</P>
        </body>
        </html>"""
        msg = MIMEMultipart()
        msg["From"] = "Fawad ATM Managment System"
        msg["To"] = i[1]
        msg["Subject"] = "Most Strongly Welcome To FATMS"
        msg["Bcc"] = i[7]
        msg.attach(MIMEText(message, 'html'))
        # Hard-coded credentials placeholders; real secrets must come from
        # configuration, never the source file.
        server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
        server.login('<EMAIL>', '<PASSWORD>')
        server.sendmail("<EMAIL>", i[7], msg.as_string())
        server.quit()
print("\nPlease Check Your Email Inbox For Account Details") | Functionality.py | from DB_Connection import db
import smtplib
import pyttsx3
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from beautifultable import BeautifulTable
from Narrator import Narrator
from Login import User_Name, PIN_Confirmlg
import datetime as T
y = T.datetime.now()
x = y.strftime("%d/%m/%Y")
f = db.cursor()
f.execute("SELECT * FROM `userinfo` WHERE username='" + User_Name + "' AND pin='" + PIN_Confirmlg + "'")
fd = f.fetchall()
for i in fd:
ID = i[0]
Name = i[1]
F_Name = i[2]
DOB = i[3]
Email = i[4]
User_Name = i[5]
PIN_Confirm = i[6]
Previous_Amount = i[7]
Deposit_Amount = i[8]
Withdrawal_Amount = i[9]
Current_Amount = i[10]
class Functions:
def Narrator(self, command):
engine = pyttsx3.init()
voices = engine.getProperty('voices')
rate = engine.getProperty('rate')
engine.setProperty('voice', voices[0].id)
engine.setProperty('rate', 175)
engine.say(command)
engine.runAndWait()
def profile(self):
Table = BeautifulTable()
Table.columns.header = ["*Fawad ATM Management System*\nUser Profile"]
Table.rows.append([Name])
Table.rows.append([F_Name])
Table.rows.append([DOB])
Table.rows.append(["FATMMS54622JUN20" + str(ID)])
Table.rows.header = [" Name ", " Father Name ", " Birth Date ", " IBAN "]
Table.set_style(BeautifulTable.STYLE_BOX_ROUNDED)
Table.columns.padding_right[0] = 16
Table.columns.padding_left[0] = 16
print(Table)
self.footer()
def user_credential(self):
Table = BeautifulTable()
Table.columns.header = ["*Fawad ATM Management System*\nUser Credential"]
Table.rows.append([User_Name])
Table.rows.append([PIN_Confirm])
Table.rows.append(["FATMMS54622JUN20" + str(ID)])
Table.rows.header = [" User Name ", " PIN Code ", " IBAN "]
Table.set_style(BeautifulTable.STYLE_BOX_ROUNDED)
Table.columns.padding_right[0] = 17
Table.columns.padding_left[0] = 16
print(Table)
self.footer()
def cash_deposit(self):
Narrator("Enter Your Money Amount to Deposit in Your Account : ")
Deposit_Cash = int(input())
Current_Amount = Deposit_Cash + Previous_Amount
Narrator(
f"\nPleasure To Inform You That {Deposit_Cash} Rupees has been Deposited Successfully in Your Account on " + x + " And Your Record will be Updated in 24 Hrs")
f = db.cursor()
f.execute(
"UPDATE `userinfo` SET current_amount='" + str(Current_Amount) + "' WHERE username='" + User_Name + "'")
f.execute("UPDATE `userinfo` SET deposit_amount='" + str(Deposit_Cash) + "' WHERE username='" + User_Name + "'")
db.commit()
def cash_withdrawal(self):
if Current_Amount < 100:
Narrator("Sorry You Cannot Withdrawal Money Because You Don't have Enough Money In Your Account ")
else:
Narrator("\nEnter Your Money Amount You Want To Withdrawal From Your Account You Have " + str(
Current_Amount) + " Rupees In Your Account ")
Withdrawal_Cash = int(input())
Current_Amount1 = Current_Amount - Withdrawal_Cash
Narrator(
f"You Have Successfully Withdrawal Amount Of {Withdrawal_Cash} Rupees From Your Account on " + x + " And Your Record will be Updated in 24 Hrs")
f = db.cursor()
f.execute(
"UPDATE `userinfo` SET current_amount='" + str(
Current_Amount1) + "' WHERE username='" + User_Name + "'")
f.execute("UPDATE `userinfo` SET withdrawal_amount ='" + str(
Withdrawal_Cash) + "' WHERE username='" + User_Name + "'")
db.commit()
def balance_enquiry(self):
Table = BeautifulTable()
Table.columns.header = ["*Fawad ATM Management System*\nBalance Enquiry"]
Table.rows.append([" " * 30])
Table.set_style(BeautifulTable.STYLE_BOX_ROUNDED)
Table.columns.padding_right[0] = 16
Table.columns.padding_left[0] = 16
table = BeautifulTable()
table.columns.header = ["Previous\nAmount", "Cash\nDeposite", "Cash\nWithdrawl", "Current\nAmount", "Date"]
table.rows.append([Previous_Amount, Deposit_Amount, Withdrawal_Amount, Current_Amount, x])
table.set_style(BeautifulTable.STYLE_BOX_ROUNDED)
table.columns.padding_right['Previous\nAmount'] = 5
table.columns.padding_right['Cash\nDeposite'] = 5
table.columns.padding_right['Cash\nWithdrawl'] = 5
table.columns.padding_right['Current\nAmount'] = 5
table.columns.padding_right['Date'] = 7
print(Table)
print("|" + "**" * 39 + "|")
print(table)
self.footer()
def receipt(self):
Table = BeautifulTable()
Table.columns.header = ["*Fawad ATM Management System*\nAccount Summary/Receipt"]
Table.rows.append([Name])
Table.rows.append([F_Name])
Table.rows.append([DOB])
Table.rows.append(["FATMMS54622JUN20" + str(ID)])
Table.rows.header = [" Name ", " Father Name ", " Birth Date ", " IBAN "]
Table.set_style(BeautifulTable.STYLE_BOX_ROUNDED)
Table.columns.padding_right[0] = 16
Table.columns.padding_left[0] = 16
table = BeautifulTable()
table.columns.header = ["Previous\nAmount", "Cash\nDeposite", "Cash\nWithdrawl", "Current\nAmount", "Date"]
table.rows.append([Previous_Amount, Deposit_Amount, Withdrawal_Amount, Current_Amount, x])
table.set_style(BeautifulTable.STYLE_BOX_ROUNDED)
table.columns.padding_right['Previous\nAmount'] = 5
table.columns.padding_right['Cash\nDeposite'] = 5
table.columns.padding_right['Cash\nWithdrawl'] = 5
table.columns.padding_right['Current\nAmount'] = 5
table.columns.padding_right['Date'] = 7
print(Table)
print("|" + "**" * 39 + "|")
print(table)
self.footer()
def change_pin(self):
while True:
old_PIN = input("Enter Your Old PIN Code : ")
if old_PIN == PIN_Confirm:
New_PIN = input("\nEnter Your New PIN Code : ")
while True:
New_PIN_Confirm = input("Enter Your New PIN Again : ")
if New_PIN == New_PIN_Confirm:
f = db.cursor()
f.execute(
"UPDATE `userinfo` SET pin='" + New_PIN_Confirm + "' WHERE username='" + User_Name + "'")
db.commit()
print("\nYour PIN Changed Successfully ")
break
else:
print("\nPIN New PIN Match Enter Your PIN Again ")
break
else:
print("\nPIN Not Match Enter Your PIN Again")
def delete_account(self):
print("\nDo You Really Want to Delete Your Account (Y / N) : ")
while True:
Key = input().upper()
if Key == "Y":
f = db.cursor()
f.execute("DELETE FROM `userinfo` WHERE username='" + User_Name + "'")
db.commit()
print("\nYour Account has been Deleted Sucessfully ")
break
elif Key == "N":
print("\nYou Have Canceled While Deleting Your Account")
break
else:
print("\nInvalid Key Press (Y or N) : ")
def footer(self):
ftr = ["|" + "mm" * 39 + "|",
"|_________________Copyright@ 2020 Fawad. All Rights Reserved________________v2_|",
"*~~~~~~~--------------~~First Pro Python Programm Ever~~----------------~~~~~~~*\n\n"]
for f in ftr:
print(f)
def send_email(self):
message = f"""<html> <body> <h1 style="text-align: center;color: red">Fawad ATM Management
System</h1><br><br> <h4 style="text-align: center;">Hey! Dear Most Strongly Welcome To FATMMS<br> Your
Account has been Created Successfully <br> Your Account Summary is </h4> <table style="width: 100%;"> <tr
style="background-color: green;color:white"> <th style="border: 2px solid #dddddd;text-align: center;padding:
8px;">Name</th> <th style="border: 2px solid #dddddd;text-align: center;padding: 8px;">F.Name</th> <th
style="border: 2px solid #dddddd;text-align: center;padding: 8px;">User Name</th> <th style="border: 2px
solid #dddddd;text-align: center;padding: 8px;">PIN</th> <th style="border: 2px solid #dddddd;text-align:
center;padding: 8px;">Amount</th> </tr>
<tr>
<td style="border: 2px solid #dddddd;text-align: center;padding: 8px;"><b>{i[1]}</b></td>
<td style="border: 2px solid #dddddd;text-align: center;padding: 8px;"><b>{i[2]}</b></td>
<td style="border: 2px solid #dddddd;text-align: center;padding: 8px;"><b>{i[4]}</b></td>
<td style="border: 2px solid #dddddd;text-align: center;padding: 8px;"><b>{i[5]}</b></td>
<td style="border: 2px solid #dddddd;text-align: center;padding: 8px;"><b>{i[6]}</b></td>
</tr>
</table>
<p style="position:fixed;bottom:0;width: 100%;background-color:black;color:yellow;text-align:center">Copyright@ 2020 Fawad. All Rights Reserved</P>
</body>
</html>"""
msg = MIMEMultipart()
msg["From"] = "Fawad ATM Managment System"
msg["To"] = i[1]
msg["Subject"] = "Most Strongly Welcome To FATMS"
msg["Bcc"] = i[7]
msg.attach(MIMEText(message, 'html'))
server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
server.login('<EMAIL>', '<PASSWORD>')
server.sendmail("<EMAIL>", i[7], msg.as_string())
server.quit()
print("\nPlease Check Your Email Inbox For Account Details") | 0.283484 | 0.08472 |
import pickle
from tqdm import tqdm
import pandas as pd
# Read in the datasets.
# Fix: the input files were opened into a variable confusingly named `path`
# and never closed; use context managers instead.
# Player statistics data
with open(r'C:\Users\charl\Desktop\FantasyFootball\Players\player_stats.pk', 'rb') as infile:
    player_stats = pickle.load(infile)
# results data for name mapping
with open(r'C:\Users\charl\Desktop\FantasyFootball\Results\results_stats.pk', 'rb') as infile:
    result_stats = pickle.load(infile)
# Rename teams to match the results_stats dictionary. Both club-name lists
# are sorted alphabetically and paired positionally — assumes both sources
# cover the same clubs in the same alphabetical order; TODO confirm.
teams_1 = sorted({v['details']['club'] for v in player_stats.values()})
teams_2 = sorted(result_stats)
team_name_map = dict(zip(teams_1, teams_2))
# Clean the dataframes for each player
for name, stats in tqdm(player_stats.items()):
    # clean the history dataframe: 'Season' "2019/20" -> "2019",
    # '£' "£5.5" -> 5.5
    temp_df = player_stats[name]['history']
    try:
        temp_df['Season'] = [x.split('/')[0] for x in temp_df['Season']]
        temp_df['£'] = [float(x.split('£')[1]) for x in temp_df['£']]
        player_stats[name]['history'] = temp_df
    except KeyError:
        # No season history available for this player.
        player_stats[name]['history'] = None
    # clean the stats dataframe
    temp_df = player_stats[name]['stats']
    try:
        # Drop the one malformed row whose OPP cell has fewer than 5 tokens.
        drop_row = temp_df.loc[temp_df['OPP'] == [x for x in temp_df['OPP'] if len(x.split(' ')) < 5][0]].index[0]
        temp_df = temp_df.drop([drop_row])
    except IndexError:
        pass  # no malformed row present
    # Create new columns from the opposition string (e.g. "CHE (A) 1 - 2"):
    # the player's own club fills the side they played on.
    temp_df['home_team'] = [x.split(' ')[0] if '(A)' in x else player_stats[name]['details']['club'] for x in temp_df['OPP']]
    temp_df['away_team'] = [x.split(' ')[0] if '(H)' in x else player_stats[name]['details']['club'] for x in temp_df['OPP']]
    temp_df['home_score'] = [x.split(' ')[2] for x in temp_df['OPP']]
    temp_df['away_score'] = [x.split(' ')[4] for x in temp_df['OPP']]
    temp_df['£'] = [float(x.split('£')[1]) for x in temp_df['£']]
    # Delete the opposition column
    del temp_df['OPP']
    # Rename the columns (positional rename: 'home_score'/'away_score'
    # become 'home_goals'/'away_goals' here).
    temp_df.columns = ['GameWeek','Points','MinutesPlayed','GoalsScored','Assists',
                       'CleanSheets','GoalsConceded','OwnGoals',
                       'PenaltySaves','PenaltyMisses','YellowCards',
                       'RedCards','Saves','Bonus','BonusPointSystem',
                       'Influence','Creativity','Threat','IctIndex',
                       'NetTransfers', 'SelectedBy', '£', 'home_team',
                       'away_team', 'home_goals', 'away_goals']
    # Reorder columns
    player_stats[name]['stats'] = temp_df[['GameWeek','home_team','away_team','home_goals',
                                           'away_goals','Points','MinutesPlayed','GoalsScored','Assists',
                                           'CleanSheets','GoalsConceded','OwnGoals',
                                           'PenaltySaves','PenaltyMisses','YellowCards',
                                           'RedCards','Saves','Bonus','BonusPointSystem',
                                           'Influence','Creativity','Threat','IctIndex',
                                           'NetTransfers', 'SelectedBy', '£']]
    # Map full names onto club names
    player_stats[name]['details']['club'] = team_name_map[player_stats[name]['details']['club']]
# Save dictionary to path as pickle file
path = open(r'C:\Users\charl\Desktop\FantasyFootball\Players\player_stats_cleaned.pk', 'wb')
pickle.dump(player_stats, path) | tests/Clean_PlayerStats.py | import pickle
from tqdm import tqdm
import pandas as pd
# Read in the datasets
# Player statistics data
path = open(r'C:\Users\charl\Desktop\FantasyFootball\Players\player_stats.pk', 'rb')
player_stats = pickle.load(path)
# results data for name mapping
path = open(r'C:\Users\charl\Desktop\FantasyFootball\Results\results_stats.pk', 'rb')
result_stats = pickle.load(path)
# Rename teams to match results_stats dictionary
team_name_map = {}
# sort both by alphabetical order so that they can be mapped in a loop
teams_1 = list(set([v['details']['club'] for k,v in player_stats.items()]))
teams_1.sort()
teams_2 = [k for k,v in result_stats.items()]
teams_2.sort()
for team in enumerate(teams_1):
team_name_map[team[1]] = teams_2[team[0]]
# Clean the dataframes for each player
for name, stats in tqdm(player_stats.items()):
# clean the history dataframe
temp_df = player_stats[name]['history']
try:
temp_df['Season'] = [x.split('/')[0] for x in temp_df['Season']]
temp_df['£'] = [float(x.split('£')[1]) for x in temp_df['£']]
player_stats[name]['history'] = temp_df
except KeyError:
player_stats[name]['history'] = None
# clean the stats dataframe
temp_df = player_stats[name]['stats']
try:
drop_row = temp_df.loc[temp_df['OPP'] == [x for x in temp_df['OPP'] if len(x.split(' ')) < 5][0]].index[0]
temp_df = temp_df.drop([drop_row])
except IndexError:
pass
# Create new columns using the strings in the opposition column
temp_df['home_team'] = [x.split(' ')[0] if '(A)' in x else player_stats[name]['details']['club'] for x in temp_df['OPP']]
temp_df['away_team'] = [x.split(' ')[0] if '(H)' in x else player_stats[name]['details']['club'] for x in temp_df['OPP']]
temp_df['home_score'] = [x.split(' ')[2] for x in temp_df['OPP']]
temp_df['away_score'] = [x.split(' ')[4] for x in temp_df['OPP']]
temp_df['£'] = [float(x.split('£')[1]) for x in temp_df['£']]
# Delete the opposition column
del temp_df['OPP']
# Rename the columns
temp_df.columns = ['GameWeek','Points','MinutesPlayed','GoalsScored','Assists',
'CleanSheets','GoalsConceded','OwnGoals',
'PenaltySaves','PenaltyMisses','YellowCards',
'RedCards','Saves','Bonus','BonusPointSystem',
'Influence','Creativity','Threat','IctIndex',
'NetTransfers', 'SelectedBy', '£', 'home_team',
'away_team', 'home_goals', 'away_goals']
# Reorder columns
player_stats[name]['stats'] = temp_df[['GameWeek','home_team','away_team','home_goals',
'away_goals','Points','MinutesPlayed','GoalsScored','Assists',
'CleanSheets','GoalsConceded','OwnGoals',
'PenaltySaves','PenaltyMisses','YellowCards',
'RedCards','Saves','Bonus','BonusPointSystem',
'Influence','Creativity','Threat','IctIndex',
'NetTransfers', 'SelectedBy', '£']]
# Map full names onto club names
player_stats[name]['details']['club'] = team_name_map[player_stats[name]['details']['club']]
# Save dictionary to path as pickle file
path = open(r'C:\Users\charl\Desktop\FantasyFootball\Players\player_stats_cleaned.pk', 'wb')
pickle.dump(player_stats, path) | 0.165054 | 0.193414 |
__author__ = "<NAME> (<EMAIL>)"
import copy
import json
import numpy as np
import scipy.sparse as spsp
from thread2vec.common import get_package_path
def get_data(dataset, scale):
if dataset == "youtube":
item_to_userset_filepath = get_package_path() + "/data_folder/anonymized_data/youtube/item_to_userset_" + scale + ".txt"
anonymize_user_filepath = get_package_path() + "/data_folder/anonymized_data/youtube/anonymize_user_" + scale + ".txt"
popularity_filepath = get_package_path() + "/data_folder/anonymized_data/youtube/item_to_popularity.txt"
anonymous_coward_name = repr(0)
top_users = 200001
total_number_of_items = 516995
elif dataset == "reddit":
item_to_userset_filepath = get_package_path() + "/data_folder/anonymized_data/reddit/item_to_userset_" + scale + ".txt"
anonymize_user_filepath = get_package_path() + "/data_folder/anonymized_data/reddit/anonymize_user_" + scale + ".txt"
popularity_filepath = get_package_path() + "/data_folder/anonymized_data/reddit/item_to_popularity.txt"
anonymous_coward_name = repr(0)
top_users = 20000
total_number_of_items = 35844
else:
raise ValueError("Invalid dataset.")
# Read popularity values.
bad_popularity_items = list()
popularity_matrix = np.empty((total_number_of_items, 4), dtype=np.float32)
with open(popularity_filepath, "r") as fp:
file_row = next(fp)
item_counter = 0
for file_row in fp:
clean_row = file_row.strip().split("\t")
if clean_row[0] == "None":
popularity_matrix[item_counter, 0] = np.nan
popularity_matrix[item_counter, 1] = np.nan
popularity_matrix[item_counter, 2] = np.nan
popularity_matrix[item_counter, 3] = np.nan
bad_popularity_items.append(item_counter)
else:
popularity_matrix[item_counter, 0] = float(clean_row[0])
popularity_matrix[item_counter, 1] = float(clean_row[1])
popularity_matrix[item_counter, 2] = float(clean_row[2])
popularity_matrix[item_counter, 3] = float(clean_row[3])
item_counter += 1
bad_popularity_items = np.array(bad_popularity_items, dtype=np.int32)
# Read user anonymizer.
anonymize_user = dict()
with open(anonymize_user_filepath, "r") as fp:
for file_row in fp:
clean_row = file_row.strip().split("\t")
anonymize_user[clean_row[0]] = int(clean_row[1])
total_number_of_users = len(anonymize_user)
true_anonymize_user = copy.copy(anonymize_user)
user_list = list()
for i in range(total_number_of_users):
user_list.append(None)
for k, v in anonymize_user.items():
user_list[v] = k
anonymous_coward_within_discussion = anonymize_user[anonymous_coward_name]
# Read item to userset.
item_to_user_row = list()
item_to_user_col = list()
item_to_user_matrix = spsp.coo_matrix((np.array(list(), dtype=np.int32),
(np.array(list(), dtype=np.int32),
np.array(list(), dtype=np.int32))),
shape=(total_number_of_items,
total_number_of_users))
item_to_user_matrix = spsp.csc_matrix(item_to_user_matrix)
with open(item_to_userset_filepath, "r") as fp:
counter = 0
for file_row in fp:
clean_row = file_row.strip().split("\t")
for user in clean_row[1:]:
item_to_user_row.append(int(clean_row[0]))
item_to_user_col.append(int(user))
counter += 1
if counter % 10000 == 0:
item_to_user_row = np.array(item_to_user_row, dtype=np.int32)
item_to_user_col = np.array(item_to_user_col, dtype=np.int32)
item_to_user_data = np.ones_like(item_to_user_row, dtype=np.int32)
item_to_user_matrix_to_add = spsp.coo_matrix((item_to_user_data,
(item_to_user_row,
item_to_user_col)),
shape=(total_number_of_items,
total_number_of_users))
item_to_user_matrix_to_add = spsp.csc_matrix(item_to_user_matrix_to_add)
item_to_user_matrix = item_to_user_matrix + item_to_user_matrix_to_add
item_to_user_row = list()
item_to_user_col = list()
item_to_user_row = np.array(item_to_user_row, dtype=np.int32)
item_to_user_col = np.array(item_to_user_col, dtype=np.int32)
item_to_user_data = np.ones_like(item_to_user_row, dtype=np.int32)
item_to_user_matrix_to_add = spsp.coo_matrix((item_to_user_data,
(item_to_user_row,
item_to_user_col)),
shape=(total_number_of_items,
total_number_of_users))
item_to_user_matrix_to_add = spsp.csc_matrix(item_to_user_matrix_to_add)
item_to_user_matrix = item_to_user_matrix + item_to_user_matrix_to_add
if top_users is not None:
user_to_item_distribution = item_to_user_matrix.sum(axis=0)
user_indices_sorted = np.empty(top_users, dtype=np.int32)
user_indices_sorted_to_add = np.argsort(user_to_item_distribution)[0, -top_users:]
user_indices_sorted[:] = user_indices_sorted_to_add
user_indices_sorted = user_indices_sorted[user_indices_sorted != anonymous_coward_within_discussion]
user_indices_sorted_set = set(list(user_indices_sorted))
filtered_item_to_user_matrix = item_to_user_matrix[:, user_indices_sorted]
new_user_list = list()
new_anonymize_user = dict()
counter = 0
for user in user_list:
if anonymize_user[user] in user_indices_sorted_set:
new_user_list.append(user)
new_anonymize_user[user] = counter
counter += 1
user_list = new_user_list
anonymize_user = new_anonymize_user
else:
top_users = total_number_of_users
user_to_item_distribution = np.empty(top_users, dtype=np.int32)
user_to_item_distribution[:] = item_to_user_matrix.sum(axis=0)[0, :]
user_indices_sorted = np.arange(user_to_item_distribution.size, dtype=np.int32)
user_indices_sorted = user_indices_sorted[user_to_item_distribution > 1]
user_indices_sorted = user_indices_sorted[user_indices_sorted != anonymous_coward_within_discussion]
user_indices_sorted_set = set(list(user_indices_sorted))
filtered_item_to_user_matrix = item_to_user_matrix[:, user_indices_sorted]
new_user_list = list()
new_anonymize_user = dict()
counter = 0
for user in user_list:
if anonymize_user[user] in user_indices_sorted_set:
new_user_list.append(user)
new_anonymize_user[user] = counter
counter += 1
user_list = new_user_list
anonymize_user = new_anonymize_user
# item_to_user_distribution = filtered_item_to_user_matrix.sum(axis=1)
# item_to_user_distribution = item_to_user_distribution[item_to_user_distribution > 1]
item_to_user_distribution = np.empty(total_number_of_items, dtype=np.int32)
item_to_user_distribution[:] = filtered_item_to_user_matrix.sum(axis=1)[:, 0].transpose()
item_indices_sorted = np.arange(total_number_of_items, dtype=np.int32)
item_indices_sorted = item_indices_sorted[item_to_user_distribution > 0]
item_indices_sorted = np.setdiff1d(item_indices_sorted, bad_popularity_items)
filtered_item_to_user_matrix = spsp.csr_matrix(filtered_item_to_user_matrix)
filtered_item_to_user_matrix = filtered_item_to_user_matrix[item_indices_sorted, :]
popularity_matrix = popularity_matrix[item_indices_sorted, :]
user_to_item_distribution = np.empty(len(anonymize_user), dtype=np.int32)
user_to_item_distribution[:] = filtered_item_to_user_matrix.sum(axis=0)[0, :]
user_indices_sorted = np.arange(user_to_item_distribution.size, dtype=np.int32)
user_indices_sorted = user_indices_sorted[user_to_item_distribution > 0]
user_indices_sorted = user_indices_sorted[user_indices_sorted != anonymous_coward_within_discussion]
user_indices_sorted_set = set(list(user_indices_sorted))
filtered_item_to_user_matrix = filtered_item_to_user_matrix[:, user_indices_sorted]
new_user_list = list()
new_anonymize_user = dict()
counter = 0
for user in user_list:
if anonymize_user[user] in user_indices_sorted_set:
new_user_list.append(user)
new_anonymize_user[user] = counter
counter += 1
user_list = new_user_list
anonymize_user = new_anonymize_user
true_user_id_to_user_id = dict()
for user in user_list:
k = true_anonymize_user[user]
v = anonymize_user[user]
true_user_id_to_user_id[k] = v
index_1 = int(np.ceil(filtered_item_to_user_matrix.shape[0] * 0.5))
index_2 = int(np.ceil(filtered_item_to_user_matrix.shape[0] * 0.75))
index_permutation = np.random.permutation(np.arange(filtered_item_to_user_matrix.shape[0], dtype=np.int32))
train = index_permutation[:index_1]
val = index_permutation[index_1:index_2]
test = index_permutation[index_2:]
data_splits = (train, val, test)
data = dict()
data["filtered_item_to_user_matrix"] = filtered_item_to_user_matrix
data["popularity_matrix"] = popularity_matrix
data["item_indices_sorted"] = item_indices_sorted
data["anonymize_user"] = anonymize_user
data["true_user_id_to_user_id"] = true_user_id_to_user_id
data["user_list"] = user_list
data["number_of_items"] = filtered_item_to_user_matrix.shape[0]
data["number_of_users"] = filtered_item_to_user_matrix.shape[1]
data["data_splits"] = data_splits
return data
def read_indices(dataset):
if dataset == "youtube":
indices_filepath = get_package_path() + "/data_folder/uniform_data/youtube/data_splits.txt"
elif dataset == "reddit":
indices_filepath = get_package_path() + "/data_folder/uniform_data/reddit/data_splits.txt"
else:
raise ValueError
with open(indices_filepath, "r") as fp:
file_row = next(fp)
clean_row = file_row.strip().split("\t")
train_size = int(clean_row[0])
val_size = int(clean_row[1])
test_size = int(clean_row[2])
indices = np.empty(train_size + val_size + test_size, dtype=np.int32)
i = 0
for file_row in fp:
clean_row = file_row.strip()
indices[i] = int(clean_row)
i += 1
train = indices[:train_size]
val = indices[train_size:train_size + val_size]
test = indices[train_size + val_size:]
return train, val, test | thread2vec/representation/utility.py | __author__ = "<NAME> (<EMAIL>)"
import copy
import json
import numpy as np
import scipy.sparse as spsp
from thread2vec.common import get_package_path
def get_data(dataset, scale):
if dataset == "youtube":
item_to_userset_filepath = get_package_path() + "/data_folder/anonymized_data/youtube/item_to_userset_" + scale + ".txt"
anonymize_user_filepath = get_package_path() + "/data_folder/anonymized_data/youtube/anonymize_user_" + scale + ".txt"
popularity_filepath = get_package_path() + "/data_folder/anonymized_data/youtube/item_to_popularity.txt"
anonymous_coward_name = repr(0)
top_users = 200001
total_number_of_items = 516995
elif dataset == "reddit":
item_to_userset_filepath = get_package_path() + "/data_folder/anonymized_data/reddit/item_to_userset_" + scale + ".txt"
anonymize_user_filepath = get_package_path() + "/data_folder/anonymized_data/reddit/anonymize_user_" + scale + ".txt"
popularity_filepath = get_package_path() + "/data_folder/anonymized_data/reddit/item_to_popularity.txt"
anonymous_coward_name = repr(0)
top_users = 20000
total_number_of_items = 35844
else:
raise ValueError("Invalid dataset.")
# Read popularity values.
bad_popularity_items = list()
popularity_matrix = np.empty((total_number_of_items, 4), dtype=np.float32)
with open(popularity_filepath, "r") as fp:
file_row = next(fp)
item_counter = 0
for file_row in fp:
clean_row = file_row.strip().split("\t")
if clean_row[0] == "None":
popularity_matrix[item_counter, 0] = np.nan
popularity_matrix[item_counter, 1] = np.nan
popularity_matrix[item_counter, 2] = np.nan
popularity_matrix[item_counter, 3] = np.nan
bad_popularity_items.append(item_counter)
else:
popularity_matrix[item_counter, 0] = float(clean_row[0])
popularity_matrix[item_counter, 1] = float(clean_row[1])
popularity_matrix[item_counter, 2] = float(clean_row[2])
popularity_matrix[item_counter, 3] = float(clean_row[3])
item_counter += 1
bad_popularity_items = np.array(bad_popularity_items, dtype=np.int32)
# Read user anonymizer.
anonymize_user = dict()
with open(anonymize_user_filepath, "r") as fp:
for file_row in fp:
clean_row = file_row.strip().split("\t")
anonymize_user[clean_row[0]] = int(clean_row[1])
total_number_of_users = len(anonymize_user)
true_anonymize_user = copy.copy(anonymize_user)
user_list = list()
for i in range(total_number_of_users):
user_list.append(None)
for k, v in anonymize_user.items():
user_list[v] = k
anonymous_coward_within_discussion = anonymize_user[anonymous_coward_name]
# Read item to userset.
item_to_user_row = list()
item_to_user_col = list()
item_to_user_matrix = spsp.coo_matrix((np.array(list(), dtype=np.int32),
(np.array(list(), dtype=np.int32),
np.array(list(), dtype=np.int32))),
shape=(total_number_of_items,
total_number_of_users))
item_to_user_matrix = spsp.csc_matrix(item_to_user_matrix)
with open(item_to_userset_filepath, "r") as fp:
counter = 0
for file_row in fp:
clean_row = file_row.strip().split("\t")
for user in clean_row[1:]:
item_to_user_row.append(int(clean_row[0]))
item_to_user_col.append(int(user))
counter += 1
if counter % 10000 == 0:
item_to_user_row = np.array(item_to_user_row, dtype=np.int32)
item_to_user_col = np.array(item_to_user_col, dtype=np.int32)
item_to_user_data = np.ones_like(item_to_user_row, dtype=np.int32)
item_to_user_matrix_to_add = spsp.coo_matrix((item_to_user_data,
(item_to_user_row,
item_to_user_col)),
shape=(total_number_of_items,
total_number_of_users))
item_to_user_matrix_to_add = spsp.csc_matrix(item_to_user_matrix_to_add)
item_to_user_matrix = item_to_user_matrix + item_to_user_matrix_to_add
item_to_user_row = list()
item_to_user_col = list()
item_to_user_row = np.array(item_to_user_row, dtype=np.int32)
item_to_user_col = np.array(item_to_user_col, dtype=np.int32)
item_to_user_data = np.ones_like(item_to_user_row, dtype=np.int32)
item_to_user_matrix_to_add = spsp.coo_matrix((item_to_user_data,
(item_to_user_row,
item_to_user_col)),
shape=(total_number_of_items,
total_number_of_users))
item_to_user_matrix_to_add = spsp.csc_matrix(item_to_user_matrix_to_add)
item_to_user_matrix = item_to_user_matrix + item_to_user_matrix_to_add
if top_users is not None:
user_to_item_distribution = item_to_user_matrix.sum(axis=0)
user_indices_sorted = np.empty(top_users, dtype=np.int32)
user_indices_sorted_to_add = np.argsort(user_to_item_distribution)[0, -top_users:]
user_indices_sorted[:] = user_indices_sorted_to_add
user_indices_sorted = user_indices_sorted[user_indices_sorted != anonymous_coward_within_discussion]
user_indices_sorted_set = set(list(user_indices_sorted))
filtered_item_to_user_matrix = item_to_user_matrix[:, user_indices_sorted]
new_user_list = list()
new_anonymize_user = dict()
counter = 0
for user in user_list:
if anonymize_user[user] in user_indices_sorted_set:
new_user_list.append(user)
new_anonymize_user[user] = counter
counter += 1
user_list = new_user_list
anonymize_user = new_anonymize_user
else:
top_users = total_number_of_users
user_to_item_distribution = np.empty(top_users, dtype=np.int32)
user_to_item_distribution[:] = item_to_user_matrix.sum(axis=0)[0, :]
user_indices_sorted = np.arange(user_to_item_distribution.size, dtype=np.int32)
user_indices_sorted = user_indices_sorted[user_to_item_distribution > 1]
user_indices_sorted = user_indices_sorted[user_indices_sorted != anonymous_coward_within_discussion]
user_indices_sorted_set = set(list(user_indices_sorted))
filtered_item_to_user_matrix = item_to_user_matrix[:, user_indices_sorted]
new_user_list = list()
new_anonymize_user = dict()
counter = 0
for user in user_list:
if anonymize_user[user] in user_indices_sorted_set:
new_user_list.append(user)
new_anonymize_user[user] = counter
counter += 1
user_list = new_user_list
anonymize_user = new_anonymize_user
# item_to_user_distribution = filtered_item_to_user_matrix.sum(axis=1)
# item_to_user_distribution = item_to_user_distribution[item_to_user_distribution > 1]
item_to_user_distribution = np.empty(total_number_of_items, dtype=np.int32)
item_to_user_distribution[:] = filtered_item_to_user_matrix.sum(axis=1)[:, 0].transpose()
item_indices_sorted = np.arange(total_number_of_items, dtype=np.int32)
item_indices_sorted = item_indices_sorted[item_to_user_distribution > 0]
item_indices_sorted = np.setdiff1d(item_indices_sorted, bad_popularity_items)
filtered_item_to_user_matrix = spsp.csr_matrix(filtered_item_to_user_matrix)
filtered_item_to_user_matrix = filtered_item_to_user_matrix[item_indices_sorted, :]
popularity_matrix = popularity_matrix[item_indices_sorted, :]
user_to_item_distribution = np.empty(len(anonymize_user), dtype=np.int32)
user_to_item_distribution[:] = filtered_item_to_user_matrix.sum(axis=0)[0, :]
user_indices_sorted = np.arange(user_to_item_distribution.size, dtype=np.int32)
user_indices_sorted = user_indices_sorted[user_to_item_distribution > 0]
user_indices_sorted = user_indices_sorted[user_indices_sorted != anonymous_coward_within_discussion]
user_indices_sorted_set = set(list(user_indices_sorted))
filtered_item_to_user_matrix = filtered_item_to_user_matrix[:, user_indices_sorted]
new_user_list = list()
new_anonymize_user = dict()
counter = 0
for user in user_list:
if anonymize_user[user] in user_indices_sorted_set:
new_user_list.append(user)
new_anonymize_user[user] = counter
counter += 1
user_list = new_user_list
anonymize_user = new_anonymize_user
true_user_id_to_user_id = dict()
for user in user_list:
k = true_anonymize_user[user]
v = anonymize_user[user]
true_user_id_to_user_id[k] = v
index_1 = int(np.ceil(filtered_item_to_user_matrix.shape[0] * 0.5))
index_2 = int(np.ceil(filtered_item_to_user_matrix.shape[0] * 0.75))
index_permutation = np.random.permutation(np.arange(filtered_item_to_user_matrix.shape[0], dtype=np.int32))
train = index_permutation[:index_1]
val = index_permutation[index_1:index_2]
test = index_permutation[index_2:]
data_splits = (train, val, test)
data = dict()
data["filtered_item_to_user_matrix"] = filtered_item_to_user_matrix
data["popularity_matrix"] = popularity_matrix
data["item_indices_sorted"] = item_indices_sorted
data["anonymize_user"] = anonymize_user
data["true_user_id_to_user_id"] = true_user_id_to_user_id
data["user_list"] = user_list
data["number_of_items"] = filtered_item_to_user_matrix.shape[0]
data["number_of_users"] = filtered_item_to_user_matrix.shape[1]
data["data_splits"] = data_splits
return data
def read_indices(dataset):
if dataset == "youtube":
indices_filepath = get_package_path() + "/data_folder/uniform_data/youtube/data_splits.txt"
elif dataset == "reddit":
indices_filepath = get_package_path() + "/data_folder/uniform_data/reddit/data_splits.txt"
else:
raise ValueError
with open(indices_filepath, "r") as fp:
file_row = next(fp)
clean_row = file_row.strip().split("\t")
train_size = int(clean_row[0])
val_size = int(clean_row[1])
test_size = int(clean_row[2])
indices = np.empty(train_size + val_size + test_size, dtype=np.int32)
i = 0
for file_row in fp:
clean_row = file_row.strip()
indices[i] = int(clean_row)
i += 1
train = indices[:train_size]
val = indices[train_size:train_size + val_size]
test = indices[train_size + val_size:]
return train, val, test | 0.261897 | 0.182753 |
import sys
import os
import datetime
import argparse
import chardet
import re
import noval.syntax.syntax as syntax
from noval.util import logger
import noval.util.utils as utils
import noval.syntax.lang as lang
import easyplugindev as epd
from easyplugindev import _
def getResourcePath():
from pkg_resources import resource_filename
path = resource_filename(__name__,'')
clone_local_img_path = os.path.join(path,"codecounter.png") # 导入同一个包下的文件.
return clone_local_img_path
def isFileExclude(fullFilePath,excludeDirs,excludeFiles):#目前尚未做排除单个文件的工作。
fullFilePath=fullFilePath.replace('\\','/')
for d in excludeDirs:
d = d.replace('\\','/')
if(fullFilePath.find(d)!=-1):
return True
return False
def isDirExclude(path,excludeDirs):
if path in excludeDirs:
return True
loopPath = os.path.dirname(root)
while loopPath:
if loopPath in excludeDirs:
return True
parentPath = os.path.dirname(loopPath)
if parentPath == loopPath:
break
loopPath = parentPath
return False
def isCommentLine(line,lineCommentFlag):
if lineCommentFlag == None:
return False
if line.startswith(lineCommentFlag):
return True
return False
def isBlockCommentStart(line,blockCommentStartFlag):
if blockCommentStartFlag == None:
return False
return isCommentLine(line,blockCommentStartFlag)
def isBlockCommentEnd(line,blockCommentEndFlag):
if blockCommentEndFlag == None:
return False
line = line.rstrip()
if line.endswith(blockCommentEndFlag):
return True
return False
def adaptEncoding(f):#自适应编码方式
text = f.read()
encoding=chardet.detect(text)['encoding']
if(encoding==None):#对于无法检测出编码的文件,可以跳过去。所以直接返回None等待处理。
return None
return text.decode(encoding)
blockCommentStartFlagDict={"md":None,'py':('\'\'\'','\"\"\"','r\"\"\"'),'c':('/*'),'css':'/*'}
blockCommentEndFlagDict={"md":None,'py':('\'\'\'','\"\"\"'),'c':('*/'),'css':'*/'}
lineCommentFlagDict={"md":None,'py':('#'),'c':('//'),'css':'/*'}
lineCommentEndDict={"css":"*/"}
def getCommentPatternByExtName(ext=''):
## try:
fileLexer=syntax.SyntaxThemeManager().GetLangLexerFromExt(ext)
langId=fileLexer.GetLangId()
if(langId==lang.ID_LANG_TXT):
return None,None,None,True
pattern=fileLexer.GetCommentPatterns()
if(len(pattern)==1):# 前三位分别是单行注释、多行注释开始、多行注释结束。第四位是是否为纯文本文件。
if(len(pattern[0])==1):
return pattern[0][0],None,None,False
else:
return pattern[0][0],pattern[0][1],None,False
elif(len(pattern)==2):
if(len(pattern[0])==1):
return pattern[0][1],pattern[1][0],pattern[1][1],False
else:
return pattern[0][0],pattern[0][1],pattern[1][0],False
else:
return pattern[2],pattern[0],pattern[1],False
def countPlainText(content):
validLinesCount=0
blankLinesCount=0
for i,line in enumerate(content):
if line.strip() == "":
blankLinesCount+=1
else:
validLinesCount+=1
return [0,blankLinesCount,0,blankLinesCount+validLinesCount]
def countFileLine(filePath):
global blockCommentStartFlagDict,blockCommentEndFlagDict,lineCommentFlag
fileType=filePath.split('.')[-1]
lineCommentFlag, blockCommentStartFlag, blockCommentEndFlag,isTxt=getCommentPatternByExtName(fileType)
with open(filePath,'rb') as f: # 打开文件开始扫描
content= adaptEncoding(f)
if(content==None):# 如果没有内容就返回[0,0,0]
return [0,0,0,0]
content = content.strip() # 预先剪掉content头部和尾部的赘余,以免计入文件结尾的空行。
lines = content.split('\n') # re.split(r"([\n])", content)# 正则表达式应用。
if(isTxt==True):# 如果没有“单行注释”这一说的话(说明多行注释也没有)
return countPlainText(lines)
isInBlockComment = False
count = 0
validLinesCount=0
commentLinesCount=0
blankLinesCount=0
for i,line in enumerate(lines): # 一行一行的扫描文件内部。
#print(i,line)
count += 1
line = line.strip()
if line == "":
blankLinesCount+=1
continue
if isInBlockComment==True:
commentLinesCount+=1
if isBlockCommentEnd(line,blockCommentEndFlag):#如果是注释结束,就将标识符置为否
isInBlockComment = False
continue
else:
continue
else:
if isBlockCommentStart(line,blockCommentStartFlag):#如果是注释开始,就将标识符置为是
isInBlockComment = True
commentLinesCount+=1
continue
if isCommentLine(line,lineCommentFlag):#如果是注释行,那么就跳转。
commentLinesCount+=1
continue
validLinesCount+=1
return [validLinesCount,blankLinesCount,commentLinesCount,validLinesCount+blankLinesCount+commentLinesCount]
def getFileNames(dirPath,fileList):
filenames=[]
if(dirPath!=''):
if(fileList!=[]):
raise Exception('不得同时输入文件列表和搜索的文件夹路径!')
else:
for root,dirnames,tmpFilenames in os.walk(dirPath):
root=os.path.abspath(root)
for filename in tmpFilenames:
fullFilePath=os.path.join(root,filename)
filenames.append(fullFilePath)
else:#如果入口参数是个列表的话,就这么统计。
filenames=fileList
return filenames
def countDirFileLines(dirPath='',fileList=[],excludeDirs=[],excludeFiles=[],includeExts=[],
progressBar=None,table=None,master=None,countingFlag=True):
if(master!=None):
table=master.table
countingFlag=master.countingFlag
excludeDirs=set(excludeDirs)
excludeFiles=set(excludeFiles)
includeExts=set(includeExts)
totalLineCount = [0,0,0] # 分别对应valid,comment和blank三种内容。
totalFileCount = 0
def isSupportedFileType(ext):
if ext in includeExts:
return True
else:
return False
filenames=getFileNames(dirPath=dirPath,fileList=fileList)
filesToWalk=len(filenames)# 取得需要遍历的文件数量列表。
walkedFiles=0
totalSumCount=0
for filename in filenames:
walkedFiles+=1
if(progressBar!=None):#在调用的时候,如果有进度条的选项,就更新它。
progressBar['value']=walkedFiles/filesToWalk*100
fileType=filename.split('.')[-1] # 取文件名的最后一项,也就是扩展名
if(isSupportedFileType(fileType)!=True): # 如果不是支持的文件类型,就跳过这个文件的扫描。
continue
if isFileExclude(filename,excludeDirs,excludeFiles):
continue
if not os.path.exists(filename):# 如果文件不存在,就跳过循环。
continue
countList= countFileLine(filename)
for i in range(3):
totalLineCount[i] += countList[i]
countSum=countList[3]
totalSumCount+=countSum
totalFileCount += 1
if(master!=None):
if(master.countingFlag==False):
master.progressBar['value']=0
master.clearResultTable()
return
master.table.insert("",0,values=[epd.formatPathForPlatform(filename)]+countList+[countSum])#构造列表,直接插入表格。
if(master!=None):
master.table.insert("",0,values=[_("Counted:%d Files. Total:")%totalFileCount]+totalLineCount+[totalSumCount])
master.startCountingButton.config(text=_("Start Counting!"))
master.countingFlag=False
return totalFileCount
if __name__ == "__main__":
pass
## r=countDirFileLines(r'C:\Users\hzy\Documents\python\NovalIDE\plugins\CodeCounter',excludeDirs=[],excludeFiles=[],includeExts=['py'])
## import noval.util.utils as utils
## utils.get_logger().info("sssssss") | plugins/CodeCounter/CodeCounter/CodeCounter.py | import sys
import os
import datetime
import argparse
import chardet
import re
import noval.syntax.syntax as syntax
from noval.util import logger
import noval.util.utils as utils
import noval.syntax.lang as lang
import easyplugindev as epd
from easyplugindev import _
def getResourcePath():
from pkg_resources import resource_filename
path = resource_filename(__name__,'')
clone_local_img_path = os.path.join(path,"codecounter.png") # 导入同一个包下的文件.
return clone_local_img_path
def isFileExclude(fullFilePath,excludeDirs,excludeFiles):#目前尚未做排除单个文件的工作。
fullFilePath=fullFilePath.replace('\\','/')
for d in excludeDirs:
d = d.replace('\\','/')
if(fullFilePath.find(d)!=-1):
return True
return False
def isDirExclude(path,excludeDirs):
if path in excludeDirs:
return True
loopPath = os.path.dirname(root)
while loopPath:
if loopPath in excludeDirs:
return True
parentPath = os.path.dirname(loopPath)
if parentPath == loopPath:
break
loopPath = parentPath
return False
def isCommentLine(line,lineCommentFlag):
if lineCommentFlag == None:
return False
if line.startswith(lineCommentFlag):
return True
return False
def isBlockCommentStart(line,blockCommentStartFlag):
if blockCommentStartFlag == None:
return False
return isCommentLine(line,blockCommentStartFlag)
def isBlockCommentEnd(line,blockCommentEndFlag):
if blockCommentEndFlag == None:
return False
line = line.rstrip()
if line.endswith(blockCommentEndFlag):
return True
return False
def adaptEncoding(f):#自适应编码方式
text = f.read()
encoding=chardet.detect(text)['encoding']
if(encoding==None):#对于无法检测出编码的文件,可以跳过去。所以直接返回None等待处理。
return None
return text.decode(encoding)
blockCommentStartFlagDict={"md":None,'py':('\'\'\'','\"\"\"','r\"\"\"'),'c':('/*'),'css':'/*'}
blockCommentEndFlagDict={"md":None,'py':('\'\'\'','\"\"\"'),'c':('*/'),'css':'*/'}
lineCommentFlagDict={"md":None,'py':('#'),'c':('//'),'css':'/*'}
lineCommentEndDict={"css":"*/"}
def getCommentPatternByExtName(ext=''):
## try:
fileLexer=syntax.SyntaxThemeManager().GetLangLexerFromExt(ext)
langId=fileLexer.GetLangId()
if(langId==lang.ID_LANG_TXT):
return None,None,None,True
pattern=fileLexer.GetCommentPatterns()
if(len(pattern)==1):# 前三位分别是单行注释、多行注释开始、多行注释结束。第四位是是否为纯文本文件。
if(len(pattern[0])==1):
return pattern[0][0],None,None,False
else:
return pattern[0][0],pattern[0][1],None,False
elif(len(pattern)==2):
if(len(pattern[0])==1):
return pattern[0][1],pattern[1][0],pattern[1][1],False
else:
return pattern[0][0],pattern[0][1],pattern[1][0],False
else:
return pattern[2],pattern[0],pattern[1],False
def countPlainText(content):
validLinesCount=0
blankLinesCount=0
for i,line in enumerate(content):
if line.strip() == "":
blankLinesCount+=1
else:
validLinesCount+=1
return [0,blankLinesCount,0,blankLinesCount+validLinesCount]
def countFileLine(filePath):
global blockCommentStartFlagDict,blockCommentEndFlagDict,lineCommentFlag
fileType=filePath.split('.')[-1]
lineCommentFlag, blockCommentStartFlag, blockCommentEndFlag,isTxt=getCommentPatternByExtName(fileType)
with open(filePath,'rb') as f: # 打开文件开始扫描
content= adaptEncoding(f)
if(content==None):# 如果没有内容就返回[0,0,0]
return [0,0,0,0]
content = content.strip() # 预先剪掉content头部和尾部的赘余,以免计入文件结尾的空行。
lines = content.split('\n') # re.split(r"([\n])", content)# 正则表达式应用。
if(isTxt==True):# 如果没有“单行注释”这一说的话(说明多行注释也没有)
return countPlainText(lines)
isInBlockComment = False
count = 0
validLinesCount=0
commentLinesCount=0
blankLinesCount=0
for i,line in enumerate(lines): # 一行一行的扫描文件内部。
#print(i,line)
count += 1
line = line.strip()
if line == "":
blankLinesCount+=1
continue
if isInBlockComment==True:
commentLinesCount+=1
if isBlockCommentEnd(line,blockCommentEndFlag):#如果是注释结束,就将标识符置为否
isInBlockComment = False
continue
else:
continue
else:
if isBlockCommentStart(line,blockCommentStartFlag):#如果是注释开始,就将标识符置为是
isInBlockComment = True
commentLinesCount+=1
continue
if isCommentLine(line,lineCommentFlag):#如果是注释行,那么就跳转。
commentLinesCount+=1
continue
validLinesCount+=1
return [validLinesCount,blankLinesCount,commentLinesCount,validLinesCount+blankLinesCount+commentLinesCount]
def getFileNames(dirPath,fileList):
filenames=[]
if(dirPath!=''):
if(fileList!=[]):
raise Exception('不得同时输入文件列表和搜索的文件夹路径!')
else:
for root,dirnames,tmpFilenames in os.walk(dirPath):
root=os.path.abspath(root)
for filename in tmpFilenames:
fullFilePath=os.path.join(root,filename)
filenames.append(fullFilePath)
else:#如果入口参数是个列表的话,就这么统计。
filenames=fileList
return filenames
def countDirFileLines(dirPath='',fileList=[],excludeDirs=[],excludeFiles=[],includeExts=[],
progressBar=None,table=None,master=None,countingFlag=True):
if(master!=None):
table=master.table
countingFlag=master.countingFlag
excludeDirs=set(excludeDirs)
excludeFiles=set(excludeFiles)
includeExts=set(includeExts)
totalLineCount = [0,0,0] # 分别对应valid,comment和blank三种内容。
totalFileCount = 0
def isSupportedFileType(ext):
if ext in includeExts:
return True
else:
return False
filenames=getFileNames(dirPath=dirPath,fileList=fileList)
filesToWalk=len(filenames)# 取得需要遍历的文件数量列表。
walkedFiles=0
totalSumCount=0
for filename in filenames:
walkedFiles+=1
if(progressBar!=None):#在调用的时候,如果有进度条的选项,就更新它。
progressBar['value']=walkedFiles/filesToWalk*100
fileType=filename.split('.')[-1] # 取文件名的最后一项,也就是扩展名
if(isSupportedFileType(fileType)!=True): # 如果不是支持的文件类型,就跳过这个文件的扫描。
continue
if isFileExclude(filename,excludeDirs,excludeFiles):
continue
if not os.path.exists(filename):# 如果文件不存在,就跳过循环。
continue
countList= countFileLine(filename)
for i in range(3):
totalLineCount[i] += countList[i]
countSum=countList[3]
totalSumCount+=countSum
totalFileCount += 1
if(master!=None):
if(master.countingFlag==False):
master.progressBar['value']=0
master.clearResultTable()
return
master.table.insert("",0,values=[epd.formatPathForPlatform(filename)]+countList+[countSum])#构造列表,直接插入表格。
if(master!=None):
master.table.insert("",0,values=[_("Counted:%d Files. Total:")%totalFileCount]+totalLineCount+[totalSumCount])
master.startCountingButton.config(text=_("Start Counting!"))
master.countingFlag=False
return totalFileCount
if __name__ == "__main__":
pass
## r=countDirFileLines(r'C:\Users\hzy\Documents\python\NovalIDE\plugins\CodeCounter',excludeDirs=[],excludeFiles=[],includeExts=['py'])
## import noval.util.utils as utils
## utils.get_logger().info("sssssss") | 0.069498 | 0.096535 |
from elit.utils.io_util import load_json
from elit.common.dataset import TransformDataset
class BinaryClassDataset(TransformDataset):
    """Dataset for binary (matched / not-matched) classification samples.

    Each sample is a dict loaded verbatim from a JSON file; any
    per-sample transformation is expected to be handled by the
    ``TransformDataset`` machinery this class inherits from.
    """

    def load_file(self, filepath: str):
        """Yield raw sample dicts from the JSON file at *filepath*.

        Args:
            filepath: Path to a JSON file that ``load_json`` parses into
                an iterable of sample dicts.

        Yields:
            dict: One sample per element of the parsed JSON.
        """
        # `yield from` delegates directly to the parsed iterable —
        # idiomatic replacement for the manual for/yield loop.
        yield from load_json(filepath)
def create_features(sample: dict, tokenizer, max_line_length=128,
doc_stride=0, max_line_number=100,
max_job_length=512) -> dict:
if 'is_matched' in sample:
sample['label'] = sample['is_matched']
type_map = {'Profile': 0, 'Skills': 1, 'Work Experience': 2, 'Education': 3, 'Other': 4, 'Activities': 5}
job_desc = sample["applied_job_description"]
job_inputs = tokenizer.encode_plus(job_desc, None, add_special_tokens=True, max_length=max_job_length,
truncation=True)
job_input_ids = job_inputs["input_ids"]
job_input_tokens = [tokenizer._convert_id_to_token(tid) for tid in job_input_ids]
attention_mask = [1] * len(job_input_ids)
padding_length = max_job_length - len(job_input_ids)
job_input_ids = job_input_ids + ([tokenizer.pad_token_id] * padding_length)
job_attention_masks = attention_mask + ([0] * padding_length)
lines_input_ids = []
type_input_ids = []
attention_masks = []
input_lines_tokens = []
sections = sample.get('resume_data', sample.get('sections', None))
for section in sections:
type_s = section['type']
content = section['content'].replace("\n", " ").replace("\t", " ").replace("\\u", " ").lower().strip()
type_n = type_map[type_s]
tokens = tokenizer.tokenize(content)
if len(tokens) <= max_line_length - 2:
inputs = tokenizer.encode_plus(content, None, add_special_tokens=True,
max_length=max_line_length, truncation=True)
input_ids = inputs["input_ids"]
input_tokens = [tokenizer._convert_id_to_token(tid) for tid in input_ids]
attention_mask = [1] * len(input_ids)
padding_length = max_line_length - len(input_ids)
input_ids = input_ids + ([tokenizer.pad_token_id] * padding_length)
attention_mask = attention_mask + ([0] * padding_length)
attention_masks.append(attention_mask)
lines_input_ids.append(input_ids)
type_input_ids.append(type_n)
input_lines_tokens.append(input_tokens)
else:
doc_left_index = 0
doc_right_index = max_line_length - 2
while doc_left_index < len(tokens) - doc_stride:
if doc_right_index >= len(tokens):
doc_right_index = len(tokens)
new_line_tokens = tokens[doc_left_index:doc_right_index]
inputs = tokenizer.encode_plus(" ".join(new_line_tokens), None, add_special_tokens=True,
max_length=max_line_length)
input_ids = inputs["input_ids"]
input_tokens = [tokenizer._convert_id_to_token(tid) for tid in input_ids]
attention_mask = [1] * len(input_ids)
padding_length = max_line_length - len(input_ids)
input_ids = input_ids + ([tokenizer.pad_token_id] * padding_length)
attention_mask = attention_mask + ([0] * padding_length)
attention_masks.append(attention_mask)
lines_input_ids.append(input_ids)
type_input_ids.append(type_n)
input_lines_tokens.append(input_tokens)
doc_left_index = doc_right_index - doc_stride
doc_right_index = doc_left_index + max_line_length - 2
if len(lines_input_ids) > max_line_number:
lines_input_ids = lines_input_ids[:max_line_number]
attention_masks = attention_masks[:max_line_number]
type_input_ids = type_input_ids[:max_line_number]
sample['lines_input_ids'] = lines_input_ids
sample['type_input_ids'] = type_input_ids
sample['attention_masks'] = attention_masks
sample['input_lines_tokens'] = input_lines_tokens
sample['job_input_tokens'] = job_input_tokens
sample['job_input_ids'] = job_input_ids
sample['job_attention_masks'] = job_attention_masks
return sample | eclare/baseline/binary/binary_dataset.py | from elit.utils.io_util import load_json
from elit.common.dataset import TransformDataset
class BinaryClassDataset(TransformDataset):
def load_file(self, filepath: str):
for sample in load_json(filepath):
yield sample
def create_features(sample: dict, tokenizer, max_line_length=128,
doc_stride=0, max_line_number=100,
max_job_length=512) -> dict:
if 'is_matched' in sample:
sample['label'] = sample['is_matched']
type_map = {'Profile': 0, 'Skills': 1, 'Work Experience': 2, 'Education': 3, 'Other': 4, 'Activities': 5}
job_desc = sample["applied_job_description"]
job_inputs = tokenizer.encode_plus(job_desc, None, add_special_tokens=True, max_length=max_job_length,
truncation=True)
job_input_ids = job_inputs["input_ids"]
job_input_tokens = [tokenizer._convert_id_to_token(tid) for tid in job_input_ids]
attention_mask = [1] * len(job_input_ids)
padding_length = max_job_length - len(job_input_ids)
job_input_ids = job_input_ids + ([tokenizer.pad_token_id] * padding_length)
job_attention_masks = attention_mask + ([0] * padding_length)
lines_input_ids = []
type_input_ids = []
attention_masks = []
input_lines_tokens = []
sections = sample.get('resume_data', sample.get('sections', None))
for section in sections:
type_s = section['type']
content = section['content'].replace("\n", " ").replace("\t", " ").replace("\\u", " ").lower().strip()
type_n = type_map[type_s]
tokens = tokenizer.tokenize(content)
if len(tokens) <= max_line_length - 2:
inputs = tokenizer.encode_plus(content, None, add_special_tokens=True,
max_length=max_line_length, truncation=True)
input_ids = inputs["input_ids"]
input_tokens = [tokenizer._convert_id_to_token(tid) for tid in input_ids]
attention_mask = [1] * len(input_ids)
padding_length = max_line_length - len(input_ids)
input_ids = input_ids + ([tokenizer.pad_token_id] * padding_length)
attention_mask = attention_mask + ([0] * padding_length)
attention_masks.append(attention_mask)
lines_input_ids.append(input_ids)
type_input_ids.append(type_n)
input_lines_tokens.append(input_tokens)
else:
doc_left_index = 0
doc_right_index = max_line_length - 2
while doc_left_index < len(tokens) - doc_stride:
if doc_right_index >= len(tokens):
doc_right_index = len(tokens)
new_line_tokens = tokens[doc_left_index:doc_right_index]
inputs = tokenizer.encode_plus(" ".join(new_line_tokens), None, add_special_tokens=True,
max_length=max_line_length)
input_ids = inputs["input_ids"]
input_tokens = [tokenizer._convert_id_to_token(tid) for tid in input_ids]
attention_mask = [1] * len(input_ids)
padding_length = max_line_length - len(input_ids)
input_ids = input_ids + ([tokenizer.pad_token_id] * padding_length)
attention_mask = attention_mask + ([0] * padding_length)
attention_masks.append(attention_mask)
lines_input_ids.append(input_ids)
type_input_ids.append(type_n)
input_lines_tokens.append(input_tokens)
doc_left_index = doc_right_index - doc_stride
doc_right_index = doc_left_index + max_line_length - 2
if len(lines_input_ids) > max_line_number:
lines_input_ids = lines_input_ids[:max_line_number]
attention_masks = attention_masks[:max_line_number]
type_input_ids = type_input_ids[:max_line_number]
sample['lines_input_ids'] = lines_input_ids
sample['type_input_ids'] = type_input_ids
sample['attention_masks'] = attention_masks
sample['input_lines_tokens'] = input_lines_tokens
sample['job_input_tokens'] = job_input_tokens
sample['job_input_ids'] = job_input_ids
sample['job_attention_masks'] = job_attention_masks
return sample | 0.578924 | 0.36441 |
from vendas_project.vendas.models import SaleDetail
from django.db.models import Sum, F, FloatField
''' ------------ '''
q = SaleDetail.objects.filter(sale=1).values('price_sale', 'quantity')
q.aggregate(Sum(F('price_sale') * F('quantity')), output_field=FloatField())
# falhou
''' ------------ '''
# Django 1.8.3
# http://stackoverflow.com/a/35076326/802542
from core.models import SaleDetail
from django.db.models import Sum, F, FloatField
q = SaleDetail.objects.filter(sale=1).values('price_sale', 'quantity')
q.aggregate(Sum(F('price_sale') * ('quantity'), output_field=FloatField()))
# falhou
''' ------------ '''
qs = SaleDetail.objects.filter(sale=1).values_list('price_sale', 'quantity')
list(map(lambda q: q[0] * q[1], qs))
# funciona no template, mas não funciona no Admin.
''' ------------ '''
# Django 1.7
# http://pt.stackoverflow.com/a/66694/761
from vendas_project.vendas.models import SaleDetail
from django.db.models import Sum
SaleDetail.objects.extra(
select={'subtotal': 'round(price_sale * quantity, 2)',
}).values('price_sale', 'quantity', 'subtotal').filter(sale=2)
SaleDetail.objects.extra(
select={'total': 'round(sum(price_sale*quantity),2)', }).values('total').filter(sale=2)
# OK
''' ------------ '''
# Django 1.8
from vendas_project.vendas.models import SaleDetail
from django.db.models import Sum, F, FloatField
q = SaleDetail.objects.filter(sale=1).values('price_sale', 'quantity')
qs = q.annotate(
subtotal=(F('price_sale') * F('quantity')),
output_field=FloatField())
# Falhou
''' ------------ '''
# Django 1.8
from vendas_project.vendas.models import SaleDetail
from django.db.models import F, FloatField, ExpressionWrapper
q = SaleDetail.objects.filter(sale=1).values('price_sale', 'quantity')
qs = q.annotate(subtotal=ExpressionWrapper(
F('price_sale') * F('quantity')), output_field=FloatField())
qs[0].subtotal
t = qs.aggregate(total=Sum('subtotal'))
t.total
'''
Copiando uma venda
'''
from vendas_project.vendas.models import Sale, SaleDetail
s = Sale.objects.filter(pk=300) # filtra a Venda pelo pk
d = SaleDetail.objects.filter(sale=s) # filtra os itens dessa Venda
s = Sale.objects.get(pk=s) # com o get pega o pk da Venda que foi filtrada
s.pk = None
s.save() # salva uma cópia da Venda
for i in d:
n = SaleDetail.objects.create(
sale=s, product=i.product, quantity=i.quantity, price_sale=i.price_sale) | shell/snippets.py | from vendas_project.vendas.models import SaleDetail
from django.db.models import Sum, F, FloatField
''' ------------ '''
q = SaleDetail.objects.filter(sale=1).values('price_sale', 'quantity')
q.aggregate(Sum(F('price_sale') * F('quantity')), output_field=FloatField())
# falhou
''' ------------ '''
# Django 1.8.3
# http://stackoverflow.com/a/35076326/802542
from core.models import SaleDetail
from django.db.models import Sum, F, FloatField
q = SaleDetail.objects.filter(sale=1).values('price_sale', 'quantity')
q.aggregate(Sum(F('price_sale') * ('quantity'), output_field=FloatField()))
# falhou
''' ------------ '''
qs = SaleDetail.objects.filter(sale=1).values_list('price_sale', 'quantity')
list(map(lambda q: q[0] * q[1], qs))
# funciona no template, mas não funciona no Admin.
''' ------------ '''
# Django 1.7
# http://pt.stackoverflow.com/a/66694/761
from vendas_project.vendas.models import SaleDetail
from django.db.models import Sum
SaleDetail.objects.extra(
select={'subtotal': 'round(price_sale * quantity, 2)',
}).values('price_sale', 'quantity', 'subtotal').filter(sale=2)
SaleDetail.objects.extra(
select={'total': 'round(sum(price_sale*quantity),2)', }).values('total').filter(sale=2)
# OK
''' ------------ '''
# Django 1.8
from vendas_project.vendas.models import SaleDetail
from django.db.models import Sum, F, FloatField
q = SaleDetail.objects.filter(sale=1).values('price_sale', 'quantity')
qs = q.annotate(
subtotal=(F('price_sale') * F('quantity')),
output_field=FloatField())
# Falhou
''' ------------ '''
# Django 1.8
from vendas_project.vendas.models import SaleDetail
from django.db.models import F, FloatField, ExpressionWrapper
q = SaleDetail.objects.filter(sale=1).values('price_sale', 'quantity')
qs = q.annotate(subtotal=ExpressionWrapper(
F('price_sale') * F('quantity')), output_field=FloatField())
qs[0].subtotal
t = qs.aggregate(total=Sum('subtotal'))
t.total
'''
Copiando uma venda
'''
from vendas_project.vendas.models import Sale, SaleDetail
s = Sale.objects.filter(pk=300) # filtra a Venda pelo pk
d = SaleDetail.objects.filter(sale=s) # filtra os itens dessa Venda
s = Sale.objects.get(pk=s) # com o get pega o pk da Venda que foi filtrada
s.pk = None
s.save() # salva uma cópia da Venda
for i in d:
n = SaleDetail.objects.create(
sale=s, product=i.product, quantity=i.quantity, price_sale=i.price_sale) | 0.350977 | 0.183905 |
import numpy as np
import pytest
import popart
import torch
from op_tester import op_tester
def test_tile(op_tester):
d1 = np.random.rand(2, 4, 3).astype(np.float32)
d2 = np.array([2, 4, 6]).astype(np.int64)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
c = builder.aiOnnx.constant(d2)
o = builder.aiOnnx.tile([i1, c])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = np.tile(d1, d2)
return [out]
op_tester.run(init_builder,
reference,
'infer',
opsets={
"ai.onnx": 10,
"ai.graphcore": 1
})
def test_tile_variable_repeats(op_tester):
d1 = np.random.rand(2, 4, 3).astype(np.float32)
d2 = np.array([2, 4, 6]).astype(np.int64)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.tile([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = np.tile(d1, d2)
return [out]
with pytest.raises(popart.popart_exception) as e_info:
op_tester.run(init_builder, reference, 'infer')
assert (e_info.value.args[0].endswith("must be of type Constant"))
def test_tile_invalid_repeat_vals(op_tester):
d1 = np.random.rand(2, 4, 3).astype(np.float32)
d2 = np.array([1, 1, -4]).astype(np.int64)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
c = builder.aiOnnx.constant(d2)
o = builder.aiOnnx.tile([i1, c])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = np.tile(d1, d2)
return [out]
with pytest.raises(popart.popart_exception) as e_info:
op_tester.run(init_builder,
reference,
'infer',
opsets={
"ai.onnx": 10,
"ai.graphcore": 1
})
assert (e_info.value.args[0].find(
"Values must be non-negative in each shape dimension") != -1)
def test_tile_invalid_repeats_size(op_tester):
d1 = np.random.rand(2, 4, 3).astype(np.float32)
d2 = np.array([2, 1, 4, 5]).astype(np.int64)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
c = builder.aiOnnx.constant(d2)
o = builder.aiOnnx.tile([i1, c])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = np.tile(d1, d2)
return [out]
with pytest.raises(popart.popart_exception) as e_info:
op_tester.run(init_builder,
reference,
'infer',
opsets={
"ai.onnx": 10,
"ai.graphcore": 1
})
assert (e_info.value.args[0].endswith(
"should have one element for each dimension of the data tensor"))
def test_tile_grad(op_tester):
d1 = np.random.rand(2, 4, 3).astype(np.float32)
d2 = np.array([2, 4, 1]).astype(np.int64)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
c = builder.aiOnnx.constant(d2)
o = builder.aiOnnx.tile([i1, c])
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i1,
popart.reservedGradientPrefix() + o
]
def reference(ref_data):
a = torch.tensor(d1, requires_grad=True)
b = a.repeat(tuple(d2))
d__o = ref_data.getOutputTensorGrad(0)
b.backward(torch.tensor(d__o))
return [b, a.grad, None]
op_tester.setPatterns(['PreUniRepl'], enableRuntimeAsserts=False)
op_tester.run(init_builder,
reference,
'train',
opsets={
"ai.onnx": 10,
"ai.graphcore": 1
}) | tests/integration/operators_test/tile_test.py | import numpy as np
import pytest
import popart
import torch
from op_tester import op_tester
def test_tile(op_tester):
d1 = np.random.rand(2, 4, 3).astype(np.float32)
d2 = np.array([2, 4, 6]).astype(np.int64)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
c = builder.aiOnnx.constant(d2)
o = builder.aiOnnx.tile([i1, c])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = np.tile(d1, d2)
return [out]
op_tester.run(init_builder,
reference,
'infer',
opsets={
"ai.onnx": 10,
"ai.graphcore": 1
})
def test_tile_variable_repeats(op_tester):
d1 = np.random.rand(2, 4, 3).astype(np.float32)
d2 = np.array([2, 4, 6]).astype(np.int64)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.tile([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = np.tile(d1, d2)
return [out]
with pytest.raises(popart.popart_exception) as e_info:
op_tester.run(init_builder, reference, 'infer')
assert (e_info.value.args[0].endswith("must be of type Constant"))
def test_tile_invalid_repeat_vals(op_tester):
d1 = np.random.rand(2, 4, 3).astype(np.float32)
d2 = np.array([1, 1, -4]).astype(np.int64)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
c = builder.aiOnnx.constant(d2)
o = builder.aiOnnx.tile([i1, c])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = np.tile(d1, d2)
return [out]
with pytest.raises(popart.popart_exception) as e_info:
op_tester.run(init_builder,
reference,
'infer',
opsets={
"ai.onnx": 10,
"ai.graphcore": 1
})
assert (e_info.value.args[0].find(
"Values must be non-negative in each shape dimension") != -1)
def test_tile_invalid_repeats_size(op_tester):
d1 = np.random.rand(2, 4, 3).astype(np.float32)
d2 = np.array([2, 1, 4, 5]).astype(np.int64)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
c = builder.aiOnnx.constant(d2)
o = builder.aiOnnx.tile([i1, c])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = np.tile(d1, d2)
return [out]
with pytest.raises(popart.popart_exception) as e_info:
op_tester.run(init_builder,
reference,
'infer',
opsets={
"ai.onnx": 10,
"ai.graphcore": 1
})
assert (e_info.value.args[0].endswith(
"should have one element for each dimension of the data tensor"))
def test_tile_grad(op_tester):
d1 = np.random.rand(2, 4, 3).astype(np.float32)
d2 = np.array([2, 4, 1]).astype(np.int64)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
c = builder.aiOnnx.constant(d2)
o = builder.aiOnnx.tile([i1, c])
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i1,
popart.reservedGradientPrefix() + o
]
def reference(ref_data):
a = torch.tensor(d1, requires_grad=True)
b = a.repeat(tuple(d2))
d__o = ref_data.getOutputTensorGrad(0)
b.backward(torch.tensor(d__o))
return [b, a.grad, None]
op_tester.setPatterns(['PreUniRepl'], enableRuntimeAsserts=False)
op_tester.run(init_builder,
reference,
'train',
opsets={
"ai.onnx": 10,
"ai.graphcore": 1
}) | 0.66072 | 0.602325 |
import functools
from typing import Optional
import pytest
from orchestrator.utils.functional import (
as_t,
expand_ranges,
first_available_or_next,
ireplace,
join_cs,
orig,
to_ranges,
)
def test_join_cs():
assert join_cs("") == ""
assert join_cs([]) == ""
assert join_cs(()) == ""
assert join_cs("", []) == ""
assert join_cs([], "") == ""
assert join_cs("", ()) == ""
assert join_cs((), "") == ""
assert join_cs("a") == "a"
assert join_cs(["a"]) == "a"
assert join_cs(("a",)) == "a"
assert join_cs("a", "b") == "a,b"
assert join_cs(["a"], ["b"]) == "a,b"
assert join_cs(["a"], ("b",)) == "a,b"
assert join_cs("a,,b") == "a,b"
assert join_cs("a,b") == "a,b"
assert join_cs(["a", "b"]) == "a,b"
assert join_cs(("a", "b")) == "a,b"
assert join_cs("a,b", ["c", "d"]) == "a,b,c,d"
assert join_cs(["a", "b"], "c,d") == "a,b,c,d"
assert join_cs(("a", "b"), "c,d") == "a,b,c,d"
assert join_cs("a,b", ["c", "d"], ("e", "f")) == "a,b,c,d,e,f"
with pytest.raises(TypeError):
join_cs(1, 2)
with pytest.raises(TypeError):
join_cs([1])
def test_first_available_or_next():
assert first_available_or_next([0, 1, 3]) == 2
assert first_available_or_next([0, 1, 2, 3]) == 4
assert first_available_or_next([1, 2, 3]) == 0
assert first_available_or_next([]) == 0
assert first_available_or_next([0, 1, 3], start=11) == 11
assert first_available_or_next([0, 1, 3], start=4) == 4
assert first_available_or_next([], 22) == 22
assert first_available_or_next([1, 100, 101], 33) == 33
assert first_available_or_next([11, 22, 33, 44, 55], 33) == 34
def test_expand_ranges():
assert expand_ranges([[1], [2], [10, 12]]) == [1, 2, 10, 11]
assert expand_ranges([[1], [2], [10, 12]], inclusive=True) == [1, 2, 10, 11, 12]
assert expand_ranges([[1], [2], [10, 12]], inclusive=True) == [1, 2, 10, 11, 12]
assert expand_ranges([]) == []
# sorted
assert expand_ranges([[100], [1, 4]], inclusive=True) == [1, 2, 3, 4, 100]
# deduplicated
assert expand_ranges([[1, 5], [3, 5]], inclusive=True) == [1, 2, 3, 4, 5]
with pytest.raises(ValueError):
expand_ranges([[]])
with pytest.raises(ValueError):
expand_ranges([[2, 100, 3]])
def test_as_t():
# Don't know how to check type annotations at runtime yet. Hence test only basic functionality of `as_t` and not
# the casting of Optional[T] to just T
x: Optional[int] = 7
y: int = as_t(x)
assert y == 7
with pytest.raises(ValueError):
as_t(None)
def test_ireplace():
assert list(ireplace(["1-10", "", "22"], "", "0")) == ["1-10", "0", "22"]
# Values are tested in their entirety, hence "10" won't be replaced in the value "1-10"
assert list(ireplace(["1-10", "", "22"], "10", "999")) == ["1-10", "", "22"]
def test_to_ranges():
assert list(to_ranges([1, 2, 3])) == [range(1, 4)]
assert list(to_ranges([])) == []
assert list(to_ranges([0])) == [range(0, 1)]
assert list(to_ranges([1, 2, 3, 7, 8, 9, 100, 200, 201, 202])) == [
range(1, 4),
range(7, 10),
range(100, 101),
range(200, 203),
]
def test_orig():
def func():
pass
@functools.wraps(func)
def wrapper():
return func()
@functools.wraps(wrapper)
def super_wrapper():
return wrapper()
assert orig(wrapper) == func
assert orig(super_wrapper) == func | test/unit_tests/utils/test_functional.py | import functools
from typing import Optional
import pytest
from orchestrator.utils.functional import (
as_t,
expand_ranges,
first_available_or_next,
ireplace,
join_cs,
orig,
to_ranges,
)
def test_join_cs():
assert join_cs("") == ""
assert join_cs([]) == ""
assert join_cs(()) == ""
assert join_cs("", []) == ""
assert join_cs([], "") == ""
assert join_cs("", ()) == ""
assert join_cs((), "") == ""
assert join_cs("a") == "a"
assert join_cs(["a"]) == "a"
assert join_cs(("a",)) == "a"
assert join_cs("a", "b") == "a,b"
assert join_cs(["a"], ["b"]) == "a,b"
assert join_cs(["a"], ("b",)) == "a,b"
assert join_cs("a,,b") == "a,b"
assert join_cs("a,b") == "a,b"
assert join_cs(["a", "b"]) == "a,b"
assert join_cs(("a", "b")) == "a,b"
assert join_cs("a,b", ["c", "d"]) == "a,b,c,d"
assert join_cs(["a", "b"], "c,d") == "a,b,c,d"
assert join_cs(("a", "b"), "c,d") == "a,b,c,d"
assert join_cs("a,b", ["c", "d"], ("e", "f")) == "a,b,c,d,e,f"
with pytest.raises(TypeError):
join_cs(1, 2)
with pytest.raises(TypeError):
join_cs([1])
def test_first_available_or_next():
assert first_available_or_next([0, 1, 3]) == 2
assert first_available_or_next([0, 1, 2, 3]) == 4
assert first_available_or_next([1, 2, 3]) == 0
assert first_available_or_next([]) == 0
assert first_available_or_next([0, 1, 3], start=11) == 11
assert first_available_or_next([0, 1, 3], start=4) == 4
assert first_available_or_next([], 22) == 22
assert first_available_or_next([1, 100, 101], 33) == 33
assert first_available_or_next([11, 22, 33, 44, 55], 33) == 34
def test_expand_ranges():
assert expand_ranges([[1], [2], [10, 12]]) == [1, 2, 10, 11]
assert expand_ranges([[1], [2], [10, 12]], inclusive=True) == [1, 2, 10, 11, 12]
assert expand_ranges([[1], [2], [10, 12]], inclusive=True) == [1, 2, 10, 11, 12]
assert expand_ranges([]) == []
# sorted
assert expand_ranges([[100], [1, 4]], inclusive=True) == [1, 2, 3, 4, 100]
# deduplicated
assert expand_ranges([[1, 5], [3, 5]], inclusive=True) == [1, 2, 3, 4, 5]
with pytest.raises(ValueError):
expand_ranges([[]])
with pytest.raises(ValueError):
expand_ranges([[2, 100, 3]])
def test_as_t():
# Don't know how to check type annotations at runtime yet. Hence test only basic functionality of `as_t` and not
# the casting of Optional[T] to just T
x: Optional[int] = 7
y: int = as_t(x)
assert y == 7
with pytest.raises(ValueError):
as_t(None)
def test_ireplace():
assert list(ireplace(["1-10", "", "22"], "", "0")) == ["1-10", "0", "22"]
# Values are tested in their entirety, hence "10" won't be replaced in the value "1-10"
assert list(ireplace(["1-10", "", "22"], "10", "999")) == ["1-10", "", "22"]
def test_to_ranges():
assert list(to_ranges([1, 2, 3])) == [range(1, 4)]
assert list(to_ranges([])) == []
assert list(to_ranges([0])) == [range(0, 1)]
assert list(to_ranges([1, 2, 3, 7, 8, 9, 100, 200, 201, 202])) == [
range(1, 4),
range(7, 10),
range(100, 101),
range(200, 203),
]
def test_orig():
def func():
pass
@functools.wraps(func)
def wrapper():
return func()
@functools.wraps(wrapper)
def super_wrapper():
return wrapper()
assert orig(wrapper) == func
assert orig(super_wrapper) == func | 0.87787 | 0.797123 |
import json
from copy import deepcopy
import kafka
from mindsdb.integrations.base import StreamIntegration
import mindsdb.interfaces.storage.db as db
from mindsdb_streams import KafkaStream, StreamController, StreamLearningController
class KafkaConnectionChecker:
def __init__(self, **params):
self.connection_info = params['connection']
def check_connection(self):
try:
client = kafka.KafkaClient(**self.connection_info)
except Exception:
return False
else:
client.close()
return True
class Kafka(StreamIntegration, KafkaConnectionChecker):
def __init__(self, config, name, db_info):
self.connection_info = db_info['connection']
# Back compatibility with initial API version
self.control_stream = db_info.get('control_stream') or db_info.get('topic') or None
if 'advanced' in db_info:
self.connection_info['advanced'] = db_info['advanced']
self.control_connection_info = deepcopy(self.connection_info)
# don't need to read all records from the beginning of 'control stream'
# since all active streams are saved in db. Use 'latest' auto_offset_reset for control stream
if 'advanced' in self.control_connection_info:
if 'consumer' in self.control_connection_info['advanced']:
self.control_connection_info['advanced']['consumer']['auto_offset_reset'] = 'latest'
StreamIntegration.__init__(
self,
config,
name,
control_stream=KafkaStream(self.control_stream, self.control_connection_info) if self.control_stream else None
)
def _make_stream(self, s: db.Stream):
if s.learning_params and s.learning_threshold:
learning_params = json.loads(s.learning_params) if isinstance(s.learning_params, str) else s.learning_params
return StreamLearningController(
s.name,
s.predictor,
learning_params,
s.learning_threshold,
stream_in=KafkaStream(s.stream_in, self.connection_info),
stream_out=KafkaStream(s.stream_out, self.connection_info),
in_thread=True
)
return StreamController(
s.name,
s.predictor,
stream_in=KafkaStream(s.stream_in, self.connection_info),
stream_out=KafkaStream(s.stream_out, self.connection_info),
stream_anomaly=KafkaStream(s.anomaly_stream, self.connection_info) if s.anomaly_stream is not None else None,
in_thread=True
) | mindsdb/integrations/kafka/kafkadb.py | import json
from copy import deepcopy
import kafka
from mindsdb.integrations.base import StreamIntegration
import mindsdb.interfaces.storage.db as db
from mindsdb_streams import KafkaStream, StreamController, StreamLearningController
class KafkaConnectionChecker:
def __init__(self, **params):
self.connection_info = params['connection']
def check_connection(self):
try:
client = kafka.KafkaClient(**self.connection_info)
except Exception:
return False
else:
client.close()
return True
class Kafka(StreamIntegration, KafkaConnectionChecker):
def __init__(self, config, name, db_info):
self.connection_info = db_info['connection']
# Back compatibility with initial API version
self.control_stream = db_info.get('control_stream') or db_info.get('topic') or None
if 'advanced' in db_info:
self.connection_info['advanced'] = db_info['advanced']
self.control_connection_info = deepcopy(self.connection_info)
# don't need to read all records from the beginning of 'control stream'
# since all active streams are saved in db. Use 'latest' auto_offset_reset for control stream
if 'advanced' in self.control_connection_info:
if 'consumer' in self.control_connection_info['advanced']:
self.control_connection_info['advanced']['consumer']['auto_offset_reset'] = 'latest'
StreamIntegration.__init__(
self,
config,
name,
control_stream=KafkaStream(self.control_stream, self.control_connection_info) if self.control_stream else None
)
def _make_stream(self, s: db.Stream):
if s.learning_params and s.learning_threshold:
learning_params = json.loads(s.learning_params) if isinstance(s.learning_params, str) else s.learning_params
return StreamLearningController(
s.name,
s.predictor,
learning_params,
s.learning_threshold,
stream_in=KafkaStream(s.stream_in, self.connection_info),
stream_out=KafkaStream(s.stream_out, self.connection_info),
in_thread=True
)
return StreamController(
s.name,
s.predictor,
stream_in=KafkaStream(s.stream_in, self.connection_info),
stream_out=KafkaStream(s.stream_out, self.connection_info),
stream_anomaly=KafkaStream(s.anomaly_stream, self.connection_info) if s.anomaly_stream is not None else None,
in_thread=True
) | 0.406273 | 0.055849 |
import numpy as np
class Tiling(object):
"""2D rectangular tiling.
Arguments:
limits {list} -- Min and max value tuple for each dimension.
ntiles {iterable} -- Number of tiles for each dimension.
offsets {iterable or None} -- Offset for each tile as multiple of tile width. No offset if None (default: None).
"""
def __init__(self, limits, ntiles, offsets=None):
self.ndim = len(limits)
self.limits = limits
self.ntiles = ntiles
self.N = np.product(ntiles)
self.offsets = offsets
edges_and_widths = [np.linspace(self.limits[i][0], self.limits[i][1],
self.ntiles[i]+1, retstep=True)
for i in range(self.ndim)]
self.edges = [ew[0][1:-1] for ew in edges_and_widths]
self.widths = [ew[1] for ew in edges_and_widths]
if offsets is not None:
for i in range(self.ndim):
# Book Page 219: Offsets scaled relative to tile width
self.edges[i] += self.offsets[i] * self.widths[i]
def tile_dims(self, s):
"""Get tile index for each dimension for a given state s.
Arguments:
s {iterable} -- values representing the state.
Returns:
list -- tile index for each dimension
"""
return [np.digitize(s[i], self.edges[i]) for i in range(self.ndim)]
def tile(self, s):
"""Get index of tile activated by state s.
Arguments:
s {iterable} -- values representing the state
Returns:
int -- Tile index.
"""
dims = self.tile_dims(s)
tile = sum([dims[i] * np.product(self.ntiles[(i+1):])
for i in range(self.ndim-1)])
tile = tile + dims[-1]
return tile
def feature(self, s):
"""Get feature vector for state s.
Arguments:
s {iterable} -- values representing the state
Returns:
np.Array -- Feature vector of length self.ntiles, all zeros except
one for activated tile.
"""
tile = self.tile(s)
features = np.zeros(self.N)
features[tile] = 1
return features.astype('int32')
def tile_from_feature(self, f):
# Get index where feature has value of 1
tile = np.where(f==1)[0][0]
return tile
def dims_from_tile(self, tile):
# Loop through number of dims
# Dim value is given by quotient of tile // size of tile space along remaining axes
dims = []
r = tile
for i in range(self.ndim-1):
dims.append(r // np.product(self.ntiles[i+1:]))
r = r % np.product(self.ntiles[i+1:])
dims.append(r)
return dims
def dims_from_feature(self, f):
tile = self.tile_from_feature(f)
dims = self.dims_from_tile(tile)
return dims
class TilingGroup(object):
"""Set of Tiling objects with appropriate offsets between them.
Arguments:
ntilings {int} -- Number of tilings to generate. Book page 220 suggests at least 4 times number of dimensions.
limits {list} -- Min and max value tuple for each dimension (same used for each tiling).
ntiles {iterable} -- Number of tiles for each dimension (same used for each tiling).
"""
def __init__(self, ntilings, limits, ntiles):
self.ntilings = ntilings
self.ntiles = ntiles
self.limits = limits
self.ndim = len(limits)
# Book page 219: Offsets scaled by 1/ntilings.
# Book page 220: Offsets tilings by (1, 3, 5...) units per dimension.
self.offset_per_tiling = (1/self.ntilings)*np.arange(1, 2*self.ndim, 2)
self.tilings = [Tiling(limits, ntiles,
offsets=i*self.offset_per_tiling)
for i in range(self.ntilings)]
self._N = [t.N for t in self.tilings]
def feature(self, s):
if not isinstance(s, (list, tuple)):
s = [s]
features = np.array([t.feature(s) for t in self.tilings])
return features.flatten()
@property
def N(self):
return sum(self._N)
def decompose_feature(self, f):
'''
'''
init_shape = f.size
fs = []
for n in self._N:
fs.append(f[:n])
f= f[n:]
assert sum([f.size for f in fs]) == init_shape
return fs | tilings.py | import numpy as np
class Tiling(object):
    """N-dimensional rectangular tiling of a bounded state space.

    Maps a continuous state either to the index of the single tile it
    falls in, or to a one-hot feature vector over all tiles.

    Arguments:
        limits {list} -- (min, max) value tuple for each dimension.
        ntiles {iterable} -- Number of tiles along each dimension.
        offsets {iterable or None} -- Offset for each dimension expressed as
            a multiple of that dimension's tile width. No offset if None
            (default: None).
    """

    def __init__(self, limits, ntiles, offsets=None):
        self.ndim = len(limits)
        self.limits = limits
        self.ntiles = ntiles
        # Total number of tiles. np.prod replaces np.product, which is
        # deprecated and removed in NumPy 2.0.
        self.N = np.prod(ntiles)
        self.offsets = offsets
        # Per dimension: interior bin edges (outer endpoints dropped so
        # np.digitize maps any value into a valid tile) and the tile width.
        edges_and_widths = [np.linspace(self.limits[i][0], self.limits[i][1],
                                        self.ntiles[i]+1, retstep=True)
                            for i in range(self.ndim)]
        self.edges = [ew[0][1:-1] for ew in edges_and_widths]
        self.widths = [ew[1] for ew in edges_and_widths]
        if offsets is not None:
            for i in range(self.ndim):
                # Book Page 219: Offsets scaled relative to tile width
                self.edges[i] += self.offsets[i] * self.widths[i]

    def tile_dims(self, s):
        """Get tile index for each dimension for a given state s.

        Arguments:
            s {iterable} -- values representing the state.

        Returns:
            list -- tile index for each dimension
        """
        return [np.digitize(s[i], self.edges[i]) for i in range(self.ndim)]

    def tile(self, s):
        """Get index of tile activated by state s.

        Arguments:
            s {iterable} -- values representing the state

        Returns:
            int -- Tile index (row-major flattening of the per-dimension
                indices).
        """
        dims = self.tile_dims(s)
        # Row-major flatten: each dim index is weighted by the size of the
        # tile space spanned by the remaining (faster-varying) dimensions.
        tile = sum([dims[i] * np.prod(self.ntiles[(i+1):])
                    for i in range(self.ndim-1)])
        tile = tile + dims[-1]
        return tile

    def feature(self, s):
        """Get feature vector for state s.

        Arguments:
            s {iterable} -- values representing the state

        Returns:
            np.Array -- One-hot vector of length self.N, all zeros except
                one for the activated tile.
        """
        tile = self.tile(s)
        features = np.zeros(self.N)
        features[tile] = 1
        return features.astype('int32')

    def tile_from_feature(self, f):
        """Recover the active tile index from a one-hot feature vector."""
        # Get index where feature has value of 1
        tile = np.where(f == 1)[0][0]
        return tile

    def dims_from_tile(self, tile):
        """Invert tile(): recover the per-dimension indices of a flat index."""
        # Dim value is given by quotient of tile // size of tile space
        # along the remaining axes; the remainder carries to the next dim.
        dims = []
        r = tile
        for i in range(self.ndim-1):
            dims.append(r // np.prod(self.ntiles[i+1:]))
            r = r % np.prod(self.ntiles[i+1:])
        dims.append(r)
        return dims

    def dims_from_feature(self, f):
        """Recover per-dimension tile indices from a one-hot feature vector."""
        tile = self.tile_from_feature(f)
        dims = self.dims_from_tile(tile)
        return dims
class TilingGroup(object):
    """Set of Tiling objects with appropriate offsets between them.

    Arguments:
        ntilings {int} -- Number of tilings to generate. Book page 220 suggests at least 4 times number of dimensions.
        limits {list} -- Min and max value tuple for each dimension (same used for each tiling).
        ntiles {iterable} -- Number of tiles for each dimension (same used for each tiling).
    """

    def __init__(self, ntilings, limits, ntiles):
        self.ntilings = ntilings
        self.ntiles = ntiles
        self.limits = limits
        self.ndim = len(limits)
        # Book page 219: Offsets scaled by 1/ntilings.
        # Book page 220: Offsets tilings by (1, 3, 5...) units per dimension.
        odd_units = np.arange(1, 2 * self.ndim, 2)
        self.offset_per_tiling = odd_units * (1 / self.ntilings)
        self.tilings = [Tiling(limits, ntiles, offsets=k * self.offset_per_tiling)
                        for k in range(self.ntilings)]
        self._N = [tiling.N for tiling in self.tilings]

    def feature(self, s):
        """Concatenate the one-hot features of every tiling for state s."""
        if not isinstance(s, (list, tuple)):
            s = [s]
        per_tiling = [tiling.feature(s) for tiling in self.tilings]
        return np.array(per_tiling).flatten()

    @property
    def N(self):
        """Total feature length across all tilings."""
        return sum(self._N)

    def decompose_feature(self, f):
        """Split a concatenated feature vector back into per-tiling pieces."""
        total_size = f.size
        pieces = []
        remainder = f
        for n in self._N:
            pieces.append(remainder[:n])
            remainder = remainder[n:]
        assert sum(piece.size for piece in pieces) == total_size
        return pieces
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from awsglue.context import GlueContext
from awsglue.job import Job
from awsglue.dynamicframe import DynamicFrame
from pyspark.context import SparkContext
from pyspark.sql.functions import unix_timestamp
## @params: [JOB_NAME,S3_CSV_OUTPUT_PATH]
# Resolve the arguments supplied by the Glue job invocation.
args = getResolvedOptions(sys.argv, ['JOB_NAME', 'S3_CSV_OUTPUT_PATH'])
# Standard Glue boilerplate: wrap Spark in a GlueContext and initialise the
# job under JOB_NAME so bookmarks/metrics are tracked.
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
# Load JSON files into dynamic frame.
# NOTE(review): the input bucket path is hard-coded while the output path is
# a job argument -- confirm the asymmetry is intentional.
datasource0 = glueContext.create_dynamic_frame_from_options("s3", {'paths': ["s3://segment-personalize-workshop/segment-logs"], 'recurse':True}, format="json")
print("Input file total record count: ", datasource0.count())
# Filters the JSON documents that we want included in the output CSV.
# These are the event types we're interested for our dataset.
supported_events = ['Product Added', 'Order Completed', 'Product Clicked']

def filter_function(dynamicRecord):
    """Return True when a record is a supported interaction event.

    A record qualifies only if it carries a userId, a properties.sku,
    and an event name listed in supported_events.
    """
    # Return the boolean expression directly instead of the redundant
    # if/else returning literal True/False.
    return ('userId' in dynamicRecord and
            'properties' in dynamicRecord and
            'sku' in dynamicRecord["properties"] and
            'event' in dynamicRecord and
            dynamicRecord['event'] in supported_events)
# Apply filter function to dynamic frame
interactions = Filter.apply(frame = datasource0, f = filter_function, transformation_ctx = "interactions")
print("Filtered record count: ", interactions.count())
# Map only the fields we want in the output CSV, changing names to match target schema.
applymapping1 = ApplyMapping.apply(frame = interactions, mappings = [ \
    ("userId", "string", "USER_ID", "string"), \
    ("properties.sku", "string", "ITEM_ID", "string"), \
    ("event", "string", "EVENT_TYPE", "string"), \
    ("timestamp", "string", "TIMESTAMP_ISO", "string")], \
    transformation_ctx = "applymapping1")
# Repartition to a single file since that is what is required by Personalize
onepartitionDF = applymapping1.toDF().repartition(1)
# Coalesce timestamp into unix timestamp
# unix_timestamp() parses the ISO-8601 string into epoch seconds; rows whose
# timestamp does not match the pattern become null in TIMESTAMP.
onepartitionDF = onepartitionDF.withColumn("TIMESTAMP", \
    unix_timestamp(onepartitionDF['TIMESTAMP_ISO'], "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"))
# Convert back to dynamic frame
onepartition = DynamicFrame.fromDF(onepartitionDF, glueContext, "onepartition_df")
# Drop the ISO formatted timestamp
onepartition = onepartition.drop_fields(['TIMESTAMP_ISO'])
# Write output back to S3 as a CSV
glueContext.write_dynamic_frame.from_options(frame = onepartition, connection_type = "s3", \
    connection_options = {"path": args['S3_CSV_OUTPUT_PATH']}, \
    format = "csv", transformation_ctx = "datasink2")
job.commit()
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from awsglue.context import GlueContext
from awsglue.job import Job
from awsglue.dynamicframe import DynamicFrame
from pyspark.context import SparkContext
from pyspark.sql.functions import unix_timestamp
## @params: [JOB_NAME,S3_CSV_OUTPUT_PATH]
args = getResolvedOptions(sys.argv, ['JOB_NAME', 'S3_CSV_OUTPUT_PATH'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
# Load JSON files into dynamic frame.
datasource0 = glueContext.create_dynamic_frame_from_options("s3", {'paths': ["s3://segment-personalize-workshop/segment-logs"], 'recurse':True}, format="json")
print("Input file total record count: ", datasource0.count())
# Filters the JSON documents that we want included in the output CSV.
# These are the event types we're interested for our dataset.
supported_events = ['Product Added', 'Order Completed', 'Product Clicked']
def filter_function(dynamicRecord):
if ('userId' in dynamicRecord and
'properties' in dynamicRecord and
'sku' in dynamicRecord["properties"] and
'event' in dynamicRecord and
dynamicRecord['event'] in supported_events):
return True
else:
return False
# Apply filter function to dynamic frame
interactions = Filter.apply(frame = datasource0, f = filter_function, transformation_ctx = "interactions")
print("Filtered record count: ", interactions.count())
# Map only the fields we want in the output CSV, changing names to match target schema.
applymapping1 = ApplyMapping.apply(frame = interactions, mappings = [ \
("userId", "string", "USER_ID", "string"), \
("properties.sku", "string", "ITEM_ID", "string"), \
("event", "string", "EVENT_TYPE", "string"), \
("timestamp", "string", "TIMESTAMP_ISO", "string")], \
transformation_ctx = "applymapping1")
# Repartition to a single file since that is what is required by Personalize
onepartitionDF = applymapping1.toDF().repartition(1)
# Coalesce timestamp into unix timestamp
onepartitionDF = onepartitionDF.withColumn("TIMESTAMP", \
unix_timestamp(onepartitionDF['TIMESTAMP_ISO'], "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"))
# Convert back to dynamic frame
onepartition = DynamicFrame.fromDF(onepartitionDF, glueContext, "onepartition_df")
# Drop the ISO formatted timestamp
onepartition = onepartition.drop_fields(['TIMESTAMP_ISO'])
# Write output back to S3 as a CSV
glueContext.write_dynamic_frame.from_options(frame = onepartition, connection_type = "s3", \
connection_options = {"path": args['S3_CSV_OUTPUT_PATH']}, \
format = "csv", transformation_ctx = "datasink2")
job.commit() | 0.500977 | 0.298798 |
import requests
import time
from collections import OrderedDict
import smtplib
from email.mime.text import MIMEText
import os
_ip_checker_list = OrderedDict()
_notifications = set()
def registered_ip_checker(fn):
_ip_checker_list[fn] = None
return fn
def registered_notification(fn):
_notifications.add(fn)
return fn
@registered_ip_checker
def get_ip_from_ipconfig_dot_me(old):
    """Fetch the current public IP, falling back to *old* on any failure."""
    try:
        # Timeout added so a stalled server cannot hang the monitor loop.
        res = requests.get("http://ipconfig.me/ip", timeout=10)
    except requests.RequestException:
        # Network problems are expected here; keep the last known IP.
        # (Was a bare except, which also swallowed KeyboardInterrupt.)
        return old
    if res.status_code not in (200, 201):
        return old
    return res.text.strip()
@registered_notification
def send_email(new_ip, old_ip):
    """Email a notification that the public IP changed from old_ip to new_ip.

    Addresses and credentials come from the NOTIFICATION_EMAIL_FROM/TO/PASSWD
    environment variables; if any is missing, the notification is skipped
    with a message.
    """
    from_addr = os.environ.get("NOTIFICATION_EMAIL_FROM")
    to_addr = os.environ.get("NOTIFICATION_EMAIL_TO")
    password = os.environ.get("NOTIFICATION_EMAIL_PASSWD")
    if not (from_addr and to_addr and password):
        print("email environ error")
        return
    # Bug fix: the message said "Form" and interpolated new/old in the wrong
    # order; it now reads "from <old> to <new>".
    subject = 'IP Change From {} to {}'.format(old_ip, new_ip)
    msg = MIMEText(subject)
    msg['Subject'] = subject
    msg['From'] = from_addr
    msg['To'] = to_addr
    server = smtplib.SMTP("smtp-mail.outlook.com", 587)
    try:
        server.set_debuglevel(1)
        server.starttls()
        server.login(from_addr, password)
        # Bug fix: SMTP.send() only writes raw protocol bytes and does not
        # deliver mail; sendmail() performs MAIL FROM/RCPT TO/DATA.
        server.sendmail(from_addr, [to_addr], msg.as_string())
    finally:
        # Always close the connection, even if login/sendmail raises.
        server.quit()
def monitor():
    """Poll every registered IP checker forever and notify on changes.

    Each checker is called with the last IP it reported; when the result
    differs (and a previous IP existed), every registered notification
    callback is invoked with (new_ip, old_ip). Failed notifications are
    re-queued and retried on the next cycle.
    """
    need_notification = []
    while True:
        for fn, ip in _ip_checker_list.items():
            print(fn)
            n_ip = fn(ip)
            if n_ip != ip and ip is not None:
                # Bug fix: append instead of rebinding the list, so a
                # pending (unsent) notification is not silently dropped.
                need_notification.append((n_ip, ip))
            _ip_checker_list[fn] = n_ip
        while len(need_notification) > 0:
            n_ip, ip = need_notification.pop()
            failed = False
            for nfn in _notifications:
                # Each callback is tried independently so one failure does
                # not re-run callbacks that already succeeded.
                try:
                    nfn(n_ip, ip)
                except Exception:
                    import traceback
                    traceback.print_exc()
                    failed = True
            if failed:
                # Bug fix: re-queue and back off via the outer sleep instead
                # of busy-spinning forever on a persistently failing notifier.
                need_notification.append((n_ip, ip))
                break
        time.sleep(10)


if __name__ == "__main__":
    monitor()
import time
from collections import OrderedDict
import smtplib
from email.mime.text import MIMEText
import os
_ip_checker_list = OrderedDict()
_notifications = set()
def registered_ip_checker(fn):
_ip_checker_list[fn] = None
return fn
def registered_notification(fn):
_notifications.add(fn)
return fn
@registered_ip_checker
def get_ip_from_ipconfig_dot_me(old):
try:
res = requests.get("http://ipconfig.me/ip")
except:
return old
if res.status_code not in (200, 201):
return old
return res.text.strip()
@registered_notification
def send_email(new_ip, old_ip):
from_addr = os.environ.get("NOTIFICATION_EMAIL_FROM")
to_addr = os.environ.get("NOTIFICATION_EMAIL_TO")
password = os.environ.get("NOTIFICATION_EMAIL_PASSWD")
if from_addr and to_addr and password:
msg = MIMEText("""
IP Change Form {} to {}
""".format(new_ip, old_ip))
msg['Subject'] = 'IP Change Form {} to {}'.format(new_ip, old_ip)
msg['From'] = from_addr
msg['To'] = to_addr
server = smtplib.SMTP("smtp-mail.outlook.com", 587)
server.set_debuglevel(1)
server.starttls()
server.login(from_addr, password)
server.send(msg.as_string())
server.close()
else:
print("email environ error")
def monitor():
need_notification = []
while True:
for fn, ip in _ip_checker_list.items():
print(fn)
n_ip = fn(ip)
if n_ip != ip and ip is not None:
need_notification = [(n_ip, ip)]
_ip_checker_list[fn] = n_ip
while len(need_notification) > 0:
n_ip, ip = need_notification.pop()
try:
for nfn in _notifications:
nfn(n_ip, ip)
except:
import traceback
traceback.print_exc()
need_notification.append((n_ip, ip))
time.sleep(10)
if __name__ == "__main__":
monitor() | 0.208743 | 0.054174 |
import urllib.parse
import urllib.request
import urllib.error
import json
'''
Below is a sample of currency code's you can use for this program.
United Arab Emirates = AED
United States = USD
Taiwan = TWD
Kenya = KES
Bitcoin = BTC
Ethereum = ETH
Litecoin = LTC
'''
# Enter your own API Key from http://www.alphavantage.co/support/#api-key
ALPHAVANTAGE_KEY = ''

# Calls the API and accepts user input for currencies to convert.
def getAlphaVantage():
    """Prompt for a currency pair and return the raw exchange-rate JSON.

    Returns:
        dict: parsed Alpha Vantage CURRENCY_EXCHANGE_RATE response.
    """
    baseurl = 'https://www.alphavantage.co/query?'
    method = 'function=CURRENCY_EXCHANGE_RATE'
    # Percent-encode the user-supplied codes so stray characters cannot
    # break or alter the query string (input goes straight into the URL).
    from_currency = 'from_currency=' + urllib.parse.quote(
        input("\nEnter the currency you are transferring from: ").upper())
    to_currency = 'to_currency=' + urllib.parse.quote(
        input("Enter the currency you are transferring to: ").upper())
    api_key = 'apikey=' + ALPHAVANTAGE_KEY
    request = baseurl + method + "&" + from_currency + "&" + to_currency + "&" + api_key
    json_string = urllib.request.urlopen(request).read()
    data = json.loads(json_string)
    return data
# Outputs data of interest.
def printAlphaVantage():
    """Fetch a currency pair, ask for an amount, and print the conversion.

    The numbered dictionary keys below follow the Alpha Vantage
    'Realtime Currency Exchange Rate' response schema.
    """
    get = getAlphaVantage()
    from_code = get['Realtime Currency Exchange Rate']['1. From_Currency Code']
    from_name = get['Realtime Currency Exchange Rate']['2. From_Currency Name']
    to_code = get['Realtime Currency Exchange Rate']['3. To_Currency Code']
    to_name = get['Realtime Currency Exchange Rate']['4. To_Currency Name']
    quantity = input("How much of %s (%s) would you like to convert to %s (%s): " % (from_name, from_code, to_name,
                                                                                    to_code))
    # Converted amount = API exchange rate * user-entered quantity.
    # NOTE(review): a non-numeric quantity raises ValueError here -- confirm
    # whether input validation is wanted.
    rate = float(get['Realtime Currency Exchange Rate']['5. Exchange Rate']) * float(quantity)
    refresh = get['Realtime Currency Exchange Rate']['6. Last Refreshed']
    time_zone = get['Realtime Currency Exchange Rate']['7. Time Zone']
    print("The current exchange rate for %s %s (%s) is %s %s (%s). This information was last updated on %s %s.\n" % (
        quantity, from_name, from_code, rate, to_name, to_code, refresh, time_zone))
# Handles HTTP errors.
def getAlphaVantageSafe():
    """Run printAlphaVantage, reporting URL errors instead of raising.

    Returns None on error; printAlphaVantage itself returns None on success.
    """
    try:
        return printAlphaVantage()
    except urllib.error.URLError as e:
        # NOTE(review): HTTPError instances have both .code and .reason, so
        # for HTTP failures only the first branch runs and the reason is
        # never shown -- confirm whether both should be printed.
        if hasattr(e, "code"):
            print("The server couldn't fulfill the request.")
            print("Error code: ", e.code)
        elif hasattr(e, 'reason'):
            print("We failed to reach a server")
            print("Reason: ", e.reason)
        return None
print("Welcome to Wesley's real-time exchange rate for any pair of digital currency or physical currency.")
getAlphaVantageSafe()
# Will ask user if they want to convert another pair of currency. Accepts yes, yeah, y etc for continuation.
while True:
    another = input("Do you want to convert another pair of currency? (Y/N): ").upper()
    # Bug fix: another[0] raised IndexError when the user just pressed
    # Enter; str.startswith() is safe on the empty string.
    if another.startswith('Y'):
        getAlphaVantageSafe()
    else:
        break
import urllib.request
import urllib.error
import json
'''
Below is a sample of currency code's you can use for this program.
United Arab Emirates = AED
United States = USD
Taiwan = TWD
Kenya = KES
Bitcoin = BTC
Ethereum = ETH
Litecoin = LTC
'''
# Enter your own API Key from http://www.alphavantage.co/support/#api-key
ALPHAVANTAGE_KEY = ''
# Calls the API and accepts user input for currencies to convert.
def getAlphaVantage():
baseurl = 'https://www.alphavantage.co/query?'
method = 'function=CURRENCY_EXCHANGE_RATE'
from_currency = 'from_currency=' + (input("\nEnter the currency you are transferring from: ").upper())
to_currency = 'to_currency=' + (input("Enter the currency you are transferring to: ").upper())
api_key = 'apikey=' + ALPHAVANTAGE_KEY
request = baseurl + method + "&" + from_currency + "&" + to_currency + "&" + api_key
json_string = urllib.request.urlopen(request).read()
data = json.loads(json_string)
return data
# Outputs data of interest.
def printAlphaVantage():
get = getAlphaVantage()
from_code = get['Realtime Currency Exchange Rate']['1. From_Currency Code']
from_name = get['Realtime Currency Exchange Rate']['2. From_Currency Name']
to_code = get['Realtime Currency Exchange Rate']['3. To_Currency Code']
to_name = get['Realtime Currency Exchange Rate']['4. To_Currency Name']
quantity = input("How much of %s (%s) would you like to convert to %s (%s): " % (from_name, from_code, to_name,
to_code))
rate = float(get['Realtime Currency Exchange Rate']['5. Exchange Rate']) * float(quantity)
refresh = get['Realtime Currency Exchange Rate']['6. Last Refreshed']
time_zone = get['Realtime Currency Exchange Rate']['7. Time Zone']
print("The current exchange rate for %s %s (%s) is %s %s (%s). This information was last updated on %s %s.\n" % (
quantity, from_name, from_code, rate, to_name, to_code, refresh, time_zone))
# Handles HTTP errors.
def getAlphaVantageSafe():
try:
return printAlphaVantage()
except urllib.error.URLError as e:
if hasattr(e, "code"):
print("The server couldn't fulfill the request.")
print("Error code: ", e.code)
elif hasattr(e, 'reason'):
print("We failed to reach a server")
print("Reason: ", e.reason)
return None
print("Welcome to Wesley's real-time exchange rate for any pair of digital currency or physical currency.")
getAlphaVantageSafe()
# Will ask user if they want to convert another pair of currency. Accepts yes, yeah, y etc for continuation.
while True:
another = (input("Do you want to convert another pair of currency? (Y/N): ")).upper()
if another[0] == 'Y':
getAlphaVantageSafe()
else:
break | 0.474875 | 0.183319 |
import logging
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
__all__ = ['null_checker', 'non_null_data_args',
'values_in_time', 'plot_what_if']
def values_in_time(obj, t, tau=None):
    """Obtain value(s) of object at time t, or right before.

    Optionally specify time tau>=t for which we want a prediction,
    otherwise it is assumed tau = t.

    obj: callable, pd.Series, pd.DataFrame, or something else.

        If a callable, we return obj(t, tau).

        If obj is a pandas Series/DataFrame, we try obj.loc[t]
        (or obj.loc[(t, tau)] when the index is a MultiIndex);
        on a KeyError the object itself is returned unchanged.

        Anything else is returned as-is.

    t: np.Timestamp (or similar). Time at which we want the value.

    tau: np.Timestamp (or similar), or None. Time tau >= t of the
        prediction, e.g., tau could be tomorrow, t today, and we ask for
        the prediction of market volume tomorrow, made today. If None,
        then it is assumed tau = t.
    """
    if callable(obj):
        return obj(t, tau)
    if isinstance(obj, (pd.Series, pd.DataFrame)):
        try:
            if isinstance(obj.index, pd.MultiIndex):
                return obj.loc[(t, tau)]
            return obj.loc[t]
        except KeyError:
            # Missing timestamp: fall back to returning the whole object.
            return obj
    return obj
def plot_what_if(time, true_results, alt_results):
    """Plot the realized result against counterfactual policy results.

    The true result's value series is drawn solid, each alternative dashed,
    and a dotted vertical line marks *time* (the what-if branch point).
    Assumes each result exposes a plottable ``value`` series and a
    ``pol_name`` label; drawing goes to the current matplotlib axes.
    """
    true_results.value.plot(label=true_results.pol_name)
    for result in alt_results:
        result.value.plot(label=result.pol_name, linestyle="--")
    plt.axvline(x=time, linestyle=":")
def null_checker(obj):
    """Raise if *obj* contains NaN.

    Arguments:
        obj: a scalar, pd.Series, or pd.DataFrame.

    Raises:
        ValueError: obj holds at least one NaN value.
        TypeError: obj is neither a scalar nor a pandas object.
    """
    if isinstance(obj, (pd.DataFrame, pd.Series)):
        if np.any(pd.isnull(obj)):
            raise ValueError('Data object contains NaN values', obj)
    elif np.isscalar(obj):
        if np.isnan(obj):
            raise ValueError('Data object contains NaN values', obj)
    else:
        raise TypeError('Data object can only be scalar or Pandas.')


def non_null_data_args(f):
    """Decorator: run null_checker on every positional and keyword argument
    before calling *f*, rejecting any NaN-bearing input."""
    from functools import wraps

    # Bug fix: without wraps() the decorated function lost its __name__
    # and docstring, hurting introspection and error messages.
    @wraps(f)
    def new_f(*args, **kwds):
        for el in args:
            null_checker(el)
        for el in kwds.values():
            null_checker(el)
        return f(*args, **kwds)
    return new_f
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
__all__ = ['null_checker', 'non_null_data_args',
'values_in_time', 'plot_what_if']
def values_in_time(obj, t, tau=None):
"""Obtain value(s) of object at time t, or right before.
Optionally specify time tau>=t for which we want a prediction,
otherwise it is assumed tau = t.
obj: callable, pd.Series, pd.DataFrame, or something else.
If a callable, we return obj(t,tau).
If obj has an index attribute,
we try to return obj.loc[t],
or obj.loc[t, tau], if the index is a MultiIndex.
If not available, we return obj.
Otherwise, we return obj.
t: np.Timestamp (or similar). Time at which we want
the value.
tau: np.Timestamp (or similar), or None. Time tau >= t
of the prediction, e.g., tau could be tomorrow, t
today, and we ask for prediction of market volume tomorrow,
made today. If None, then it is assumed tau = t.
"""
if hasattr(obj, '__call__'):
return obj(t, tau)
if isinstance(obj, pd.Series) or isinstance(obj, pd.DataFrame):
try:
if isinstance(obj.index, pd.MultiIndex):
return obj.loc[(t, tau)]
else:
return obj.loc[t]
except KeyError:
return obj
return obj
def plot_what_if(time, true_results, alt_results):
true_results.value.plot(label=true_results.pol_name)
for result in alt_results:
result.value.plot(label=result.pol_name, linestyle="--")
plt.axvline(x=time, linestyle=":")
def null_checker(obj):
"""Check if obj contains NaN."""
if (isinstance(obj, pd.DataFrame) or
isinstance(obj, pd.Series)):
if np.any(pd.isnull(obj)):
raise ValueError('Data object contains NaN values', obj)
elif np.isscalar(obj):
if np.isnan(obj):
raise ValueError('Data object contains NaN values', obj)
else:
raise TypeError('Data object can only be scalar or Pandas.')
def non_null_data_args(f):
def new_f(*args, **kwds):
for el in args:
null_checker(el)
for el in kwds.values():
null_checker(el)
return f(*args, **kwds)
return new_f | 0.525125 | 0.515498 |
import os
import torch
import click
from torch import nn, optim
from torchvision import transforms
from torch.utils.data import random_split, DataLoader
from ignite.engine import Engine, Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Precision, Recall, RunningAverage, Loss
from ignite.handlers import ModelCheckpoint, EarlyStopping, TerminateOnNan
from ignite.contrib.handlers import ProgressBar
from cutout import model, dataset
@click.group()
def cli():
    """Root click command group; the train/pred subcommands attach to it
    via @cli.command()."""
    pass
@cli.command()
@click.option('-n', '--name', default='model', help='prefix for checkpoint file names')
@click.option('-i', '--load', default=None, type=click.Path(exists=True, readable=True), help='pretrained weights to load')
@click.option('-l', '--lrate', default=0.0001, help='initial learning rate')
@click.option('--weight-decay', default=1e-5, help='weight decay')
@click.option('-d', '--device', default='cpu', help='pytorch device')
@click.option('-r', '--refine-features/--freeze-features', default=False, help='Freeze pretrained feature weights')
@click.option('--lag', show_default=True, default=20, help='Number of epochs to wait before stopping training without improvement')
@click.option('--min-delta', show_default=True, default=0.005, help='Minimum improvement between epochs to reset early stopping')
@click.option('--threads', default=min(len(os.sched_getaffinity(0)), 4))
@click.argument('ground_truth', nargs=1)
def train(name, load, lrate, weight_decay, device, refine_features, lag,
          min_delta, threads, ground_truth):
    """Train the cutout classifier with pytorch-ignite.

    GROUND_TRUTH is split 90/10 into train/validation sets, optimized with
    Adam against BCE-with-logits, checkpointed every epoch, and stopped
    early when validation loss has not improved for `lag` epochs.

    NOTE(review): `min_delta` is accepted as an option but never passed to
    EarlyStopping below -- confirm whether it should be wired through.
    """
    print('model output name: {}'.format(name))
    torch.set_num_threads(threads)
    data_set = dataset.CutoutDataset(ground_truth)
    # 90/10 train/validation split.
    train_split = int(len(data_set)*0.9)
    train_set, val_set = random_split(data_set, [train_split, len(data_set)-train_split])
    train_data_loader = DataLoader(dataset=train_set, num_workers=threads, batch_size=1, shuffle=True, pin_memory=True)
    val_data_loader = DataLoader(dataset=val_set, num_workers=threads, batch_size=1, pin_memory=True)
    print('Got {}/{} samples in train/validation set'.format(len(train_set), len(val_set)))
    net = model.ClassificationNet(refine_features)
    if load:
        print('loading weights')
        # Loading replaces the freshly built net entirely, then re-applies
        # the freeze/refine setting to the loaded weights.
        net = torch.load(load, map_location='cpu')
        net.refine_features(refine_features)
    criterion = nn.BCEWithLogitsLoss()
    # Only optimize parameters that require grad (features may be frozen).
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lrate, weight_decay=weight_decay)

    def score_function(engine):
        # EarlyStopping maximizes the score, so negate validation loss.
        val_loss = engine.state.metrics['loss']
        return -val_loss

    trainer = create_supervised_trainer(net, optimizer, criterion, device=device, non_blocking=True)
    evaluator = create_supervised_evaluator(net, device=device, non_blocking=True, metrics={'accuracy': Accuracy(),
                                                                                            'precision': Precision(),
                                                                                            'recall': Recall(),
                                                                                            'loss': Loss(criterion)})
    ckpt_handler = ModelCheckpoint('.', name, save_interval=1, n_saved=10, require_empty=False)
    est_handler = EarlyStopping(lag, score_function, trainer)
    RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')
    progress_bar = ProgressBar(persist=True)
    progress_bar.attach(trainer, ['loss'])
    evaluator.add_event_handler(Events.COMPLETED, est_handler)
    trainer.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=ckpt_handler, to_save={'net': net})
    trainer.add_event_handler(event_name=Events.ITERATION_COMPLETED, handler=TerminateOnNan())

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        # Run validation once per epoch and report metrics via the bar.
        evaluator.run(val_data_loader)
        metrics = evaluator.state.metrics
        progress_bar.log_message('eval results - epoch {} loss: {:.2f} accuracy: {:.2f} recall: {:.2f} precision {:.2f}'.format(engine.state.epoch,
                                                                                                                                metrics['loss'],
                                                                                                                                metrics['accuracy'],
                                                                                                                                metrics['recall'],
                                                                                                                                metrics['precision']))

    trainer.run(train_data_loader, max_epochs=1000)
@cli.command()
@click.option('-m', '--model', default=None, help='model file')
@click.option('-d', '--device', default='cpu', help='pytorch device')
@click.argument('images', nargs=-1)
def pred(model, device, images):
    """Run the saved classifier on each image path and print sigmoid scores.

    NOTE(review): `Image` is not imported anywhere in this module; a
    module-level `from PIL import Image` is required for this command to
    run -- confirm and add at the top of the file.
    """
    device = torch.device(device)
    with open(model, 'rb') as fp:
        net = torch.load(fp, map_location=device)
    with torch.no_grad():
        for img in images:
            print('transforming image {}'.format(img))
            im = Image.open(img).convert('RGB')
            norm_im = dataset.default_transforms(im)
            print('running forward pass')
            # Bug fix: the loaded model is bound to `net`; the old code
            # called `m.forward(...)`, a NameError at runtime. Calling the
            # module directly also runs any registered hooks.
            o = net(norm_im.unsqueeze(0))
            o = torch.sigmoid(o)
            print('pred: {}'.format(o))
import torch
import click
from torch import nn, optim
from torchvision import transforms
from torch.utils.data import random_split, DataLoader
from ignite.engine import Engine, Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Precision, Recall, RunningAverage, Loss
from ignite.handlers import ModelCheckpoint, EarlyStopping, TerminateOnNan
from ignite.contrib.handlers import ProgressBar
from cutout import model, dataset
@click.group()
def cli():
pass
@cli.command()
@click.option('-n', '--name', default='model', help='prefix for checkpoint file names')
@click.option('-i', '--load', default=None, type=click.Path(exists=True, readable=True), help='pretrained weights to load')
@click.option('-l', '--lrate', default=0.0001, help='initial learning rate')
@click.option('--weight-decay', default=1e-5, help='weight decay')
@click.option('-d', '--device', default='cpu', help='pytorch device')
@click.option('-r', '--refine-features/--freeze-features', default=False, help='Freeze pretrained feature weights')
@click.option('--lag', show_default=True, default=20, help='Number of epochs to wait before stopping training without improvement')
@click.option('--min-delta', show_default=True, default=0.005, help='Minimum improvement between epochs to reset early stopping')
@click.option('--threads', default=min(len(os.sched_getaffinity(0)), 4))
@click.argument('ground_truth', nargs=1)
def train(name, load, lrate, weight_decay, device, refine_features, lag,
min_delta, threads, ground_truth):
print('model output name: {}'.format(name))
torch.set_num_threads(threads)
data_set = dataset.CutoutDataset(ground_truth)
train_split = int(len(data_set)*0.9)
train_set, val_set = random_split(data_set, [train_split, len(data_set)-train_split])
train_data_loader = DataLoader(dataset=train_set, num_workers=threads, batch_size=1, shuffle=True, pin_memory=True)
val_data_loader = DataLoader(dataset=val_set, num_workers=threads, batch_size=1, pin_memory=True)
print('Got {}/{} samples in train/validation set'.format(len(train_set), len(val_set)))
net = model.ClassificationNet(refine_features)
if load:
print('loading weights')
net = torch.load(load, map_location='cpu')
net.refine_features(refine_features)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lrate, weight_decay=weight_decay)
def score_function(engine):
val_loss = engine.state.metrics['loss']
return -val_loss
trainer = create_supervised_trainer(net, optimizer, criterion, device=device, non_blocking=True)
evaluator = create_supervised_evaluator(net, device=device, non_blocking=True, metrics={'accuracy': Accuracy(),
'precision': Precision(),
'recall': Recall(),
'loss': Loss(criterion)})
ckpt_handler = ModelCheckpoint('.', name, save_interval=1, n_saved=10, require_empty=False)
est_handler = EarlyStopping(lag, score_function, trainer)
RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')
progress_bar = ProgressBar(persist=True)
progress_bar.attach(trainer, ['loss'])
evaluator.add_event_handler(Events.COMPLETED, est_handler)
trainer.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=ckpt_handler, to_save={'net': net})
trainer.add_event_handler(event_name=Events.ITERATION_COMPLETED, handler=TerminateOnNan())
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_data_loader)
metrics = evaluator.state.metrics
progress_bar.log_message('eval results - epoch {} loss: {:.2f} accuracy: {:.2f} recall: {:.2f} precision {:.2f}'.format(engine.state.epoch,
metrics['loss'],
metrics['accuracy'],
metrics['recall'],
metrics['precision']))
trainer.run(train_data_loader, max_epochs=1000)
@cli.command()
@click.option('-m', '--model', default=None, help='model file')
@click.option('-d', '--device', default='cpu', help='pytorch device')
@click.argument('images', nargs=-1)
def pred(model, device, images):
device = torch.device(device)
with open(model, 'rb') as fp:
net = torch.load(fp, map_location=device)
with torch.no_grad():
for img in images:
print('transforming image {}'.format(img))
im = Image.open(img).convert('RGB')
norm_im = dataset.default_transforms(im)
print('running forward pass')
o = m.forward(norm_im.unsqueeze(0))
o = torch.sigmoid(o)
print('pred: {}'.format(o)) | 0.678007 | 0.178669 |
import pandas as pd
from urllib.request import urlopen
import urllib.request
import json
import re
import numpy as np
def catch_nutriment_value(nutri_dict, id_):
    '''Catch the value of a nutriment defined by its id_.

    Input :
        nutri_dict(list): nutrient records of a product, each a dict with
            'nutrient_id' and 'value' keys
        id_(int): id of the nutrient in the database
    Output :
        value(float): value of the asked nutrient (0 if absent)
    '''
    value = 0
    # Scan every record; a later entry with the same id overwrites an
    # earlier one, matching the original scan-to-end behaviour.
    for entry in nutri_dict:
        if int(entry['nutrient_id']) == id_:
            value = float(entry['value'])
    return value
def catch_fruit_or_veg(raw_aliment):
    '''Return 1 if the element is a fruit or a vegetable.

    Input :
        raw_aliment(dictionary): product from the database (needs 'group')
    Output :
        fruit_or_veg(float): 1. when the product group is fruits,
            vegetables or legumes, else 0.
    '''
    # Set membership replaces the original if/elif chain over group names.
    produce_groups = {
        'Fruits and Fruit Juices',
        'Vegetables and Vegetable Products',
        'Legumes and Legume Products',
    }
    return 1. if raw_aliment['group'] in produce_groups else 0.
def find_raw_aliment(search_dict):
    '''Pick the search hit most likely to be a raw aliment.

    Sometimes the raw aliment is not the first search result; each
    candidate scores +1 for 'raw'/'unprepared' in its name and +1 for
    belonging to a produce group. Ties go to the earliest entry, which the
    API already sorts by relevance.

    Input :
        search_dict(dictionary): search response with ['list']['item']
    Output :
        dictionary: the highest-scoring (most relevant) aliment entry
    '''
    candidates = search_dict['list']['item']
    produce_groups = ['Fruits and Fruit Juices',
                      'Vegetables and Vegetable Products',
                      'Legumes and Legume Products']

    def rawness(item):
        # Keyword hit and produce-group hit each contribute one point.
        score = 0
        if ('raw' in item['name']) or ('unprepared' in item['name']):
            score += 1
        if item['group'] in produce_groups:
            score += 1
        return score

    scores = [rawness(item) for item in candidates]
    # list.index returns the first occurrence of the maximum, so the
    # earliest top-scoring entry wins -- same as the original loop.
    return candidates[scores.index(max(scores))]
def scrap(query_, ds_='Standard%20Reference', type_ = 'b'):
''' Scrap nutriment values from US Agriculture department database '''
'''
Input :
query(str): name of the product we want to query with the API (e.g 'pear')
ds_(str): Data source. Must be either 'Branded Food Products' or 'Standard Reference'
type_(str): Report type. [b]asic or [f]ull or [s]tats
Output :
fruit_or_veg(float): returns the fruits/vegs/nuts content
'''
# Init constant
kcal_to_kJ = 4.184
# Allow to handle spaces in query without any problem to establish url
error_ = { "errors": { "error": [{
"status": 400,
"parameter": "results",
"message": "Your search resulted in zero results.Change your parameters and try again" }]}}
query_ = query_.replace(' ', '%20')
# Parameters
api_key_ = '<KEY>' # Official API key for access to US gov database
format1_ = 'json' # Output format
sort_ = 'r' # Sort by relevance
max_ = '20' # Number of search result(s)
offset_ = '0' # Beginning row in the result
# Query the API (will list all the possible results)
url_search = 'https://api.nal.usda.gov/ndb/search/' + '?format=' + format1_ + '&q=' + query_ + \
'&max=' + max_ + '&sort=' + sort_ + '&offset=' + offset_ + '&ds=' + ds_ + '&api_key=' + api_key_
f_search = urlopen(url_search)
assert f_search.code == 200
search_dict = json.loads(f_search.read())
# Error handling
if search_dict == error_:
ds2_='Branded%20Food%20Products'
url_search = 'https://api.nal.usda.gov/ndb/search/' + '?format=' + format1_ + '&q=' + query_ + \
'&max=' + max_ + '&sort=' + sort_ + '&offset=' + offset_ + '&ds=' + ds2_ + '&api_key=' + api_key_
f_search = urlopen(url_search)
assert f_search.code == 200
search_dict = json.loads(f_search.read())
if search_dict == error_:
return {'Name' : np.nan,'kJ': np.nan,'Proteins' : np.nan,'Sugars' : np.nan,'Sat_fats' : np.nan,'Fibers' : np.nan,
'Sodium': np.nan,'Lipids' : np.nan,'Fruit_Veg_content' : np.nan}
# From the possible results list, we now have to choose the best product
# NB: this could be another product than the top product from the list
# In our case, we would like the find the most 'raw' product
f_search = urlopen(url_search)
assert f_search.code == 200
search_dict = json.loads(f_search.read())
# Find the most 'raw' element
raw_aliment = find_raw_aliment(search_dict)
# Identification number in the database
ndbno_ = raw_aliment['ndbno']
# Get the proper report and open it
url_food_report = 'https://api.nal.usda.gov/ndb/reports/' + '?ndbno=' + ndbno_ + '&type=' + type_ + \
'&format=' + format1_ + '&api_key=' + api_key_
f_food_report = urlopen(url_food_report)
assert f_food_report.code == 200
# Load report
food_report_dict = json.loads(f_food_report.read())
nutri_dict = food_report_dict['report']['food']['nutrients']
# Catch nutriments using ID from the US database
nutri_values = {
'Name' : raw_aliment['name'],
'kJ': catch_nutriment_value(nutri_dict, 208) * kcal_to_kJ,
'Proteins' : catch_nutriment_value(nutri_dict, 203),
'Sugars' : catch_nutriment_value(nutri_dict, 269),
'Sat_fats' : catch_nutriment_value(nutri_dict, 606),
'Fibers' : catch_nutriment_value(nutri_dict, 291),
'Sodium' : catch_nutriment_value(nutri_dict, 307),
'Lipids' : catch_nutriment_value(nutri_dict, 204),
'Fruit_Veg_content' : catch_fruit_or_veg(raw_aliment)
}
return nutri_values
def fill_from_Api(product_name):
''' This function uses the API from US Agriculture department to scrap information about the product '''
'''
Input :
product_name(str): name of the product we want to query with the API (e.g 'pear')
Output :
product_fill[column_for_product](pandas dataframe row): Row from the dataframe containing the product
with all information necessary to be compatible with the rest of the programm
'''
# The US database is ASCII-encoded, while our should at least be latin-1
# Therefore, we handle here the most frequent exceptions
query = product_name
query = re.sub('[éèêëÈÉÊË]', 'e', query)
query = re.sub('[àáâãäåæÀÁÂÃÄÅÆ]', 'a', query)
query = re.sub('[òóôõöøÒÓÔÕÖØ]', 'o', query)
query = re.sub('[ùúûüÙÚÛÜ]', 'u', query)
query = re.sub('[ìíîïÌÍÎÏ]', 'i', query)
query = re.sub('[ýÿÝŸ]', 'y', query)
query = re.sub('[ñÑ]', 'y', query)
query = re.sub('[çÇ]', 'c', query)
query = re.sub('[ß]', 'ss', query)
query = re.sub('[$£ÞÐð]', '', query)
# Scrap from the US database
dic = scrap(query_ = query)
# Format the result in the same system than openfoodfacts
tags = ' '
code = '000'
columns = {
'Name' : 'product_name',
'kJ' : 'energy_100g',
'Proteins': 'proteins_100g',
'Sugars' : 'sugars_100g',
'Sat_fats' : 'saturated-fat_100g',
'Fibers': 'fiber_100g',
'Sodium': 'sodium_100g',
'Lipids' : 'fat_100g',
'Fruit_Veg_content' : 'fruits-vegetables-nuts-estimate_100g'
}
# Only keep useful columns for the rest of the algorithm
column_for_product = ['product_name','categories_tags','energy_100g',
'fat_100g','saturated-fat_100g','sugars_100g',
'salt_100g','sodium_100g','fruits-vegetables-nuts_100g',
'fruits-vegetables-nuts-estimate_100g','fiber_100g','proteins_100g']
dic['code'] = code
dic['categories_tags'] = tags
dic['Sodium'] = dic['Sodium']*0.001 # mg => g
dic['salt_100g'] = dic['Sodium']*2.5 # extrapolate salt from sodium
if dic['Fruit_Veg_content'] == 1.:
dic['Fruit_Veg_content'] = 100.0
else:
dic['Fruit_Veg_content'] = 0.0
dic['fruits-vegetables-nuts_100g'] = np.nan
# Fill the product with the new data
product_fill = pd.DataFrame(data = dic, index = ['0']).set_index('code')
product_fill.rename(columns=columns, inplace=True)
return product_fill[column_for_product] | API_US_agri.py | import pandas as pd
from urllib.request import urlopen
import urllib.request
import json
import re
import numpy as np
def catch_nutriment_value(nutri_dict, id_):
''' Catch the value of a nutriment defined by its id_ '''
'''
Input :
nutri_dict(dictionary): contains all nutrients information of a product
id_(int): contains the id of the nutrient in the database
Output :
value(float): value of the asked nutrient
'''
# Init
value = 0
# Iterate over the nutrients to match the id with the corresponding nutrient
for i in range(len(nutri_dict)):
if int(nutri_dict[i]['nutrient_id']) == id_:
value = float(nutri_dict[i]['value'])
return value
def catch_fruit_or_veg(raw_aliment):
''' Return 1 if the element is a fruit or a vegetable '''
'''
Input :
raw_aliment(dictionary): product from the database
Output :
fruit_or_veg(float): returns the fruits/vegs/nuts content
'''
# Init
fruit_or_veg = 0.
# If the group (i.e. type) of the product is fruits, vegetables or nuts, we put the corresponding
# fruits/vegs/nuts content to 1
group = raw_aliment['group']
if group == 'Fruits and Fruit Juices': fruit_or_veg = 1.
elif group == 'Vegetables and Vegetable Products' : fruit_or_veg = 1.
elif group == 'Legumes and Legume Products' : fruit_or_veg = 1.
return fruit_or_veg
def find_raw_aliment(search_dict):
''' Sometimes, the raw aliment is not the first to appear in search result, this function is there
to ensure that the raw aliment is preferred. '''
'''
Input :
search_dict(dictionary): product from the database
Output :
fruit_or_veg(float): returns the fruits/vegs/nuts content
'''
# Init
score_list = []
aliment_list = search_dict['list']['item']
bonus_list = ['Fruits and Fruit Juices','Vegetables and Vegetable Products','Legumes and Legume Products']
best_score = 0
# Attribute a score to each aliment that is more susceptible to be a raw aliment
for i in range(len(aliment_list)):
score = 0
# Use keywords 'raw' and 'unprepared' to detect raw aliments
if ('raw' in aliment_list[i]['name']) or ('unprepared' in aliment_list[i]['name']):
score += 1
# Use group (i.e category) to detect raw aliments
if (aliment_list[i]['group'] in bonus_list) : score += 1
# Store the score in a list
score_list.append(score)
# Return the aliment which has the highest score (there can be several) and is also the upper in the list
for i in range(len(aliment_list)):
# NB the entries are also classified by relevance in the database, so that the upper entries
# are more likely to be relevant
if score_list[i] == max(score_list) : return aliment_list[i]
def scrap(query_, ds_='Standard%20Reference', type_ = 'b'):
''' Scrap nutriment values from US Agriculture department database '''
'''
Input :
query(str): name of the product we want to query with the API (e.g 'pear')
ds_(str): Data source. Must be either 'Branded Food Products' or 'Standard Reference'
type_(str): Report type. [b]asic or [f]ull or [s]tats
Output :
fruit_or_veg(float): returns the fruits/vegs/nuts content
'''
# Init constant
kcal_to_kJ = 4.184
# Allow to handle spaces in query without any problem to establish url
error_ = { "errors": { "error": [{
"status": 400,
"parameter": "results",
"message": "Your search resulted in zero results.Change your parameters and try again" }]}}
query_ = query_.replace(' ', '%20')
# Parameters
api_key_ = '<KEY>' # Official API key for access to US gov database
format1_ = 'json' # Output format
sort_ = 'r' # Sort by relevance
max_ = '20' # Number of search result(s)
offset_ = '0' # Beginning row in the result
# Query the API (will list all the possible results)
url_search = 'https://api.nal.usda.gov/ndb/search/' + '?format=' + format1_ + '&q=' + query_ + \
'&max=' + max_ + '&sort=' + sort_ + '&offset=' + offset_ + '&ds=' + ds_ + '&api_key=' + api_key_
f_search = urlopen(url_search)
assert f_search.code == 200
search_dict = json.loads(f_search.read())
# Error handling
if search_dict == error_:
ds2_='Branded%20Food%20Products'
url_search = 'https://api.nal.usda.gov/ndb/search/' + '?format=' + format1_ + '&q=' + query_ + \
'&max=' + max_ + '&sort=' + sort_ + '&offset=' + offset_ + '&ds=' + ds2_ + '&api_key=' + api_key_
f_search = urlopen(url_search)
assert f_search.code == 200
search_dict = json.loads(f_search.read())
if search_dict == error_:
return {'Name' : np.nan,'kJ': np.nan,'Proteins' : np.nan,'Sugars' : np.nan,'Sat_fats' : np.nan,'Fibers' : np.nan,
'Sodium': np.nan,'Lipids' : np.nan,'Fruit_Veg_content' : np.nan}
# From the possible results list, we now have to choose the best product
# NB: this could be another product than the top product from the list
# In our case, we would like the find the most 'raw' product
f_search = urlopen(url_search)
assert f_search.code == 200
search_dict = json.loads(f_search.read())
# Find the most 'raw' element
raw_aliment = find_raw_aliment(search_dict)
# Identification number in the database
ndbno_ = raw_aliment['ndbno']
# Get the proper report and open it
url_food_report = 'https://api.nal.usda.gov/ndb/reports/' + '?ndbno=' + ndbno_ + '&type=' + type_ + \
'&format=' + format1_ + '&api_key=' + api_key_
f_food_report = urlopen(url_food_report)
assert f_food_report.code == 200
# Load report
food_report_dict = json.loads(f_food_report.read())
nutri_dict = food_report_dict['report']['food']['nutrients']
# Catch nutriments using ID from the US database
nutri_values = {
'Name' : raw_aliment['name'],
'kJ': catch_nutriment_value(nutri_dict, 208) * kcal_to_kJ,
'Proteins' : catch_nutriment_value(nutri_dict, 203),
'Sugars' : catch_nutriment_value(nutri_dict, 269),
'Sat_fats' : catch_nutriment_value(nutri_dict, 606),
'Fibers' : catch_nutriment_value(nutri_dict, 291),
'Sodium' : catch_nutriment_value(nutri_dict, 307),
'Lipids' : catch_nutriment_value(nutri_dict, 204),
'Fruit_Veg_content' : catch_fruit_or_veg(raw_aliment)
}
return nutri_values
def fill_from_Api(product_name):
''' This function uses the API from US Agriculture department to scrap information about the product '''
'''
Input :
product_name(str): name of the product we want to query with the API (e.g 'pear')
Output :
product_fill[column_for_product](pandas dataframe row): Row from the dataframe containing the product
with all information necessary to be compatible with the rest of the programm
'''
# The US database is ASCII-encoded, while our should at least be latin-1
# Therefore, we handle here the most frequent exceptions
query = product_name
query = re.sub('[éèêëÈÉÊË]', 'e', query)
query = re.sub('[àáâãäåæÀÁÂÃÄÅÆ]', 'a', query)
query = re.sub('[òóôõöøÒÓÔÕÖØ]', 'o', query)
query = re.sub('[ùúûüÙÚÛÜ]', 'u', query)
query = re.sub('[ìíîïÌÍÎÏ]', 'i', query)
query = re.sub('[ýÿÝŸ]', 'y', query)
query = re.sub('[ñÑ]', 'y', query)
query = re.sub('[çÇ]', 'c', query)
query = re.sub('[ß]', 'ss', query)
query = re.sub('[$£ÞÐð]', '', query)
# Scrap from the US database
dic = scrap(query_ = query)
# Format the result in the same system than openfoodfacts
tags = ' '
code = '000'
columns = {
'Name' : 'product_name',
'kJ' : 'energy_100g',
'Proteins': 'proteins_100g',
'Sugars' : 'sugars_100g',
'Sat_fats' : 'saturated-fat_100g',
'Fibers': 'fiber_100g',
'Sodium': 'sodium_100g',
'Lipids' : 'fat_100g',
'Fruit_Veg_content' : 'fruits-vegetables-nuts-estimate_100g'
}
# Only keep useful columns for the rest of the algorithm
column_for_product = ['product_name','categories_tags','energy_100g',
'fat_100g','saturated-fat_100g','sugars_100g',
'salt_100g','sodium_100g','fruits-vegetables-nuts_100g',
'fruits-vegetables-nuts-estimate_100g','fiber_100g','proteins_100g']
dic['code'] = code
dic['categories_tags'] = tags
dic['Sodium'] = dic['Sodium']*0.001 # mg => g
dic['salt_100g'] = dic['Sodium']*2.5 # extrapolate salt from sodium
if dic['Fruit_Veg_content'] == 1.:
dic['Fruit_Veg_content'] = 100.0
else:
dic['Fruit_Veg_content'] = 0.0
dic['fruits-vegetables-nuts_100g'] = np.nan
# Fill the product with the new data
product_fill = pd.DataFrame(data = dic, index = ['0']).set_index('code')
product_fill.rename(columns=columns, inplace=True)
return product_fill[column_for_product] | 0.431105 | 0.336876 |
from typing import Any, Dict, List, Optional
from ....models.models import Speaker
from ....permissions.permission_helper import has_perm
from ....permissions.permissions import Permissions
from ....shared.exceptions import ActionException, MissingPermission
from ....shared.filters import And, FilterOperator, Or
from ....shared.patterns import Collection, FullQualifiedId
from ...mixins.create_action_with_inferred_meeting import (
CreateActionWithInferredMeeting,
)
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from ...util.typing import ActionData
from .mixins import CheckSpeechState
from .sort import SpeakerSort
@register_action("speaker.create")
class SpeakerCreateAction(CheckSpeechState, CreateActionWithInferredMeeting):
model = Speaker()
relation_field_for_meeting = "list_of_speakers_id"
schema = DefaultSchema(Speaker()).get_create_schema(
required_properties=["list_of_speakers_id", "user_id"],
optional_properties=["point_of_order", "note", "speech_state"],
)
def get_updated_instances(self, action_data: ActionData) -> ActionData:
"""
Reason for this Exception: It's hard and specific doing the weight calculation
of creating speakers with point of orders, because of the used max- and min-datastore methods.
These should handle the still not generated speakers with specific filters.
But we don't need this functionality
"""
if len(action_data) > 1: # type: ignore
raise ActionException(
"It is not permitted to create more than one speaker per request!"
)
yield from super().get_updated_instances(action_data)
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
instance = super().update_instance(instance)
if "note" in instance and not instance.get("point_of_order"):
raise ActionException("Not allowed to set note if not point of order.")
self.check_speech_state({}, instance)
weight_max = self._get_max_weight(
instance["list_of_speakers_id"], instance["meeting_id"]
)
if weight_max is None:
instance["weight"] = 1
return instance
if not instance.get("point_of_order"):
instance["weight"] = weight_max + 1
return instance
list_of_speakers_id = instance["list_of_speakers_id"]
weight_no_poos_min = self._get_no_poo_min(
list_of_speakers_id, instance["meeting_id"]
)
if weight_no_poos_min is None:
instance["weight"] = weight_max + 1
return instance
instance["weight"] = weight_no_poos_min
speaker_ids = self._insert_before_weight(
instance["id"],
weight_no_poos_min,
list_of_speakers_id,
instance["meeting_id"],
)
self.apply_instance(instance)
action_data = [
{
"list_of_speakers_id": list_of_speakers_id,
"speaker_ids": speaker_ids,
}
]
self.execute_other_action(SpeakerSort, action_data)
return instance
def _insert_before_weight(
self, new_id: int, weight: int, list_of_speakers_id: int, meeting_id: int
) -> List[int]:
"""
We need to bild a list of speakers, sort them by weight and
insert the new speaker before the entry with the weight from parameter
"""
filter = And(
FilterOperator("list_of_speakers_id", "=", list_of_speakers_id),
FilterOperator("begin_time", "=", None),
FilterOperator("meeting_id", "=", meeting_id),
)
speakers = self.datastore.filter(
self.model.collection,
filter=filter,
mapped_fields=["id", "weight"],
)
los = sorted(speakers.values(), key=lambda k: k["weight"])
list_to_sort = []
for speaker in los:
if speaker["weight"] == weight:
list_to_sort.append(new_id)
list_to_sort.append(speaker["id"])
return list_to_sort
def _get_max_weight(
self, list_of_speakers_id: int, meeting_id: int
) -> Optional[int]:
return self.datastore.max(
collection=Collection("speaker"),
filter=And(
FilterOperator("list_of_speakers_id", "=", list_of_speakers_id),
FilterOperator("begin_time", "=", None),
FilterOperator("meeting_id", "=", meeting_id),
),
field="weight",
)
def _get_no_poo_min(
self, list_of_speakers_id: int, meeting_id: int
) -> Optional[int]:
return self.datastore.min(
collection=Collection("speaker"),
filter=And(
FilterOperator("list_of_speakers_id", "=", list_of_speakers_id),
Or(
FilterOperator("point_of_order", "=", False),
FilterOperator("point_of_order", "=", None),
),
FilterOperator("begin_time", "=", None),
FilterOperator("meeting_id", "=", meeting_id),
),
field="weight",
)
def validate_fields(self, instance: Dict[str, Any]) -> Dict[str, Any]:
"""
Checks
- that only the requesting user can file a point-of-order
- that a new speaker does not already exist on the list of speaker as
waiting speaker (with begin_time == None), but allows one additional with point_of_order speaker per user
- that points_of_order are used in this meeting
- that user has to be present to be added to the list of speakers
- that request-user cannot create a speaker without being point_of_order, a not closed los is closed and no list_of_speakers.can_manage permission
"""
if instance.get("point_of_order") and instance.get("user_id") != self.user_id:
raise ActionException(
f"The requesting user {self.user_id} is not the user {instance.get('user_id')} the point-of-order is filed for."
)
los_fqid = FullQualifiedId(
Collection("list_of_speakers"), instance["list_of_speakers_id"]
)
los = self.datastore.get(los_fqid, ["meeting_id", "closed"])
meeting_id = los["meeting_id"]
meeting_fqid = FullQualifiedId(Collection("meeting"), meeting_id)
meeting = self.datastore.get(
meeting_fqid,
[
"list_of_speakers_enable_point_of_order_speakers",
"list_of_speakers_present_users_only",
],
)
if instance.get("point_of_order") and not meeting.get(
"list_of_speakers_enable_point_of_order_speakers"
):
raise ActionException(
"Point of order speakers are not enabled for this meeting."
)
if (
not instance.get("point_of_order")
and los.get("closed")
and instance.get("user_id") == self.user_id
and not has_perm(
self.datastore,
self.user_id,
Permissions.ListOfSpeakers.CAN_MANAGE,
meeting_id,
)
):
raise ActionException("The list of speakers is closed.")
if meeting.get("list_of_speakers_present_users_only"):
user_fqid = FullQualifiedId(Collection("user"), instance["user_id"])
user = self.datastore.get(user_fqid, ["is_present_in_meeting_ids"])
if meeting_id not in user.get("is_present_in_meeting_ids", ()):
raise ActionException(
"Only present users can be on the lists of speakers."
)
# Results are necessary, because of getting a lock_result
filter_obj = And(
FilterOperator("list_of_speakers_id", "=", instance["list_of_speakers_id"]),
FilterOperator("begin_time", "=", None),
FilterOperator("meeting_id", "=", meeting_id),
)
speakers = self.datastore.filter(
collection=Collection("speaker"),
filter=filter_obj,
mapped_fields=["user_id", "point_of_order"],
)
for speaker in speakers.values():
if speaker["user_id"] == instance["user_id"] and bool(
speaker.get("point_of_order")
) == bool(instance.get("point_of_order")):
raise ActionException(
f"User {instance['user_id']} is already on the list of speakers."
)
return super().validate_fields(instance)
def check_permissions(self, instance: Dict[str, Any]) -> None:
if instance.get("user_id") == self.user_id:
permission = Permissions.ListOfSpeakers.CAN_BE_SPEAKER
else:
permission = Permissions.ListOfSpeakers.CAN_MANAGE
meeting_id = self.get_meeting_id(instance)
if has_perm(self.datastore, self.user_id, permission, meeting_id):
return
raise MissingPermission(permission) | openslides_backend/action/actions/speaker/create.py | from typing import Any, Dict, List, Optional
from ....models.models import Speaker
from ....permissions.permission_helper import has_perm
from ....permissions.permissions import Permissions
from ....shared.exceptions import ActionException, MissingPermission
from ....shared.filters import And, FilterOperator, Or
from ....shared.patterns import Collection, FullQualifiedId
from ...mixins.create_action_with_inferred_meeting import (
CreateActionWithInferredMeeting,
)
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from ...util.typing import ActionData
from .mixins import CheckSpeechState
from .sort import SpeakerSort
@register_action("speaker.create")
class SpeakerCreateAction(CheckSpeechState, CreateActionWithInferredMeeting):
model = Speaker()
relation_field_for_meeting = "list_of_speakers_id"
schema = DefaultSchema(Speaker()).get_create_schema(
required_properties=["list_of_speakers_id", "user_id"],
optional_properties=["point_of_order", "note", "speech_state"],
)
def get_updated_instances(self, action_data: ActionData) -> ActionData:
"""
Reason for this Exception: It's hard and specific doing the weight calculation
of creating speakers with point of orders, because of the used max- and min-datastore methods.
These should handle the still not generated speakers with specific filters.
But we don't need this functionality
"""
if len(action_data) > 1: # type: ignore
raise ActionException(
"It is not permitted to create more than one speaker per request!"
)
yield from super().get_updated_instances(action_data)
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
instance = super().update_instance(instance)
if "note" in instance and not instance.get("point_of_order"):
raise ActionException("Not allowed to set note if not point of order.")
self.check_speech_state({}, instance)
weight_max = self._get_max_weight(
instance["list_of_speakers_id"], instance["meeting_id"]
)
if weight_max is None:
instance["weight"] = 1
return instance
if not instance.get("point_of_order"):
instance["weight"] = weight_max + 1
return instance
list_of_speakers_id = instance["list_of_speakers_id"]
weight_no_poos_min = self._get_no_poo_min(
list_of_speakers_id, instance["meeting_id"]
)
if weight_no_poos_min is None:
instance["weight"] = weight_max + 1
return instance
instance["weight"] = weight_no_poos_min
speaker_ids = self._insert_before_weight(
instance["id"],
weight_no_poos_min,
list_of_speakers_id,
instance["meeting_id"],
)
self.apply_instance(instance)
action_data = [
{
"list_of_speakers_id": list_of_speakers_id,
"speaker_ids": speaker_ids,
}
]
self.execute_other_action(SpeakerSort, action_data)
return instance
def _insert_before_weight(
self, new_id: int, weight: int, list_of_speakers_id: int, meeting_id: int
) -> List[int]:
"""
We need to bild a list of speakers, sort them by weight and
insert the new speaker before the entry with the weight from parameter
"""
filter = And(
FilterOperator("list_of_speakers_id", "=", list_of_speakers_id),
FilterOperator("begin_time", "=", None),
FilterOperator("meeting_id", "=", meeting_id),
)
speakers = self.datastore.filter(
self.model.collection,
filter=filter,
mapped_fields=["id", "weight"],
)
los = sorted(speakers.values(), key=lambda k: k["weight"])
list_to_sort = []
for speaker in los:
if speaker["weight"] == weight:
list_to_sort.append(new_id)
list_to_sort.append(speaker["id"])
return list_to_sort
def _get_max_weight(
self, list_of_speakers_id: int, meeting_id: int
) -> Optional[int]:
return self.datastore.max(
collection=Collection("speaker"),
filter=And(
FilterOperator("list_of_speakers_id", "=", list_of_speakers_id),
FilterOperator("begin_time", "=", None),
FilterOperator("meeting_id", "=", meeting_id),
),
field="weight",
)
def _get_no_poo_min(
self, list_of_speakers_id: int, meeting_id: int
) -> Optional[int]:
return self.datastore.min(
collection=Collection("speaker"),
filter=And(
FilterOperator("list_of_speakers_id", "=", list_of_speakers_id),
Or(
FilterOperator("point_of_order", "=", False),
FilterOperator("point_of_order", "=", None),
),
FilterOperator("begin_time", "=", None),
FilterOperator("meeting_id", "=", meeting_id),
),
field="weight",
)
def validate_fields(self, instance: Dict[str, Any]) -> Dict[str, Any]:
"""
Checks
- that only the requesting user can file a point-of-order
- that a new speaker does not already exist on the list of speaker as
waiting speaker (with begin_time == None), but allows one additional with point_of_order speaker per user
- that points_of_order are used in this meeting
- that user has to be present to be added to the list of speakers
- that request-user cannot create a speaker without being point_of_order, a not closed los is closed and no list_of_speakers.can_manage permission
"""
if instance.get("point_of_order") and instance.get("user_id") != self.user_id:
raise ActionException(
f"The requesting user {self.user_id} is not the user {instance.get('user_id')} the point-of-order is filed for."
)
los_fqid = FullQualifiedId(
Collection("list_of_speakers"), instance["list_of_speakers_id"]
)
los = self.datastore.get(los_fqid, ["meeting_id", "closed"])
meeting_id = los["meeting_id"]
meeting_fqid = FullQualifiedId(Collection("meeting"), meeting_id)
meeting = self.datastore.get(
meeting_fqid,
[
"list_of_speakers_enable_point_of_order_speakers",
"list_of_speakers_present_users_only",
],
)
if instance.get("point_of_order") and not meeting.get(
"list_of_speakers_enable_point_of_order_speakers"
):
raise ActionException(
"Point of order speakers are not enabled for this meeting."
)
if (
not instance.get("point_of_order")
and los.get("closed")
and instance.get("user_id") == self.user_id
and not has_perm(
self.datastore,
self.user_id,
Permissions.ListOfSpeakers.CAN_MANAGE,
meeting_id,
)
):
raise ActionException("The list of speakers is closed.")
if meeting.get("list_of_speakers_present_users_only"):
user_fqid = FullQualifiedId(Collection("user"), instance["user_id"])
user = self.datastore.get(user_fqid, ["is_present_in_meeting_ids"])
if meeting_id not in user.get("is_present_in_meeting_ids", ()):
raise ActionException(
"Only present users can be on the lists of speakers."
)
# Results are necessary, because of getting a lock_result
filter_obj = And(
FilterOperator("list_of_speakers_id", "=", instance["list_of_speakers_id"]),
FilterOperator("begin_time", "=", None),
FilterOperator("meeting_id", "=", meeting_id),
)
speakers = self.datastore.filter(
collection=Collection("speaker"),
filter=filter_obj,
mapped_fields=["user_id", "point_of_order"],
)
for speaker in speakers.values():
if speaker["user_id"] == instance["user_id"] and bool(
speaker.get("point_of_order")
) == bool(instance.get("point_of_order")):
raise ActionException(
f"User {instance['user_id']} is already on the list of speakers."
)
return super().validate_fields(instance)
def check_permissions(self, instance: Dict[str, Any]) -> None:
if instance.get("user_id") == self.user_id:
permission = Permissions.ListOfSpeakers.CAN_BE_SPEAKER
else:
permission = Permissions.ListOfSpeakers.CAN_MANAGE
meeting_id = self.get_meeting_id(instance)
if has_perm(self.datastore, self.user_id, permission, meeting_id):
return
raise MissingPermission(permission) | 0.768907 | 0.16502 |
import random
import unittest
from QNetwork.qkd.diqkd import DIQKDNode, DIQKDSenderNode, DIQKDReceiverNode
from QNetwork.q_network_channels import QState
class QChannelDummy:
    """Bare placeholder standing in for a quantum channel when no behavior is needed."""
class QChannelSpy:
    """Quantum-channel test double that records the bases it is handed.

    Both directions of EPR traffic simply store their ``bases`` argument in
    ``received_bases`` so tests can inspect what the node requested.
    """

    def __init__(self):
        # Last bases sequence passed to either channel operation.
        self.received_bases = []

    def send_epr(self, bases):
        """Record *bases* instead of actually emitting EPR pairs."""
        self.received_bases = bases

    def receive_epr_in(self, bases):
        """Record *bases* instead of actually measuring incoming EPR pairs."""
        self.received_bases = bases
class CACStub:
    """Classical-channel stub: hands back a canned value and swallows sends."""

    def __init__(self):
        # Tests assign the payload that receive() should hand back.
        self.received = None

    def receive(self):
        """Return the pre-seeded payload."""
        return self.received

    def send(self, data):
        """Discard outgoing data; the stub never forwards anything."""
class CACSpy:
    """Classical-channel spy remembering the last payload pushed through it."""

    def __init__(self):
        # Holds whatever the node last passed to send().
        self.data_sent = None

    def send(self, data):
        """Capture *data* for later inspection instead of transmitting it."""
        self.data_sent = data
class QubitSpy:
    """Qubit test double logging which gate operations were applied to it."""

    def __init__(self):
        # Step count of the most recent Y-rotation (0 when none recorded).
        self.rotation_steps = 0
        # Flag flipped once a Z gate has been applied.
        self.received_Z = False

    def rot_Y(self, step):
        """Remember the requested Y-rotation step count."""
        self.rotation_steps = step

    def Z(self):
        """Note that a Z gate was applied."""
        self.received_Z = True
class DIQKDNodeSUT(DIQKDNode):
    """Concrete DIQKDNode for testing: the abstract hooks become no-ops.

    Tests drive the protected helper methods of the base class directly, so
    none of the high-level protocol steps need a real implementation here.
    """

    def share_q_states(self):
        """No-op; tests call the state-sharing helpers directly."""

    def should_abort(self):
        """No-op; abort statistics are exercised via the helper methods."""

    def generate_key(self):
        """No-op; key generation internals are tested piecewise."""
class TestDIQKDEntangledSharing(unittest.TestCase):
    """Check that the node asks the quantum channel for randomly drawn bases."""

    def setUp(self):
        self.q_channel = QChannelSpy()
        self.c_channel = CACStub()
        self.node = DIQKDNodeSUT(self.q_channel, self.c_channel, 0)

    def test_send_entangled_states(self):
        # Fixed seed makes the random basis choices reproducible.
        random.seed(42)
        self.node._send_q_states(4)
        self.assertSequenceEqual([0, 0, 1, 0], self.q_channel.received_bases)

    def test_receive_entangled_states(self):
        random.seed(7)
        # The number of states to expect arrives over the classical channel.
        self.c_channel.received = [4]
        self.node._receive_q_states()
        self.assertSequenceEqual([1, 0, 1, 2], self.q_channel.received_bases)
class TestDIQKDSending(unittest.TestCase):
    """Verify that test values are both transmitted and cached on the node."""

    def setUp(self):
        self.spy = CACSpy()
        self.node = DIQKDNodeSUT(None, self.spy, 0)
        # Four measured states; indices 0 and 1 form the test subset below.
        self.node._qstates = [QState(1, 0), QState(0, 1), QState(0, 0), QState(1, 1)]

    def test_send_chsh_values(self):
        self.node._chsh_test_set = {0, 1}
        self.node._send_chsh_test_values()
        self.assertSequenceEqual([1, 0], self.spy.data_sent)
        self.assertSequenceEqual([1, 0], self.node._chsh_test_values)

    def test_send_match_values(self):
        self.node._match_test_set = {0, 1}
        self.node._send_match_test_values()
        self.assertSequenceEqual([1, 0], self.spy.data_sent)
        self.assertSequenceEqual([1, 0], self.node._match_test_values)
class TestDIQKDCommonOperations(unittest.TestCase):
    """Statistics and key post-processing helpers shared by both DIQKD roles."""

    def setUp(self):
        # Node configured with an allowed error tolerance of 0.1.
        self.node = DIQKDNodeSUT(None, None, 0.1)

    def test_calculate_win_probability(self):
        self.node._qstates = [QState(1, 0), QState(0, 1), QState(1, 1), QState(0, 1), QState(0, 0)]
        self.node._chsh_test_values = [1, 0, 1, 0]
        self.node._other_chsh_test_values = [1, 1, 0, 0]
        self.node._other_bases = [1, 1, 0, 0, 0]
        self.node._chsh_test_set = {0, 1, 2, 3}
        self.assertAlmostEqual(0.75, self.node._calculate_winning_probability())

    def test_calculate_match_error(self):
        # One mismatch out of four compared values -> error rate 0.25.
        self.node._match_test_values = [1, 0, 0, 1]
        self.node._other_match_test_values = [1, 1, 0, 1]
        self.assertAlmostEqual(0.25, self.node._calculate_match_error())

    def test_pwin_outside_error_bound(self):
        self.assertTrue(self.node._is_outside_error_bound(0.75, 1.0))

    def test_pwin_inside_error_bound(self):
        self.assertFalse(self.node._is_outside_error_bound(0.85, 1.0))

    def test_match_outside_error_bound(self):
        self.assertTrue(self.node._is_outside_error_bound(0.85, 0.8))

    def test_match_inside_error_bound(self):
        self.assertFalse(self.node._is_outside_error_bound(0.85, 0.95))

    def _amplify(self, seed):
        """Amplify a fixed raw key (indices 1, 3, 4) with the given seed."""
        self.node._qstates = [QState(1, 0), QState(1, 0), QState(0, 0), QState(1, 0), QState(1, 0)]
        self.node._raw_key_set = {1, 3, 4}
        self.node._seed = seed
        return self.node._privacy_amplification()

    def test_privacy_amplification_even(self):
        self.assertEqual([0], self._amplify([1, 1, 0]))

    def test_privacy_amplification_odd(self):
        self.assertEqual([1], self._amplify([1, 1, 1]))
class TestDIQKDSenderOperations(unittest.TestCase):
    """Sender-side partitioning of measured states into test subsets."""

    def setUp(self):
        self.node = DIQKDSenderNode(None, None, 0, None)

    def test_subset_separation(self):
        self.node._other_bases = [1, 0, 2, 0, 2, 2]
        self.node._test_set = {0, 1, 2}
        self.node._qstates = [QState(1, 0), QState(0, 0), QState(1, 0),
                              QState(0, 0), QState(1, 1), QState(0, 0)]
        self.node._separate_test_subsets()
        self.assert_test_sets(expected_chsh={0, 1}, expected_match={2}, expected_raw_key={5})

    def assert_test_sets(self, expected_chsh, expected_match, expected_raw_key):
        """Compare all three computed index subsets against expectations."""
        self.assertEqual(expected_chsh, self.node._chsh_test_set)
        self.assertEqual(expected_match, self.node._match_test_set)
        self.assertEqual(expected_raw_key, self.node._raw_key_set)
class TestDIQKDReceiverOperations(unittest.TestCase):
def setUp(self):
self.qc = QChannelDummy()
self.node = DIQKDReceiverNode(self.qc, None, 0)
def test_has_correct_bases_mapping(self):
q1 = QubitSpy()
self.node.q_channel.bases_mapping[0](q1)
self.assertEqual(32, q1.rotation_steps)
q2 = QubitSpy()
self.node.q_channel.bases_mapping[1](q2)
self.assertEqual(32, q2.rotation_steps)
self.assertTrue(q2.received_Z)
q3 = QubitSpy()
self.node.q_channel.bases_mapping[2](q3)
self.assertEqual(0, q3.rotation_steps)
def test_subset_separation(self):
self.node._other_bases = [1, 0, 0, 0, 1, 0]
self.node._test_set = {0, 1, 2}
self.node._qstates = [QState(1, 1), QState(0, 0), QState(1, 2), QState(0, 0), QState(1, 1), QState(0, 2)]
self.node._separate_test_subsets()
self.assert_test_sets(expected_chsh={0, 1}, expected_match={2}, expected_raw_key={5})
def assert_test_sets(self, expected_chsh, expected_match, expected_raw_key):
self.assertEqual(expected_chsh, self.node._chsh_test_set)
self.assertEqual(expected_match, self.node._match_test_set)
self.assertEqual(expected_raw_key, self.node._raw_key_set)
class TestDIQKDReceiving(unittest.TestCase):
def setUp(self):
self.cac = CACStub()
self.node = DIQKDNodeSUT(None, self.cac, 0)
def test_receive_chsh_values(self):
self.cac.received = [1, 1, 0, 0]
self.node._receive_chsh_test_values()
self.assertSequenceEqual(self.cac.received, self.node._other_chsh_test_values)
def test_receive_match_values(self):
self.cac.received = [1, 1, 0, 0]
self.node._receive_match_test_values()
self.assertSequenceEqual(self.cac.received, self.node._other_match_test_values)
class DIQKDSenderNodeSpy(DIQKDSenderNode):
def __init__(self):
super().__init__(None, None, None, 0)
self.operations = []
def _send_q_states(self, amount):
self.operations.append("_send_q_states")
def _receive_ack(self):
self.operations.append("_receive_ack")
def _share_bases(self):
self.operations.append("_share_bases")
def _send_test_set(self):
self.operations.append("_send_test_set")
def _separate_test_subsets(self):
self.operations.append("_separate_test_subsets")
def _send_chsh_test_values(self):
self.operations.append("_send_chsh_test_values")
def _receive_chsh_test_values(self):
self.operations.append("_receive_chsh_test_values")
def _send_match_test_values(self):
self.operations.append("_send_match_test_values")
def _receive_match_test_values(self):
self.operations.append("_receive_match_test_values")
def _calculate_winning_probability(self):
self.operations.append("_calculate_winning_probability")
def _calculate_match_error(self):
self.operations.append("_calculate_match_error")
def _is_outside_error_bound(self, win_prob, matching_error):
self.operations.append("_is_outside_error_bound")
def _send_seed(self):
self.operations.append("_send_seed")
def _privacy_amplification(self):
self.operations.append("_privacy_amplification")
class TestDIQKDSenderFlow(unittest.TestCase):
def setUp(self):
self.node = DIQKDSenderNodeSpy()
def test_share_q_states(self):
self.node.share_q_states()
self.assertSequenceEqual(["_send_q_states", "_receive_ack", "_share_bases"], self.node.operations)
def test_should_abort(self):
self.node.should_abort()
self.assertSequenceEqual(
["_send_test_set",
"_separate_test_subsets",
"_send_chsh_test_values",
"_receive_chsh_test_values",
"_send_match_test_values",
"_receive_match_test_values",
"_calculate_winning_probability",
"_calculate_match_error",
"_is_outside_error_bound"],
self.node.operations)
def test_generate_key(self):
self.node.generate_key()
self.assertSequenceEqual(["_send_seed", "_privacy_amplification"], self.node.operations)
class DIQKDReceiverNodeSpy(DIQKDReceiverNode):
def __init__(self):
super().__init__(QChannelDummy(), None, 0)
self.operations = []
def _receive_q_states(self):
self.operations.append("_receive_q_states")
def _send_ack(self):
self.operations.append("_send_ack")
def _share_bases(self):
self.operations.append("_share_bases")
def _receive_test_set(self):
self.operations.append("_receive_test_set")
def _separate_test_subsets(self):
self.operations.append("_separate_test_subsets")
def _send_chsh_test_values(self):
self.operations.append("_send_chsh_test_values")
def _receive_chsh_test_values(self):
self.operations.append("_receive_chsh_test_values")
def _send_match_test_values(self):
self.operations.append("_send_match_test_values")
def _receive_match_test_values(self):
self.operations.append("_receive_match_test_values")
def _calculate_winning_probability(self):
self.operations.append("_calculate_winning_probability")
def _calculate_match_error(self):
self.operations.append("_calculate_match_error")
def _is_outside_error_bound(self, win_prob, matching_error):
self.operations.append("_is_outside_error_bound")
def _receive_seed(self):
self.operations.append("_receive_seed")
def _privacy_amplification(self):
self.operations.append("_privacy_amplification")
class TestDIQKDReceiverFlow(unittest.TestCase):
def setUp(self):
self.node = DIQKDReceiverNodeSpy()
def test_share_q_states(self):
self.node.share_q_states()
self.assertSequenceEqual(["_receive_q_states", "_send_ack", "_share_bases"], self.node.operations)
def test_should_abort(self):
self.node.should_abort()
self.assertSequenceEqual(
["_receive_test_set",
"_separate_test_subsets",
"_send_chsh_test_values",
"_receive_chsh_test_values",
"_send_match_test_values",
"_receive_match_test_values",
"_calculate_winning_probability",
"_calculate_match_error",
"_is_outside_error_bound"],
self.node.operations)
def test_generate_key(self):
self.node.generate_key()
self.assertSequenceEqual(["_receive_seed", "_privacy_amplification"], self.node.operations) | QNetwork/tests/test_diqkd.py | import random
import unittest
from QNetwork.qkd.diqkd import DIQKDNode, DIQKDSenderNode, DIQKDReceiverNode
from QNetwork.q_network_channels import QState
class QChannelDummy:
pass
class QChannelSpy:
def __init__(self):
self.received_bases = []
def send_epr(self, bases):
self.received_bases = bases
def receive_epr_in(self, bases):
self.received_bases = bases
class CACStub:
def __init__(self):
self.received = None
def receive(self):
return self.received
def send(self, data):
pass
class CACSpy:
def __init__(self):
self.data_sent = None
def send(self, data):
self.data_sent = data
class QubitSpy:
def __init__(self):
self.rotation_steps = 0
self.received_Z = False
def rot_Y(self, step):
self.rotation_steps = step
def Z(self):
self.received_Z = True
class DIQKDNodeSUT(DIQKDNode):
def share_q_states(self):
pass
def should_abort(self):
pass
def generate_key(self):
pass
class TestDIQKDEntangledSharing(unittest.TestCase):
def setUp(self):
self.qc = QChannelSpy()
self.cac = CACStub()
self.node = DIQKDNodeSUT(self.qc, self.cac, 0)
def test_send_entangled_states(self):
random.seed(42)
self.node._send_q_states(4)
self.assertSequenceEqual([0, 0, 1, 0], self.qc.received_bases)
def test_receive_entangled_states(self):
random.seed(7)
self.cac.received = [4]
self.node._receive_q_states()
self.assertSequenceEqual([1, 0, 1, 2], self.qc.received_bases)
class TestDIQKDSending(unittest.TestCase):
def setUp(self):
self.cac = CACSpy()
self.node = DIQKDNodeSUT(None, self.cac, 0)
def test_send_chsh_values(self):
self.node._qstates = [QState(1, 0), QState(0, 1), QState(0, 0), QState(1, 1)]
self.node._chsh_test_set = {0, 1}
self.node._send_chsh_test_values()
self.assertSequenceEqual([1, 0], self.cac.data_sent)
self.assertSequenceEqual([1, 0], self.node._chsh_test_values)
def test_send_match_values(self):
self.node._qstates = [QState(1, 0), QState(0, 1), QState(0, 0), QState(1, 1)]
self.node._match_test_set = {0, 1}
self.node._send_match_test_values()
self.assertSequenceEqual([1, 0], self.cac.data_sent)
self.assertSequenceEqual([1, 0], self.node._match_test_values)
class TestDIQKDCommonOperations(unittest.TestCase):
def setUp(self):
self.node = DIQKDNodeSUT(None, None, 0.1)
def test_calculate_win_probability(self):
self.node._qstates = [QState(1, 0), QState(0, 1), QState(1, 1), QState(0, 1), QState(0, 0)]
self.node._chsh_test_values = [1, 0, 1, 0]
self.node._other_chsh_test_values = [1, 1, 0, 0]
self.node._other_bases = [1, 1, 0, 0, 0]
self.node._chsh_test_set = {0, 1, 2, 3}
self.assertAlmostEqual(0.75, self.node._calculate_winning_probability())
def test_calculate_match_error(self):
self.node._match_test_values = [1, 0, 0, 1]
self.node._other_match_test_values = [1, 1, 0, 1]
self.assertAlmostEqual(0.25, self.node._calculate_match_error())
def test_pwin_outside_error_bound(self):
self.assertTrue(self.node._is_outside_error_bound(0.75, 1.0))
def test_pwin_inside_error_bound(self):
self.assertFalse(self.node._is_outside_error_bound(0.85, 1.0))
def test_match_outside_error_bound(self):
self.assertTrue(self.node._is_outside_error_bound(0.85, 0.8))
def test_match_inside_error_bound(self):
self.assertFalse(self.node._is_outside_error_bound(0.85, 0.95))
def test_privacy_amplification_even(self):
self.node._qstates = [QState(1, 0), QState(1, 0), QState(0, 0), QState(1, 0), QState(1, 0)]
self.node._raw_key_set = {1, 3, 4}
self.node._seed = [1, 1, 0]
self.assertEqual([0], self.node._privacy_amplification())
def test_privacy_amplification_odd(self):
self.node._qstates = [QState(1, 0), QState(1, 0), QState(0, 0), QState(1, 0), QState(1, 0)]
self.node._raw_key_set = {1, 3, 4}
self.node._seed = [1, 1, 1]
self.assertEqual([1], self.node._privacy_amplification())
class TestDIQKDSenderOperations(unittest.TestCase):
def setUp(self):
self.node = DIQKDSenderNode(None, None, 0, None)
def test_subset_separation(self):
self.node._other_bases = [1, 0, 2, 0, 2, 2]
self.node._test_set = {0, 1, 2}
self.node._qstates = [QState(1, 0), QState(0, 0), QState(1, 0), QState(0, 0), QState(1, 1), QState(0, 0)]
self.node._separate_test_subsets()
self.assert_test_sets(expected_chsh={0, 1}, expected_match={2}, expected_raw_key={5})
def assert_test_sets(self, expected_chsh, expected_match, expected_raw_key):
self.assertEqual(expected_chsh, self.node._chsh_test_set)
self.assertEqual(expected_match, self.node._match_test_set)
self.assertEqual(expected_raw_key, self.node._raw_key_set)
class TestDIQKDReceiverOperations(unittest.TestCase):
def setUp(self):
self.qc = QChannelDummy()
self.node = DIQKDReceiverNode(self.qc, None, 0)
def test_has_correct_bases_mapping(self):
q1 = QubitSpy()
self.node.q_channel.bases_mapping[0](q1)
self.assertEqual(32, q1.rotation_steps)
q2 = QubitSpy()
self.node.q_channel.bases_mapping[1](q2)
self.assertEqual(32, q2.rotation_steps)
self.assertTrue(q2.received_Z)
q3 = QubitSpy()
self.node.q_channel.bases_mapping[2](q3)
self.assertEqual(0, q3.rotation_steps)
def test_subset_separation(self):
self.node._other_bases = [1, 0, 0, 0, 1, 0]
self.node._test_set = {0, 1, 2}
self.node._qstates = [QState(1, 1), QState(0, 0), QState(1, 2), QState(0, 0), QState(1, 1), QState(0, 2)]
self.node._separate_test_subsets()
self.assert_test_sets(expected_chsh={0, 1}, expected_match={2}, expected_raw_key={5})
def assert_test_sets(self, expected_chsh, expected_match, expected_raw_key):
self.assertEqual(expected_chsh, self.node._chsh_test_set)
self.assertEqual(expected_match, self.node._match_test_set)
self.assertEqual(expected_raw_key, self.node._raw_key_set)
class TestDIQKDReceiving(unittest.TestCase):
def setUp(self):
self.cac = CACStub()
self.node = DIQKDNodeSUT(None, self.cac, 0)
def test_receive_chsh_values(self):
self.cac.received = [1, 1, 0, 0]
self.node._receive_chsh_test_values()
self.assertSequenceEqual(self.cac.received, self.node._other_chsh_test_values)
def test_receive_match_values(self):
self.cac.received = [1, 1, 0, 0]
self.node._receive_match_test_values()
self.assertSequenceEqual(self.cac.received, self.node._other_match_test_values)
class DIQKDSenderNodeSpy(DIQKDSenderNode):
def __init__(self):
super().__init__(None, None, None, 0)
self.operations = []
def _send_q_states(self, amount):
self.operations.append("_send_q_states")
def _receive_ack(self):
self.operations.append("_receive_ack")
def _share_bases(self):
self.operations.append("_share_bases")
def _send_test_set(self):
self.operations.append("_send_test_set")
def _separate_test_subsets(self):
self.operations.append("_separate_test_subsets")
def _send_chsh_test_values(self):
self.operations.append("_send_chsh_test_values")
def _receive_chsh_test_values(self):
self.operations.append("_receive_chsh_test_values")
def _send_match_test_values(self):
self.operations.append("_send_match_test_values")
def _receive_match_test_values(self):
self.operations.append("_receive_match_test_values")
def _calculate_winning_probability(self):
self.operations.append("_calculate_winning_probability")
def _calculate_match_error(self):
self.operations.append("_calculate_match_error")
def _is_outside_error_bound(self, win_prob, matching_error):
self.operations.append("_is_outside_error_bound")
def _send_seed(self):
self.operations.append("_send_seed")
def _privacy_amplification(self):
self.operations.append("_privacy_amplification")
class TestDIQKDSenderFlow(unittest.TestCase):
def setUp(self):
self.node = DIQKDSenderNodeSpy()
def test_share_q_states(self):
self.node.share_q_states()
self.assertSequenceEqual(["_send_q_states", "_receive_ack", "_share_bases"], self.node.operations)
def test_should_abort(self):
self.node.should_abort()
self.assertSequenceEqual(
["_send_test_set",
"_separate_test_subsets",
"_send_chsh_test_values",
"_receive_chsh_test_values",
"_send_match_test_values",
"_receive_match_test_values",
"_calculate_winning_probability",
"_calculate_match_error",
"_is_outside_error_bound"],
self.node.operations)
def test_generate_key(self):
self.node.generate_key()
self.assertSequenceEqual(["_send_seed", "_privacy_amplification"], self.node.operations)
class DIQKDReceiverNodeSpy(DIQKDReceiverNode):
def __init__(self):
super().__init__(QChannelDummy(), None, 0)
self.operations = []
def _receive_q_states(self):
self.operations.append("_receive_q_states")
def _send_ack(self):
self.operations.append("_send_ack")
def _share_bases(self):
self.operations.append("_share_bases")
def _receive_test_set(self):
self.operations.append("_receive_test_set")
def _separate_test_subsets(self):
self.operations.append("_separate_test_subsets")
def _send_chsh_test_values(self):
self.operations.append("_send_chsh_test_values")
def _receive_chsh_test_values(self):
self.operations.append("_receive_chsh_test_values")
def _send_match_test_values(self):
self.operations.append("_send_match_test_values")
def _receive_match_test_values(self):
self.operations.append("_receive_match_test_values")
def _calculate_winning_probability(self):
self.operations.append("_calculate_winning_probability")
def _calculate_match_error(self):
self.operations.append("_calculate_match_error")
def _is_outside_error_bound(self, win_prob, matching_error):
self.operations.append("_is_outside_error_bound")
def _receive_seed(self):
self.operations.append("_receive_seed")
def _privacy_amplification(self):
self.operations.append("_privacy_amplification")
class TestDIQKDReceiverFlow(unittest.TestCase):
def setUp(self):
self.node = DIQKDReceiverNodeSpy()
def test_share_q_states(self):
self.node.share_q_states()
self.assertSequenceEqual(["_receive_q_states", "_send_ack", "_share_bases"], self.node.operations)
def test_should_abort(self):
self.node.should_abort()
self.assertSequenceEqual(
["_receive_test_set",
"_separate_test_subsets",
"_send_chsh_test_values",
"_receive_chsh_test_values",
"_send_match_test_values",
"_receive_match_test_values",
"_calculate_winning_probability",
"_calculate_match_error",
"_is_outside_error_bound"],
self.node.operations)
def test_generate_key(self):
self.node.generate_key()
self.assertSequenceEqual(["_receive_seed", "_privacy_amplification"], self.node.operations) | 0.713432 | 0.599339 |
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from distarray.externals.six.moves import range
from distarray.testing import import_or_skip, DefaultContextTestCase
from distarray.globalapi.distarray import DistArray
from distarray.globalapi.maps import Distribution
def cleanup_file(filepath):
import os
if os.path.exists(filepath):
os.remove(filepath)
def engine_temp_path(extension=''):
from distarray.testing import temp_filepath
return temp_filepath(extension)
class TestDnpyFileIO(DefaultContextTestCase):
@classmethod
def setUpClass(cls):
super(TestDnpyFileIO, cls).setUpClass()
cls.distribution = Distribution(cls.context, (100,), dist={0: 'b'})
cls.da = cls.context.empty(cls.distribution)
cls.output_paths = cls.context.apply(engine_temp_path)
def test_save_load_with_filenames(self):
try:
self.context.save_dnpy(self.output_paths, self.da)
db = self.context.load_dnpy(self.output_paths)
self.assertTrue(isinstance(db, DistArray))
self.assertEqual(self.da, db)
finally:
for filepath, target in zip(self.output_paths, self.context.targets):
self.context.apply(cleanup_file, (filepath,), targets=(target,))
def test_save_load_with_prefix(self):
output_path = self.output_paths[0]
try:
self.context.save_dnpy(output_path, self.da)
db = self.context.load_dnpy(output_path)
self.assertTrue(isinstance(db, DistArray))
self.assertEqual(self.da, db)
finally:
for rank in self.context.targets:
filepath = output_path + "_" + str(rank) + ".dnpy"
self.context.apply(cleanup_file, (filepath,), targets=(rank,))
bn_test_data = [
({'size': 2,
'dist_type': 'b',
'proc_grid_rank': 0,
'proc_grid_size': 2,
'start': 0,
'stop': 1,
},
{'size': 10,
'dist_type': 'n',
}),
({'size': 2,
'dist_type': 'b',
'proc_grid_rank': 1,
'proc_grid_size': 2,
'start': 1,
'stop': 2,
},
{'size': 10,
'dist_type': 'n',
})
]
nc_test_data = [
({'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'c',
'proc_grid_rank': 0,
'proc_grid_size': 2,
'start': 0,
},),
({'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'c',
'proc_grid_rank': 1,
'proc_grid_size': 2,
'start': 1,
},)
]
nu_test_data = [
# Note: unstructured indices must be in increasing order
# (restriction of h5py / HDF5)
(
{'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'u',
'proc_grid_rank': 0,
'proc_grid_size': 2,
'indices': [0, 3, 4, 6, 8],
},
),
(
{'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'u',
'proc_grid_rank': 1,
'proc_grid_size': 2,
'indices': [1, 2, 5, 7, 9],
},
)
]
class TestNpyFileLoad(DefaultContextTestCase):
"""Try loading a .npy file on the engines.
This test assumes that all engines have access to the same file system.
"""
ntargets = 2
@classmethod
def setUpClass(cls):
super(TestNpyFileLoad, cls).setUpClass()
cls.expected = np.arange(20).reshape(2, 10)
def save_test_file(data):
import numpy
from distarray.testing import temp_filepath
output_path = temp_filepath('.npy')
numpy.save(output_path, data)
return output_path
cls.output_path = cls.context.apply(save_test_file, (cls.expected,),
targets=[cls.context.targets[0]])[0] # noqa
@classmethod
def tearDownClass(cls):
cls.context.apply(cleanup_file, (cls.output_path,),
targets=[cls.context.targets[0]])
super(TestNpyFileLoad, cls).tearDownClass()
def test_load_bn(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
bn_test_data)
da = self.context.load_npy(self.output_path, distribution)
for i in range(da.shape[0]):
for j in range(da.shape[1]):
self.assertEqual(da[i, j], self.expected[i, j])
def test_load_nc(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
nc_test_data)
da = self.context.load_npy(self.output_path, distribution)
for i in range(da.shape[0]):
for j in range(da.shape[1]):
self.assertEqual(da[i, j], self.expected[i, j])
def test_load_nu(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
nu_test_data)
da = self.context.load_npy(self.output_path, distribution)
for i in range(da.shape[0]):
for j in range(da.shape[1]):
self.assertEqual(da[i, j], self.expected[i, j])
def check_hdf5_file(output_path, expected, dataset="buffer"):
import h5py
import numpy
with h5py.File(output_path, 'r') as fp:
if dataset not in fp:
return False
if not numpy.array_equal(expected, fp[dataset]):
return False
return True
class TestHdf5FileSave(DefaultContextTestCase):
def setUp(self):
super(TestHdf5FileSave, self).setUp()
self.h5py = import_or_skip('h5py')
self.output_path = self.context.apply(engine_temp_path, ('.hdf5',),
targets=[self.context.targets[0]])[0]
def tearDown(self):
self.context.apply(cleanup_file, (self.output_path,),
targets=[self.context.targets[0]])
def test_save_block(self):
datalen = 33
expected = np.arange(datalen)
da = self.context.fromarray(expected)
self.context.save_hdf5(self.output_path, da, mode='w')
file_check = self.context.apply(check_hdf5_file,
(self.output_path, expected),
targets=[self.context.targets[0]])[0]
self.assertTrue(file_check)
def test_save_3d(self):
shape = (4, 5, 3)
expected = np.random.random(shape)
dist = {0: 'b', 1: 'c', 2: 'n'}
distribution = Distribution(self.context, shape, dist=dist)
da = self.context.fromarray(expected, distribution=distribution)
self.context.save_hdf5(self.output_path, da, mode='w')
file_check = self.context.apply(check_hdf5_file,
(self.output_path, expected),
targets=[self.context.targets[0]])[0]
self.assertTrue(file_check)
def test_save_two_datasets(self):
datalen = 33
foo = np.arange(datalen)
bar = np.random.random(datalen)
da_foo = self.context.fromarray(foo)
da_bar = self.context.fromarray(bar)
# save 'foo' to a file
self.context.save_hdf5(self.output_path, da_foo, key='foo', mode='w')
# save 'bar' to a different dataset in the same file
self.context.save_hdf5(self.output_path, da_bar, key='bar', mode='a')
foo_checks = self.context.apply(check_hdf5_file,
(self.output_path, foo),
{'dataset': 'foo'},
targets=[self.context.targets[0]])[0]
self.assertTrue(foo_checks)
bar_checks = self.context.apply(check_hdf5_file,
(self.output_path, bar),
{'dataset': 'bar'},
targets=[self.context.targets[0]])[0]
self.assertTrue(bar_checks)
class TestHdf5FileLoad(DefaultContextTestCase):
ntargets = 2
@classmethod
def setUpClass(cls):
cls.h5py = import_or_skip('h5py')
super(TestHdf5FileLoad, cls).setUpClass()
cls.output_path = cls.context.apply(engine_temp_path, ('.hdf5',),
targets=[cls.context.targets[0]])[0]
cls.expected = np.arange(20).reshape(2, 10)
def make_test_file(output_path, arr):
import h5py
with h5py.File(output_path, 'w') as fp:
fp["test"] = arr
cls.context.apply(make_test_file, (cls.output_path, cls.expected),
targets=[cls.context.targets[0]])
@classmethod
def tearDownClass(cls):
cls.context.apply(cleanup_file, (cls.output_path,),
targets=[cls.context.targets[0]])
super(TestHdf5FileLoad, cls).tearDownClass()
def test_load_bn(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
bn_test_data)
da = self.context.load_hdf5(self.output_path, distribution, key="test")
assert_array_equal(self.expected, da)
def test_load_nc(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
nc_test_data)
da = self.context.load_hdf5(self.output_path, distribution, key="test")
assert_array_equal(self.expected, da)
def test_load_nu(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
nu_test_data)
da = self.context.load_hdf5(self.output_path, distribution, key="test")
assert_array_equal(self.expected, da)
if __name__ == '__main__':
unittest.main(verbosity=2) | distarray/globalapi/tests/test_distributed_io.py | import unittest
import numpy as np
from numpy.testing import assert_array_equal
from distarray.externals.six.moves import range
from distarray.testing import import_or_skip, DefaultContextTestCase
from distarray.globalapi.distarray import DistArray
from distarray.globalapi.maps import Distribution
def cleanup_file(filepath):
import os
if os.path.exists(filepath):
os.remove(filepath)
def engine_temp_path(extension=''):
from distarray.testing import temp_filepath
return temp_filepath(extension)
class TestDnpyFileIO(DefaultContextTestCase):
@classmethod
def setUpClass(cls):
super(TestDnpyFileIO, cls).setUpClass()
cls.distribution = Distribution(cls.context, (100,), dist={0: 'b'})
cls.da = cls.context.empty(cls.distribution)
cls.output_paths = cls.context.apply(engine_temp_path)
def test_save_load_with_filenames(self):
try:
self.context.save_dnpy(self.output_paths, self.da)
db = self.context.load_dnpy(self.output_paths)
self.assertTrue(isinstance(db, DistArray))
self.assertEqual(self.da, db)
finally:
for filepath, target in zip(self.output_paths, self.context.targets):
self.context.apply(cleanup_file, (filepath,), targets=(target,))
def test_save_load_with_prefix(self):
output_path = self.output_paths[0]
try:
self.context.save_dnpy(output_path, self.da)
db = self.context.load_dnpy(output_path)
self.assertTrue(isinstance(db, DistArray))
self.assertEqual(self.da, db)
finally:
for rank in self.context.targets:
filepath = output_path + "_" + str(rank) + ".dnpy"
self.context.apply(cleanup_file, (filepath,), targets=(rank,))
bn_test_data = [
({'size': 2,
'dist_type': 'b',
'proc_grid_rank': 0,
'proc_grid_size': 2,
'start': 0,
'stop': 1,
},
{'size': 10,
'dist_type': 'n',
}),
({'size': 2,
'dist_type': 'b',
'proc_grid_rank': 1,
'proc_grid_size': 2,
'start': 1,
'stop': 2,
},
{'size': 10,
'dist_type': 'n',
})
]
nc_test_data = [
({'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'c',
'proc_grid_rank': 0,
'proc_grid_size': 2,
'start': 0,
},),
({'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'c',
'proc_grid_rank': 1,
'proc_grid_size': 2,
'start': 1,
},)
]
nu_test_data = [
# Note: unstructured indices must be in increasing order
# (restriction of h5py / HDF5)
(
{'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'u',
'proc_grid_rank': 0,
'proc_grid_size': 2,
'indices': [0, 3, 4, 6, 8],
},
),
(
{'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'u',
'proc_grid_rank': 1,
'proc_grid_size': 2,
'indices': [1, 2, 5, 7, 9],
},
)
]
class TestNpyFileLoad(DefaultContextTestCase):
"""Try loading a .npy file on the engines.
This test assumes that all engines have access to the same file system.
"""
ntargets = 2
@classmethod
def setUpClass(cls):
super(TestNpyFileLoad, cls).setUpClass()
cls.expected = np.arange(20).reshape(2, 10)
def save_test_file(data):
import numpy
from distarray.testing import temp_filepath
output_path = temp_filepath('.npy')
numpy.save(output_path, data)
return output_path
cls.output_path = cls.context.apply(save_test_file, (cls.expected,),
targets=[cls.context.targets[0]])[0] # noqa
@classmethod
def tearDownClass(cls):
cls.context.apply(cleanup_file, (cls.output_path,),
targets=[cls.context.targets[0]])
super(TestNpyFileLoad, cls).tearDownClass()
def test_load_bn(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
bn_test_data)
da = self.context.load_npy(self.output_path, distribution)
for i in range(da.shape[0]):
for j in range(da.shape[1]):
self.assertEqual(da[i, j], self.expected[i, j])
def test_load_nc(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
nc_test_data)
da = self.context.load_npy(self.output_path, distribution)
for i in range(da.shape[0]):
for j in range(da.shape[1]):
self.assertEqual(da[i, j], self.expected[i, j])
def test_load_nu(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
nu_test_data)
da = self.context.load_npy(self.output_path, distribution)
for i in range(da.shape[0]):
for j in range(da.shape[1]):
self.assertEqual(da[i, j], self.expected[i, j])
def check_hdf5_file(output_path, expected, dataset="buffer"):
import h5py
import numpy
with h5py.File(output_path, 'r') as fp:
if dataset not in fp:
return False
if not numpy.array_equal(expected, fp[dataset]):
return False
return True
class TestHdf5FileSave(DefaultContextTestCase):
def setUp(self):
super(TestHdf5FileSave, self).setUp()
self.h5py = import_or_skip('h5py')
self.output_path = self.context.apply(engine_temp_path, ('.hdf5',),
targets=[self.context.targets[0]])[0]
def tearDown(self):
self.context.apply(cleanup_file, (self.output_path,),
targets=[self.context.targets[0]])
def test_save_block(self):
datalen = 33
expected = np.arange(datalen)
da = self.context.fromarray(expected)
self.context.save_hdf5(self.output_path, da, mode='w')
file_check = self.context.apply(check_hdf5_file,
(self.output_path, expected),
targets=[self.context.targets[0]])[0]
self.assertTrue(file_check)
def test_save_3d(self):
shape = (4, 5, 3)
expected = np.random.random(shape)
dist = {0: 'b', 1: 'c', 2: 'n'}
distribution = Distribution(self.context, shape, dist=dist)
da = self.context.fromarray(expected, distribution=distribution)
self.context.save_hdf5(self.output_path, da, mode='w')
file_check = self.context.apply(check_hdf5_file,
(self.output_path, expected),
targets=[self.context.targets[0]])[0]
self.assertTrue(file_check)
def test_save_two_datasets(self):
datalen = 33
foo = np.arange(datalen)
bar = np.random.random(datalen)
da_foo = self.context.fromarray(foo)
da_bar = self.context.fromarray(bar)
# save 'foo' to a file
self.context.save_hdf5(self.output_path, da_foo, key='foo', mode='w')
# save 'bar' to a different dataset in the same file
self.context.save_hdf5(self.output_path, da_bar, key='bar', mode='a')
foo_checks = self.context.apply(check_hdf5_file,
(self.output_path, foo),
{'dataset': 'foo'},
targets=[self.context.targets[0]])[0]
self.assertTrue(foo_checks)
bar_checks = self.context.apply(check_hdf5_file,
(self.output_path, bar),
{'dataset': 'bar'},
targets=[self.context.targets[0]])[0]
self.assertTrue(bar_checks)
class TestHdf5FileLoad(DefaultContextTestCase):
ntargets = 2
@classmethod
def setUpClass(cls):
cls.h5py = import_or_skip('h5py')
super(TestHdf5FileLoad, cls).setUpClass()
cls.output_path = cls.context.apply(engine_temp_path, ('.hdf5',),
targets=[cls.context.targets[0]])[0]
cls.expected = np.arange(20).reshape(2, 10)
def make_test_file(output_path, arr):
import h5py
with h5py.File(output_path, 'w') as fp:
fp["test"] = arr
cls.context.apply(make_test_file, (cls.output_path, cls.expected),
targets=[cls.context.targets[0]])
@classmethod
def tearDownClass(cls):
cls.context.apply(cleanup_file, (cls.output_path,),
targets=[cls.context.targets[0]])
super(TestHdf5FileLoad, cls).tearDownClass()
def test_load_bn(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
bn_test_data)
da = self.context.load_hdf5(self.output_path, distribution, key="test")
assert_array_equal(self.expected, da)
def test_load_nc(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
nc_test_data)
da = self.context.load_hdf5(self.output_path, distribution, key="test")
assert_array_equal(self.expected, da)
def test_load_nu(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
nu_test_data)
da = self.context.load_hdf5(self.output_path, distribution, key="test")
assert_array_equal(self.expected, da)
if __name__ == '__main__':
unittest.main(verbosity=2) | 0.480966 | 0.456652 |
import logging
import time
import numpy as np
from . import utils
from . import optimize
from . import diagnosis
from .samplers import latin_sample_n
log = logging.getLogger(__name__)
class ExcursionProblem(object):
    """Specification of an excursion-set estimation problem.

    Holds the black-box functions, the excursion thresholds, the bounding
    box of the search space, a plotting grid, and the sample sizes used for
    acquisition and posterior-mean evaluation.
    """
    def __init__(self, functions, thresholds = None, ndim = 1, bounding_box = None, plot_npoints = None, invalid_region = None, testdata = None, n_acq = 2000, n_mean = 2000):
        self._invalid_region = invalid_region
        self.functions = functions
        # Avoid a shared mutable default argument; [0.0] is the documented default.
        self.thresholds = [0.0] if thresholds is None else thresholds
        self.bounding_box = np.asarray(bounding_box or [[0, 1]] * ndim)
        assert len(self.bounding_box) == ndim
        self.ndim = ndim
        # Default plotting resolution: 101 points per axis below 3D, 31 otherwise.
        plot_npoints = plot_npoints or [[101 if ndim < 3 else 31]] * ndim
        self.plot_rangedef = np.concatenate(
            [self.bounding_box, np.asarray(plot_npoints).reshape(-1, 1)], axis=-1
        )
        self.plotG = utils.mgrid(self.plot_rangedef)
        self.plotX = utils.mesh2points(self.plotG, self.plot_rangedef[:, 2])
        self._testdata = testdata
        # BUGFIX: the original assigned n_acq to _nmean and n_mean to _nacq
        # (swapped), so acqX()/meanX() used each other's sample sizes.
        self._nmean = n_mean
        self._nacq = n_acq
    def testdata(self):
        """Return (X, [y per function]); user-supplied testdata wins if present."""
        if self._testdata:
            return self._testdata
        # Evaluate every function on all valid plot points.
        testX = self.plotX[~self.invalid_region(self.plotX)]
        testy_list = [func(testX) for func in self.functions]
        return testX, testy_list
    def invalid_region(self, X):
        """Boolean mask over rows of X; all-False when no region was given."""
        allvalid = lambda X: np.zeros_like(X[:, 0], dtype='bool')
        return self._invalid_region(X) if self._invalid_region else allvalid(X)
    def random_points(self, N, seed = None):
        """Draw N latin-hypercube points (reseeds numpy's global RNG)."""
        np.random.seed(seed)
        return latin_sample_n(self, N, self.ndim)
    def acqX(self):
        """Candidate points for acquisition-function evaluation."""
        return self.random_points(self._nacq)
    def meanX(self):
        """Sample points for posterior-mean estimation."""
        return self.random_points(self._nmean)
class Learner(object):
    """Active-learning loop state.

    Accumulates evaluated points `X`, per-function labels `y_list`, fitted
    GP models `gps`, and a history of diagnostic metrics.

    NOTE: dataset-dump metadata fused onto the last line of the original
    block (a syntax error) has been removed.
    """
    def __init__(self, scandetails, gp_maker = optimize.get_gp, evaluator = optimize.default_evaluator):
        self.scandetails = scandetails
        self.gp_maker = gp_maker
        self.evaluator = evaluator
        self.metrics = []
        # Start with zero observations; one empty label array per function.
        self.X = np.empty((0, scandetails.ndim))
        self.y_list = [np.empty((0,)) for _ in scandetails.functions]
    def evaluate_metrics(self):
        """Diagnostics for the current dataset and GP models."""
        return diagnosis.diagnose(self.X, self.y_list, self.gps, self.scandetails)
    def initialize(self, n_init = 5, seed = None, snapshot = None):
        """Seed the learner with fresh evaluations, or restore from a snapshot dict."""
        if not snapshot:
            self.X, self.y_list, self.gps = optimize.init(
                self.scandetails, n_init, seed, self.evaluator, self.gp_maker
            )
            self.metrics.append(self.evaluate_metrics())
        else:
            self.X = np.asarray(snapshot['X'])
            self.y_list = [np.asarray(y) for y in snapshot['y_list']]
            # Refit one GP per label array; GPs themselves are not snapshotted.
            self.gps = [self.gp_maker(self.X, yl) for yl in self.y_list]
            self.metrics = snapshot['metrics']
    def suggest(self, batchsize = 1, resampling_frac = 0.30):
        """Propose the next batch of points to evaluate."""
        return optimize.suggest(
            self.gps, self.X, self.scandetails,
            gp_maker=self.gp_maker, batchsize=batchsize,
            resampling_frac=resampling_frac, return_acqvals=False
        )
    def tell(self, newX, newys_list):
        """Incorporate new observations, refit the GPs, and record metrics."""
        self.X, self.y_list, self.gps = optimize.tell(
            self.X, self.y_list, self.scandetails,
            newX, newys_list, self.gp_maker
        )
        self.metrics.append(self.evaluate_metrics())
    def evaluate_and_tell(self, newX):
        """Evaluate the true functions at newX and incorporate the results."""
        newys_list = self.evaluator(self.scandetails, newX)
        self.tell(newX, newys_list)
import time
import numpy as np
from . import utils
from . import optimize
from . import diagnosis
from .samplers import latin_sample_n
log = logging.getLogger(__name__)
class ExcursionProblem(object):
    """Specification of an excursion-set estimation problem.

    Holds the black-box functions, the excursion thresholds, the bounding
    box of the search space, a plotting grid, and the sample sizes used for
    acquisition and posterior-mean evaluation.
    """
    def __init__(self, functions, thresholds = None, ndim = 1, bounding_box = None, plot_npoints = None, invalid_region = None, testdata = None, n_acq = 2000, n_mean = 2000):
        self._invalid_region = invalid_region
        self.functions = functions
        # Avoid a shared mutable default argument; [0.0] is the documented default.
        self.thresholds = [0.0] if thresholds is None else thresholds
        self.bounding_box = np.asarray(bounding_box or [[0, 1]] * ndim)
        assert len(self.bounding_box) == ndim
        self.ndim = ndim
        # Default plotting resolution: 101 points per axis below 3D, 31 otherwise.
        plot_npoints = plot_npoints or [[101 if ndim < 3 else 31]] * ndim
        self.plot_rangedef = np.concatenate(
            [self.bounding_box, np.asarray(plot_npoints).reshape(-1, 1)], axis=-1
        )
        self.plotG = utils.mgrid(self.plot_rangedef)
        self.plotX = utils.mesh2points(self.plotG, self.plot_rangedef[:, 2])
        self._testdata = testdata
        # BUGFIX: the original assigned n_acq to _nmean and n_mean to _nacq
        # (swapped), so acqX()/meanX() used each other's sample sizes.
        self._nmean = n_mean
        self._nacq = n_acq
    def testdata(self):
        """Return (X, [y per function]); user-supplied testdata wins if present."""
        if self._testdata:
            return self._testdata
        # Evaluate every function on all valid plot points.
        testX = self.plotX[~self.invalid_region(self.plotX)]
        testy_list = [func(testX) for func in self.functions]
        return testX, testy_list
    def invalid_region(self, X):
        """Boolean mask over rows of X; all-False when no region was given."""
        allvalid = lambda X: np.zeros_like(X[:, 0], dtype='bool')
        return self._invalid_region(X) if self._invalid_region else allvalid(X)
    def random_points(self, N, seed = None):
        """Draw N latin-hypercube points (reseeds numpy's global RNG)."""
        np.random.seed(seed)
        return latin_sample_n(self, N, self.ndim)
    def acqX(self):
        """Candidate points for acquisition-function evaluation."""
        return self.random_points(self._nacq)
    def meanX(self):
        """Sample points for posterior-mean estimation."""
        return self.random_points(self._nmean)
class Learner(object):
    """Active-learning loop state.

    Accumulates evaluated points `X`, per-function labels `y_list`, fitted
    GP models `gps`, and a history of diagnostic metrics.

    NOTE: dataset-dump metadata fused onto the last line of the original
    block (a syntax error) has been removed.
    """
    def __init__(self, scandetails, gp_maker = optimize.get_gp, evaluator = optimize.default_evaluator):
        self.scandetails = scandetails
        self.gp_maker = gp_maker
        self.evaluator = evaluator
        self.metrics = []
        # Start with zero observations; one empty label array per function.
        self.X = np.empty((0, scandetails.ndim))
        self.y_list = [np.empty((0,)) for _ in scandetails.functions]
    def evaluate_metrics(self):
        """Diagnostics for the current dataset and GP models."""
        return diagnosis.diagnose(self.X, self.y_list, self.gps, self.scandetails)
    def initialize(self, n_init = 5, seed = None, snapshot = None):
        """Seed the learner with fresh evaluations, or restore from a snapshot dict."""
        if not snapshot:
            self.X, self.y_list, self.gps = optimize.init(
                self.scandetails, n_init, seed, self.evaluator, self.gp_maker
            )
            self.metrics.append(self.evaluate_metrics())
        else:
            self.X = np.asarray(snapshot['X'])
            self.y_list = [np.asarray(y) for y in snapshot['y_list']]
            # Refit one GP per label array; GPs themselves are not snapshotted.
            self.gps = [self.gp_maker(self.X, yl) for yl in self.y_list]
            self.metrics = snapshot['metrics']
    def suggest(self, batchsize = 1, resampling_frac = 0.30):
        """Propose the next batch of points to evaluate."""
        return optimize.suggest(
            self.gps, self.X, self.scandetails,
            gp_maker=self.gp_maker, batchsize=batchsize,
            resampling_frac=resampling_frac, return_acqvals=False
        )
    def tell(self, newX, newys_list):
        """Incorporate new observations, refit the GPs, and record metrics."""
        self.X, self.y_list, self.gps = optimize.tell(
            self.X, self.y_list, self.scandetails,
            newX, newys_list, self.gp_maker
        )
        self.metrics.append(self.evaluate_metrics())
    def evaluate_and_tell(self, newX):
        """Evaluate the true functions at newX and incorporate the results."""
        newys_list = self.evaluator(self.scandetails, newX)
        self.tell(newX, newys_list)
from math import sqrt
class Vector(object):
    """A small n-dimensional vector with in-place arithmetic helpers.

    Coordinates are stored as a tuple; the mutating operations rebind
    `self.coordinates` to a new tuple.
    """
    def __init__(self, coordinates):
        try:
            if not coordinates:
                raise ValueError
            self.coordinates = tuple(coordinates)
            self.dimension = len(coordinates)
        except ValueError:
            raise ValueError('The coordinates must be nonempty')
        except TypeError:
            raise TypeError('The coordinates must be an iterable')
    def __str__(self):
        return 'Vector: {}'.format(self.coordinates)
    def __eq__(self, v):
        return self.coordinates == v.coordinates
    def add(self, v):
        """Element-wise in-place addition of another vector."""
        pairs = zip(self.coordinates, v.coordinates)
        self.coordinates = tuple(a + b for a, b in pairs)
    def subtract(self, v):
        """Element-wise in-place subtraction of another vector."""
        pairs = zip(self.coordinates, v.coordinates)
        self.coordinates = tuple(a - b for a, b in pairs)
    def scalar_multiply(self, s):
        """Multiply every coordinate by scalar s, in place."""
        self.coordinates = tuple(s * c for c in self.coordinates)
    def magnitude(self):
        """Euclidean (L2) length of the vector."""
        return sqrt(sum(c * c for c in self.coordinates))
    def normalization(self):
        """Scale to unit length in place and return self.

        For the zero vector, prints a message and returns None.
        """
        try:
            self.scalar_multiply(1. / self.magnitude())
            return self
        except ZeroDivisionError:
            print("Cannot normalize the zero vector.")
if __name__ == '__main__':
    # Manual demo / smoke test of the Vector operations (prints results).
    # Dataset-dump metadata fused onto the final line of the original block
    # (a syntax error) has been removed.
    vector_1 = Vector((8.218, -9.341))
    print(vector_1)
    vector_2 = Vector((-1.129, 2.111))
    print(vector_2)
    print("Vector 1 + vector 2:")
    vector_1.add(vector_2)
    print(vector_1)
    vector_3 = Vector((7.119, 8.215))
    vector_4 = Vector((-8.223, 0.878))
    print("Vector 3 minus vector 4:")
    vector_3.subtract(vector_4)
    print(vector_3)
    vector_5 = Vector((1.671, -1.012, -0.318))
    scalar_1 = 7.41
    print("Vector 5 times scalar 1:")
    vector_5.scalar_multiply(scalar_1)
    print(vector_5)
    vector_6_1 = Vector((-0.221, 7.437))
    print("vector_6_1.magnitude: " + str(round(vector_6_1.magnitude(), 3)))
    vector_6_2 = Vector((8.813, -1.331, -6.247))
    print("vector_6_2.magnitude: " + str(round(vector_6_2.magnitude(), 3)))
    vector_6_3 = Vector((5.581, -2.136))
    print("vector_6_3.normalization(): " + str(vector_6_3.normalization()))
    vector_6_4 = Vector((1.996, 3.108, -4.554))
    print("vector_6_4.normalization(): " + str(vector_6_4.normalization()))
class Vector(object):
    """A small n-dimensional vector with in-place arithmetic helpers.

    Coordinates are stored as a tuple; the mutating operations rebind
    `self.coordinates` to a new tuple.
    """
    def __init__(self, coordinates):
        try:
            if not coordinates:
                raise ValueError
            self.coordinates = tuple(coordinates)
            self.dimension = len(coordinates)
        except ValueError:
            raise ValueError('The coordinates must be nonempty')
        except TypeError:
            raise TypeError('The coordinates must be an iterable')
    def __str__(self):
        return 'Vector: {}'.format(self.coordinates)
    def __eq__(self, v):
        return self.coordinates == v.coordinates
    def add(self, v):
        """Element-wise in-place addition of another vector."""
        pairs = zip(self.coordinates, v.coordinates)
        self.coordinates = tuple(a + b for a, b in pairs)
    def subtract(self, v):
        """Element-wise in-place subtraction of another vector."""
        pairs = zip(self.coordinates, v.coordinates)
        self.coordinates = tuple(a - b for a, b in pairs)
    def scalar_multiply(self, s):
        """Multiply every coordinate by scalar s, in place."""
        self.coordinates = tuple(s * c for c in self.coordinates)
    def magnitude(self):
        """Euclidean (L2) length of the vector."""
        return sqrt(sum(c * c for c in self.coordinates))
    def normalization(self):
        """Scale to unit length in place and return self.

        For the zero vector, prints a message and returns None.
        """
        try:
            self.scalar_multiply(1. / self.magnitude())
            return self
        except ZeroDivisionError:
            print("Cannot normalize the zero vector.")
if __name__ == '__main__':
    # Manual demo / smoke test of the Vector operations (prints results).
    # Dataset-dump metadata fused onto the final line of the original block
    # (a syntax error) has been removed.
    vector_1 = Vector((8.218, -9.341))
    print(vector_1)
    vector_2 = Vector((-1.129, 2.111))
    print(vector_2)
    print("Vector 1 + vector 2:")
    vector_1.add(vector_2)
    print(vector_1)
    vector_3 = Vector((7.119, 8.215))
    vector_4 = Vector((-8.223, 0.878))
    print("Vector 3 minus vector 4:")
    vector_3.subtract(vector_4)
    print(vector_3)
    vector_5 = Vector((1.671, -1.012, -0.318))
    scalar_1 = 7.41
    print("Vector 5 times scalar 1:")
    vector_5.scalar_multiply(scalar_1)
    print(vector_5)
    vector_6_1 = Vector((-0.221, 7.437))
    print("vector_6_1.magnitude: " + str(round(vector_6_1.magnitude(), 3)))
    vector_6_2 = Vector((8.813, -1.331, -6.247))
    print("vector_6_2.magnitude: " + str(round(vector_6_2.magnitude(), 3)))
    vector_6_3 = Vector((5.581, -2.136))
    print("vector_6_3.normalization(): " + str(vector_6_3.normalization()))
    vector_6_4 = Vector((1.996, 3.108, -4.554))
    print("vector_6_4.normalization(): " + str(vector_6_4.normalization()))
from __future__ import division
import os
import sys
import tempfile
from scipy import stats
from math import isnan, isinf
from numpy import isclose
from subprocess import check_call, call
# Locate the benchmark root -- three directory levels above this script --
# and put its shared "lib" directory on the import path.
# The original performed this whole dance twice and re-imported
# ParallelTest; the second copy was redundant and has been removed.
root = os.path.abspath( sys.argv[ 0 ] )
for i in range( 3 ):
    root = os.path.dirname( root )
sys.path.append( os.path.join( root, "lib" ) )
from testutil import Multitmp, ParallelTest
from util import mktemp
# Run test.py -h to get usage information
class FerretTest( ParallelTest ):
    """GenProg test harness for the PARSEC `ferret` similarity-search benchmark.

    Runs the benchmark binary over a size-selected input set and, when
    the ``--error`` option is active, scores each produced ranking file
    against golden output using Kendall-tau correlations plus penalties
    for missing/extra queries and ranks.  (Python 2 era code.)
    """
    def __init__( self, *args ):
        # Backup copy of the input directory; created lazily in checkArgs().
        self.__backup = None
        ParallelTest.__init__( self, *args )
    def __del__( self ):
        # Best-effort removal of the input backup.
        if self.__backup is not None:
            call( [ "rm", "-rf", self.__backup ] )
    def _getInputDir( self ):
        """Map the configured test size to its input directory (relative path)."""
        return {
            "test": "inputs/input_test",
            "tiny": "inputs/input_dev",
            "small": "inputs/input_small",
            "medium": "inputs/input_medium",
            "large": "inputs/input_large",
            "huge": "inputs/input_native",
        }[ self.size ]
    def checkArgs( self, *args ):
        """Validate arguments, then snapshot the input dir so runs can restore it."""
        result = ParallelTest.checkArgs( self, *args )
        self.__backup = tempfile.mkdtemp()
        check_call( [
            "rsync", "-a", self._getInputDir() + "/", self.__backup
        ] )
        return result
    def getCommand( self, outfile ):
        """Build the ferret command line: corpus, "lsh" index mode, queries,
        per-size numeric parameters, and the output file.

        NOTE(review): the meaning of the three numeric arguments is assumed
        to match ferret's usage (top-k / depth / threads) -- confirm against
        the benchmark's own documentation.
        """
        inputdir = self._getInputDir()
        cmd = [ self.exe, "%s/corel" % inputdir, "lsh", "%s/queries" % inputdir ]
        cmd += {
            "test": [ "5", "5", "1" ],
            "tiny": [ "5", "5", "1" ],
            "small": [ "10", "20", "1" ],
            "medium": [ "10", "20", "1" ],
            "large": [ "10", "20", "1" ],
            "huge": [ "50", "20", "1" ],
        }[ self.size ]
        cmd += [ outfile ]
        return cmd, dict()
    def readFile( self, infile ):
        """Parse a ferret output file into a sorted list of
        (query, line_no, [(image, position, rank), ...]) tuples.

        Malformed "image:rank" tokens become ("", position, 0.0)
        placeholders; NaN ranks are dropped entirely.  Both the query list
        and every rank list are sorted so files compare order-insensitively.
        """
        queries = list()
        with open( infile ) as fh:
            for i, line in enumerate( fh ):
                cur_rank = list()
                terms = line.split()
                if len( terms ) == 0:
                    # Blank line: keep a placeholder query so counts line up.
                    queries.append( ( "", i, [] ) )
                    continue
                query = terms[0]
                ranks = terms[1:]
                for j, rank in enumerate( ranks ):
                    terms = rank.split(":")
                    if len( terms ) != 2:
                        cur_rank.append( ( "", j, 0.0 ) )
                        continue
                    image, rank = terms
                    try:
                        rank = float( rank )
                    except ValueError:
                        cur_rank.append( ( "", j, 0.0 ) )
                        continue
                    if not isnan( rank ):
                        cur_rank.append( ( image, j, rank ) )
                cur_rank.sort()
                queries.append( ( query, i, cur_rank ) )
        queries.sort()
        return queries
    def comm( self, l1, l2 ):
        """Merge-compare two lists sorted on their first element (like comm(1)).

        Returns (only_in_l1, l1_common, l2_common, only_in_l2), with the
        matching key stripped from every tuple (the `[1:]` tails).
        """
        i, j = 0, 0
        a, b = list(), list()
        a_extra, b_extra = list(), list()
        while i < len( l1 ) and j < len( l2 ):
            if l1[ i ][ 0 ] == l2[ j ][ 0 ]:
                a.append( l1[ i ][ 1: ] )
                b.append( l2[ j ][ 1: ] )
                i += 1
                j += 1
            elif l1[ i ][ 0 ] < l2[ j ][ 0 ]:
                a_extra.append( l1[ i ][ 1: ] )
                i += 1
            else:
                b_extra.append( l2[ j ][ 1: ] )
                j += 1
        while i < len( l1 ):
            a_extra.append( l1[ i ][ 1: ] )
            i += 1
        while j < len( l2 ):
            b_extra.append( l2[ j ][ 1: ] )
            j += 1
        return ( a_extra, a, b, b_extra )
    def validateCorrectness( self, outfile ):
        """Boolean pass/fail, plus (with --error) a per-file fitness in (0, 1].

        The fitness is 1 / (1 + weighted penalties); larger penalties for
        missing/extra queries dominate rank-order and weight errors.
        """
        correctness = ParallelTest.validateCorrectness( self, outfile )
        if not correctness:
            return False
        if self.options.error:
            golden = self.getGolden()
            gold_queries = self.readFile( golden )
            errors = list()
            # Error function for missing/extra things. Max good is 0, max bad is 1
            def errorFun( missing, extra ):
                return 1 - ( 1 / ( 2 + ( 2 * missing ) ) ) - ( 1 / ( 2 + ( 2 * extra ) ) )
            for fname in outfile:
                # No output file is max error
                if not os.path.isfile(fname):
                    return False
                # NOTE(review): this dict() is immediately overwritten below.
                test_queries = dict()
                test_queries = self.readFile( fname )
                t1 = 0 # Kendall tau penalty for weights
                t2 = 0 # Kendall tau penalty for rank output order
                t3 = 0 # Kendall tau penalty for query output order
                w = 0 # Weighting error penalty
                r = 0 # Penalty for missing or extra ranks
                extra_queries, test_queries_int, gold_queries_int, missing_queries = \
                    self.comm( test_queries, gold_queries )
                # Penalty for missing or extra queries
                q = errorFun( len( missing_queries ), len( extra_queries ) )
                if len( gold_queries_int ) < 2:
                    t3 = 1
                else:
                    t3, _ = stats.kendalltau( [ x for x, _ in test_queries_int ],
                                              [ x for x, _ in gold_queries_int ] )
                    t3 = (.5) - (.5 * t3) # Change tau scale to [0-1] where 0 is good
                    if isclose( t3, 0 ):
                        t3 = 0.0
                for test_query, gold_query in zip( test_queries_int, gold_queries_int ):
                    extra_ranks, test_ranks_int, gold_ranks_int, missing_ranks = \
                        self.comm( test_query[1], gold_query[1] )
                    r += errorFun( len( missing_ranks ), len( extra_ranks ) )
                    if len( gold_ranks_int ) < 2:
                        tau1 = 1
                    else:
                        tau1, _ = stats.kendalltau( [x for _, x in gold_ranks_int],
                                                    [x for _, x in test_ranks_int] )
                        tau1 = (.5) - (.5 * tau1) # Change tau scale to [0-1] where 0 is good
                        if isclose( tau1, 0 ):
                            tau1 = 0.0
                    # Absolute difference of matched rank weights, squashed to [0, 1).
                    absolute_error = sum( [ abs( a[1] - b[1] ) for a, b in
                                            zip( gold_ranks_int, test_ranks_int ) ] )
                    if isnan( absolute_error ) or isinf( absolute_error ):
                        w += 1
                    else:
                        w += absolute_error / ( absolute_error + 1 )
                    t1 += tau1
                    if len( gold_ranks_int ) < 2:
                        tau2 = 1
                    else:
                        tau2, _ = stats.kendalltau( [x for x, _ in gold_ranks_int],
                                                    [x for x, _ in test_ranks_int] )
                        tau2 = (.5) - (.5 * tau2) # Change tau scale to [0-1] where 0 is good
                        if isclose( tau2, 0 ):
                            tau2 = 0.0
                    t2 += tau2
                t1 = t1 / len( gold_queries_int )
                t2 = t2 / len( gold_queries_int )
                w = w / len( gold_queries_int )
                r = r / len( gold_queries_int )
                # Weighted penalty sum; query-level errors dominate.
                error = 1000 * q + 100 * r + 10 * t1 + 10 * t2 + 5 * t3 + w
                errors.append( 1 / ( error + 1 ) )
            self.error = errors
            return True
        else:
            return correctness
    def diff( self, golden, actual):
        """Skip the textual diff when --error scoring is in effect."""
        self.error = list()
        if self.options.error:
            return True
        else:
            return ParallelTest.diff( self, golden, actual )
    def getParallelFitness( self, root, metrics ):
        """Run the parent fitness evaluation, restore the (possibly mutated)
        input directory from the backup, and append the error metric when
        --error is active."""
        results = ParallelTest.getParallelFitness( self, root, metrics )
        check_call( [
            "rsync", "-a", self.__backup + "/", self._getInputDir()
        ])
        if self.options.error:
            if results == [ [ 0 ] ]:
                return [ [ 0 ], [ 0 ] ]
            results.append( self.error )
            return results
        else:
            return results
# Entry point: run the harness; `root` is the benchmark root computed above.
FerretTest().run( root )
# exit non-zero so GenProg won't consider this a "repair"
exit( 1 )
import os
import sys
import tempfile
from scipy import stats
from math import isnan, isinf
from numpy import isclose
from subprocess import check_call, call
# Locate the benchmark root -- three directory levels above this script --
# and put its shared "lib" directory on the import path.
# The original performed this whole dance twice and re-imported
# ParallelTest; the second copy was redundant and has been removed.
root = os.path.abspath( sys.argv[ 0 ] )
for i in range( 3 ):
    root = os.path.dirname( root )
sys.path.append( os.path.join( root, "lib" ) )
from testutil import Multitmp, ParallelTest
from util import mktemp
# Run test.py -h to get usage information
class FerretTest( ParallelTest ):
    """GenProg test harness for the PARSEC `ferret` similarity-search benchmark.

    Runs the benchmark binary over a size-selected input set and, when
    the ``--error`` option is active, scores each produced ranking file
    against golden output using Kendall-tau correlations plus penalties
    for missing/extra queries and ranks.  (Python 2 era code.)
    """
    def __init__( self, *args ):
        # Backup copy of the input directory; created lazily in checkArgs().
        self.__backup = None
        ParallelTest.__init__( self, *args )
    def __del__( self ):
        # Best-effort removal of the input backup.
        if self.__backup is not None:
            call( [ "rm", "-rf", self.__backup ] )
    def _getInputDir( self ):
        """Map the configured test size to its input directory (relative path)."""
        return {
            "test": "inputs/input_test",
            "tiny": "inputs/input_dev",
            "small": "inputs/input_small",
            "medium": "inputs/input_medium",
            "large": "inputs/input_large",
            "huge": "inputs/input_native",
        }[ self.size ]
    def checkArgs( self, *args ):
        """Validate arguments, then snapshot the input dir so runs can restore it."""
        result = ParallelTest.checkArgs( self, *args )
        self.__backup = tempfile.mkdtemp()
        check_call( [
            "rsync", "-a", self._getInputDir() + "/", self.__backup
        ] )
        return result
    def getCommand( self, outfile ):
        """Build the ferret command line: corpus, "lsh" index mode, queries,
        per-size numeric parameters, and the output file.

        NOTE(review): the meaning of the three numeric arguments is assumed
        to match ferret's usage (top-k / depth / threads) -- confirm against
        the benchmark's own documentation.
        """
        inputdir = self._getInputDir()
        cmd = [ self.exe, "%s/corel" % inputdir, "lsh", "%s/queries" % inputdir ]
        cmd += {
            "test": [ "5", "5", "1" ],
            "tiny": [ "5", "5", "1" ],
            "small": [ "10", "20", "1" ],
            "medium": [ "10", "20", "1" ],
            "large": [ "10", "20", "1" ],
            "huge": [ "50", "20", "1" ],
        }[ self.size ]
        cmd += [ outfile ]
        return cmd, dict()
    def readFile( self, infile ):
        """Parse a ferret output file into a sorted list of
        (query, line_no, [(image, position, rank), ...]) tuples.

        Malformed "image:rank" tokens become ("", position, 0.0)
        placeholders; NaN ranks are dropped entirely.  Both the query list
        and every rank list are sorted so files compare order-insensitively.
        """
        queries = list()
        with open( infile ) as fh:
            for i, line in enumerate( fh ):
                cur_rank = list()
                terms = line.split()
                if len( terms ) == 0:
                    # Blank line: keep a placeholder query so counts line up.
                    queries.append( ( "", i, [] ) )
                    continue
                query = terms[0]
                ranks = terms[1:]
                for j, rank in enumerate( ranks ):
                    terms = rank.split(":")
                    if len( terms ) != 2:
                        cur_rank.append( ( "", j, 0.0 ) )
                        continue
                    image, rank = terms
                    try:
                        rank = float( rank )
                    except ValueError:
                        cur_rank.append( ( "", j, 0.0 ) )
                        continue
                    if not isnan( rank ):
                        cur_rank.append( ( image, j, rank ) )
                cur_rank.sort()
                queries.append( ( query, i, cur_rank ) )
        queries.sort()
        return queries
    def comm( self, l1, l2 ):
        """Merge-compare two lists sorted on their first element (like comm(1)).

        Returns (only_in_l1, l1_common, l2_common, only_in_l2), with the
        matching key stripped from every tuple (the `[1:]` tails).
        """
        i, j = 0, 0
        a, b = list(), list()
        a_extra, b_extra = list(), list()
        while i < len( l1 ) and j < len( l2 ):
            if l1[ i ][ 0 ] == l2[ j ][ 0 ]:
                a.append( l1[ i ][ 1: ] )
                b.append( l2[ j ][ 1: ] )
                i += 1
                j += 1
            elif l1[ i ][ 0 ] < l2[ j ][ 0 ]:
                a_extra.append( l1[ i ][ 1: ] )
                i += 1
            else:
                b_extra.append( l2[ j ][ 1: ] )
                j += 1
        while i < len( l1 ):
            a_extra.append( l1[ i ][ 1: ] )
            i += 1
        while j < len( l2 ):
            b_extra.append( l2[ j ][ 1: ] )
            j += 1
        return ( a_extra, a, b, b_extra )
    def validateCorrectness( self, outfile ):
        """Boolean pass/fail, plus (with --error) a per-file fitness in (0, 1].

        The fitness is 1 / (1 + weighted penalties); larger penalties for
        missing/extra queries dominate rank-order and weight errors.
        """
        correctness = ParallelTest.validateCorrectness( self, outfile )
        if not correctness:
            return False
        if self.options.error:
            golden = self.getGolden()
            gold_queries = self.readFile( golden )
            errors = list()
            # Error function for missing/extra things. Max good is 0, max bad is 1
            def errorFun( missing, extra ):
                return 1 - ( 1 / ( 2 + ( 2 * missing ) ) ) - ( 1 / ( 2 + ( 2 * extra ) ) )
            for fname in outfile:
                # No output file is max error
                if not os.path.isfile(fname):
                    return False
                # NOTE(review): this dict() is immediately overwritten below.
                test_queries = dict()
                test_queries = self.readFile( fname )
                t1 = 0 # Kendall tau penalty for weights
                t2 = 0 # Kendall tau penalty for rank output order
                t3 = 0 # Kendall tau penalty for query output order
                w = 0 # Weighting error penalty
                r = 0 # Penalty for missing or extra ranks
                extra_queries, test_queries_int, gold_queries_int, missing_queries = \
                    self.comm( test_queries, gold_queries )
                # Penalty for missing or extra queries
                q = errorFun( len( missing_queries ), len( extra_queries ) )
                if len( gold_queries_int ) < 2:
                    t3 = 1
                else:
                    t3, _ = stats.kendalltau( [ x for x, _ in test_queries_int ],
                                              [ x for x, _ in gold_queries_int ] )
                    t3 = (.5) - (.5 * t3) # Change tau scale to [0-1] where 0 is good
                    if isclose( t3, 0 ):
                        t3 = 0.0
                for test_query, gold_query in zip( test_queries_int, gold_queries_int ):
                    extra_ranks, test_ranks_int, gold_ranks_int, missing_ranks = \
                        self.comm( test_query[1], gold_query[1] )
                    r += errorFun( len( missing_ranks ), len( extra_ranks ) )
                    if len( gold_ranks_int ) < 2:
                        tau1 = 1
                    else:
                        tau1, _ = stats.kendalltau( [x for _, x in gold_ranks_int],
                                                    [x for _, x in test_ranks_int] )
                        tau1 = (.5) - (.5 * tau1) # Change tau scale to [0-1] where 0 is good
                        if isclose( tau1, 0 ):
                            tau1 = 0.0
                    # Absolute difference of matched rank weights, squashed to [0, 1).
                    absolute_error = sum( [ abs( a[1] - b[1] ) for a, b in
                                            zip( gold_ranks_int, test_ranks_int ) ] )
                    if isnan( absolute_error ) or isinf( absolute_error ):
                        w += 1
                    else:
                        w += absolute_error / ( absolute_error + 1 )
                    t1 += tau1
                    if len( gold_ranks_int ) < 2:
                        tau2 = 1
                    else:
                        tau2, _ = stats.kendalltau( [x for x, _ in gold_ranks_int],
                                                    [x for x, _ in test_ranks_int] )
                        tau2 = (.5) - (.5 * tau2) # Change tau scale to [0-1] where 0 is good
                        if isclose( tau2, 0 ):
                            tau2 = 0.0
                    t2 += tau2
                t1 = t1 / len( gold_queries_int )
                t2 = t2 / len( gold_queries_int )
                w = w / len( gold_queries_int )
                r = r / len( gold_queries_int )
                # Weighted penalty sum; query-level errors dominate.
                error = 1000 * q + 100 * r + 10 * t1 + 10 * t2 + 5 * t3 + w
                errors.append( 1 / ( error + 1 ) )
            self.error = errors
            return True
        else:
            return correctness
    def diff( self, golden, actual):
        """Skip the textual diff when --error scoring is in effect."""
        self.error = list()
        if self.options.error:
            return True
        else:
            return ParallelTest.diff( self, golden, actual )
    def getParallelFitness( self, root, metrics ):
        """Run the parent fitness evaluation, restore the (possibly mutated)
        input directory from the backup, and append the error metric when
        --error is active."""
        results = ParallelTest.getParallelFitness( self, root, metrics )
        check_call( [
            "rsync", "-a", self.__backup + "/", self._getInputDir()
        ])
        if self.options.error:
            if results == [ [ 0 ] ]:
                return [ [ 0 ], [ 0 ] ]
            results.append( self.error )
            return results
        else:
            return results
# Entry point: run the harness; `root` is the benchmark root computed above.
FerretTest().run( root )
# exit non-zero so GenProg won't consider this a "repair"
exit( 1 )
import os
# This is needed to make daily_workflow work
os.environ["TZ"] = "UTC"
import gzip
import functools
import json
import random
import shutil
import string
import subprocess
import sys
import tempfile
import time
import unittest
from base64 import b64encode
import psycopg2
from centrifugation import httpt_body, exc_hash, pop_values, ChecksummingTee, NopTeeFd
from oonipl.pg import PGCopyFrom, pg_quote, _pg_unquote
def _httpt_body(body, te=None):
    """Wrap `body` in a minimal http_transaction dict and run httpt_body on it.

    `te` optionally sets a Transfer-Encoding header; the mixed-case header
    name exercises case-insensitive header matching in httpt_body.
    """
    headers = {}
    if te is not None:
        headers["TrAnSfEr-EnCoDiNg"] = te
    return httpt_body({"body": body, "headers": headers})
class TestChunked(unittest.TestCase):
    """Tests for chunked transfer-encoding decoding in httpt_body.

    NOTE(review): Python 2 only -- test_broken relies on `str.decode` and
    byte/unicode mixing semantics that do not exist in Python 3.
    """
    def test_empty(self):
        """Empty body and empty chunked stream both decode to ''."""
        self.assertEqual(_httpt_body(""), "")
        self.assertEqual(_httpt_body("0\r\n\r\n", "chunked"), "")
    def test_chunked(self):
        """Chunk sizes are byte counts; multi-byte UTF-8 text round-trips."""
        self.assertEqual(
            _httpt_body(
                "4\r\nasdf\r\n" "3\r\nqwe\r\n" "2\r\nzx\r\n" "0\r\n\r\n", "chunked"
            ),
            "asdfqwezx",
        )
        self.assertEqual(
            _httpt_body(
                u"2\r\nzx\r\n"
                u"8\r\nпсой\r\n" # NB: 8 bytes for 4 symbols!
                u"0\r\n\r\n",
                "chunked",
            ),
            u"zxпсой".encode("utf-8"),
        )
    def test_broken(self):
        """Non-UTF8 bytes: raw str still de-chunks; pre-decoded text does not."""
        raw = "2\r\nzx\r\n" "7\r\nFilast\xf2\r\n" "0\r\n\r\n"
        self.assertEqual(_httpt_body(raw, "chunked"), "zxFilast\xf2")
        # NB: can't be properly de-chunked after <meta/> charset decoding
        uni = raw.decode("ISO-8859-1")
        self.assertEqual(_httpt_body(uni, "chunked"), uni.encode("utf-8"))
class TestPGQuoting(unittest.TestCase):
    """Round-trip tests for PostgreSQL COPY quoting helpers.

    NOTE(review): Python 2 only (uses `unichr` and `xrange`).
    """
    def test_bool(self):
        """Booleans map to the COPY literals TRUE/FALSE."""
        self.assertEqual(pg_quote(True), "TRUE")
        self.assertEqual(pg_quote(False), "FALSE")
    def test_bits(self):
        """All byte values round-trip; NUL is replaced with U+FFFD."""
        blob = u"".join(map(unichr, xrange(1, 256))).encode("utf-8")
        self.assertEqual(blob, _pg_unquote(pg_quote(blob)))
        self.assertEqual(u"\ufffd".encode("utf-8"), pg_quote(u"\u0000"))
        self.assertEqual(u"\ufffd".encode("utf-8"), pg_quote("\0"))
    def test_ugly(self):
        """Literal backslash sequences survive quote/unquote."""
        blob = r"\\n"
        self.assertEqual(blob, _pg_unquote(pg_quote(blob)))
# DSN for the integration-test PostgreSQL; e2e tests are skipped when unset.
PG = os.getenv("UNITTEST_PG")
class TestE2EQuoting(unittest.TestCase):
    """End-to-end COPY quoting tests against a live PostgreSQL.

    NOTE(review): Python 2 only (uses `string.lowercase`, `unichr`, `xrange`).
    """
    # Run like that:
    # $ UNITTEST_PG='host=spbmeta user=postgres' python test_centrifugation.py TestE2EQuoting
    def setUp(self):
        # Fresh connection and a random temp-table name suffix per test.
        self.conn = psycopg2.connect(dsn=PG)
        self.table = "tmptbl" + "".join(
            random.choice(string.lowercase) for _ in xrange(6)
        )
    def tearDown(self):
        self.conn.close()
    @unittest.skipUnless(PG, "No PostgreSQL")
    def test_string(self):
        """Every byte/char value 1..127 survives a COPY into a text column."""
        for mapfn in (chr, unichr):
            with self.conn, self.conn.cursor() as c:
                c.execute(
                    "CREATE TEMPORARY TABLE {} (nnn int2, ccc text) ON COMMIT DROP".format(
                        self.table
                    )
                )
                dest = PGCopyFrom(self.conn, self.table, wbufsize=64)
                for i in xrange(1, 128): # postgresql does not support \x00 in body
                    dest.write("{:d}\t{}\n".format(i, pg_quote(mapfn(i))))
                dest.close()
                c.execute("SELECT * FROM {}".format(self.table))
                nrows = 0
                for nnn, ccc in c:
                    self.assertEqual(nnn, ord(ccc))
                    nrows += 1
                self.assertEqual(nrows, 127)
    @unittest.skipUnless(PG, "No PostgreSQL")
    def test_array(self):
        """Same round-trip as test_string, but through a text[] array column."""
        for mapfn in (chr, unichr):
            with self.conn, self.conn.cursor() as c:
                c.execute(
                    "CREATE TEMPORARY TABLE {} (nnn int2, ccc text[]) ON COMMIT DROP".format(
                        self.table
                    )
                )
                dest = PGCopyFrom(self.conn, self.table, wbufsize=64)
                for i in xrange(1, 128): # postgresql does not support \x00 in body
                    dest.write(
                        "{:d}\t{}\n".format(
                            i, pg_quote([mapfn(i), "a", mapfn(i), "b", mapfn(i)])
                        )
                    )
                dest.close()
                c.execute("SELECT * FROM {}".format(self.table))
                nrows = 0
                for nnn, ccc in c:
                    self.assertEqual(
                        ccc, [mapfn(nnn), "a", mapfn(nnn), "b", mapfn(nnn)]
                    )
                    nrows += 1
                self.assertEqual(nrows, 127)
    @unittest.skipUnless(PG, "No PostgreSQL")
    def test_badrow_sink(self):
        """Rows containing NUL are diverted to the bad-row sink; clean rows kept.

        `isgood` enumerates every good/bad row pattern up to 8 rows:
        '1' => valid row, '0' => row containing a NUL byte.
        """
        for rowno in xrange(9):
            for ver in xrange(1 << rowno):
                isgood = bin(ver)[2:].rjust(rowno, "0") if rowno else ""
                with self.conn, self.conn.cursor() as c:
                    c.execute(
                        "CREATE TEMPORARY TABLE good{} (t text) ON COMMIT DROP".format(
                            self.table
                        )
                    )
                    c.execute(
                        "CREATE TEMPORARY TABLE bad{} (tbl text, code_ver integer, datum bytea) ON COMMIT DROP".format(
                            self.table
                        )
                    )
                    bad = PGCopyFrom(self.conn, "bad" + self.table)
                    good = PGCopyFrom(self.conn, "good" + self.table, badsink=bad)
                    for ndx, digit in enumerate(isgood):
                        row = ("<>{}<>" if digit == "1" else "<>{}<\0>").format(ndx)
                        good.write(row + "\n")
                    good.close()
                    bad.close()
                    # okay, let's check
                    c.execute("SELECT t FROM good{}".format(self.table))
                    good_set = set(_[0] for _ in c)
                    c.execute("SELECT datum FROM bad{}".format(self.table))
                    bad_blob = "|".join(str(_[0]) for _ in c)
                    # print rowno, ver, repr(isgood), good_set, repr(bad_blob)
                    for ndx, digit in enumerate(isgood):
                        row = ("<>{}<>" if digit == "1" else "<>{}<\0>").format(ndx)
                        if digit == "1":
                            self.assertIn(row, good_set)
                        else:
                            self.assertIn(row, bad_blob)
                # COMMIT
class TestPartialReprocessing(unittest.TestCase):
    """Scenario test: centrifugation must handle partially-changed buckets.

    Builds a reduced symlinked copy of a real autoclaved bucket in a temp
    dir, runs ./centrifugation.py against it repeatedly, and checks how
    added / removed / altered / checksum-mismatched files are handled.
    Requires a live PostgreSQL and /srv/autoclaved data.  (Python 2 only:
    print statements, `dict.iteritems`.)
    """
    # Run like that:
    # $ UNITTEST_PG='host=spbmeta user=oopguser' python test_centrifugation.py TestPartialReprocessing
    def setUp(self):
        self.conn = psycopg2.connect(dsn=PG)
        self.tmpdir = None
        self.mkdtemp(None)
    def tearDown(self):
        self.conn.close()
        shutil.rmtree(self.tmpdir)
        self.tmpdir = None
    def mkdtemp(self, bucket):
        """(Re)create the scratch dir, optionally with a bucket subdirectory."""
        if self.tmpdir is not None:
            shutil.rmtree(self.tmpdir)
            self.tmpdir = None
        self.tmpdir = tempfile.mkdtemp()
        if bucket is not None:
            os.mkdir(os.path.join(self.tmpdir, bucket))
    @staticmethod
    def take_part_of_bucket(acroot, bucket):
        """Select a small subset of bucket files plus candidates to modify/add.

        Returns (files_to_link, files_to_alter_later, files_to_add_later).
        """
        files = os.listdir(os.path.join(acroot, bucket))
        # some filtering to speedup processing and reduce data volume
        httpreq_blob = sorted(
            _ for _ in files if "-http_requests-" in _ and _.endswith(".lz4")
        )
        webcon_blob = sorted(
            _ for _ in files if "-web_connectivity-" in _ and _.endswith(".lz4")
        )
        webcon_tar = sorted(
            _
            for _ in files
            if _.startswith("web_connectivity.") and _.endswith(".tar.lz4")
        ) # OONI Probe
        questionable = set(httpreq_blob + webcon_blob + webcon_tar)
        to_keep = set(webcon_blob[:2] + webcon_tar[:2] + httpreq_blob[:2])
        modify = {webcon_tar[0], webcon_blob[0]}
        add = {webcon_tar[2]}
        files = {
            _
            for _ in files
            if _.endswith(".lz4") and (_ not in questionable or _ in to_keep)
        }
        return files, modify, add
    @staticmethod
    def calc_cksum(fname):
        """Compute (size, crc32, sha1) of a file via the ChecksummingTee reader."""
        with open(fname, "r") as fd:
            fd = ChecksummingTee(fd, NopTeeFd)
            for _ in iter(functools.partial(fd.read, 4096), ""):
                pass
            cksum = (fd.size, fd.crc32, fd.sha1)
        return cksum
    def test_reprocessing(self):
        """Drive centrifugation through import, reprocess, add, remove, alter."""
        acroot, bucket = "/srv/autoclaved", "2017-08-28"
        with self.conn, self.conn.cursor() as c:
            # almost every table besides `fingerprint`
            c.execute(
                "TRUNCATE TABLE autoclaved, badmeta, badrow, dns_a, domain, http_control, http_request, http_request_fp, http_verdict, input, label, measurement, report, residual, software, tcp, vanilla_tor"
            )
        files, modify, add = self.take_part_of_bucket(acroot, bucket)
        # create usual bucket
        self.mkdtemp(bucket)
        for f in files:
            os.symlink(
                os.path.join(acroot, bucket, f), os.path.join(self.tmpdir, bucket, f)
            )
        self.make_bucket_index(acroot, bucket, files, {})
        rc, _ = self.call_centrifugation("basic import")
        assert rc == 0, "centrifugation failed"
        self.retry_centrifugation()
        with self.conn, self.conn.cursor() as c:
            c.execute(
                """UPDATE autoclaved SET code_ver = 0
                WHERE filename = '2017-08-28/web_connectivity.00.tar.lz4'"""
            )
            magic = self.get_magic_numbers(c, "2017-08-28/web_connectivity.00.tar.lz4")
        rc, _ = self.call_centrifugation("reprocess: UPDATE SET code_ver = 0")
        assert rc == 0
        self.retry_centrifugation()
        with self.conn, self.conn.cursor() as c:
            # Reprocessing must preserve the row identifiers.
            self.assertEqual(
                magic,
                self.get_magic_numbers(c, "2017-08-28/web_connectivity.00.tar.lz4"),
            )
        # TODO: there is no easy way to verify that no rows were touched as `xmin` is not naive "trx id"
        LZ4_EMPTY_FRAME = "\x04\x22\x4d\x18\x64\x40\xa7\x00\x00\x00\x00\x05\x5d\xcc\x02"
        addendum = add.pop()
        shutil.copyfile(
            os.path.join(acroot, bucket, addendum),
            os.path.join(self.tmpdir, bucket, addendum),
        )
        # Append a valid-but-empty LZ4 frame so the file's checksum changes.
        with open(os.path.join(self.tmpdir, bucket, addendum), "a") as fd:
            fd.write(LZ4_EMPTY_FRAME)
        self.make_bucket_index(acroot, bucket, files | {addendum}, {})
        rc, _ = self.call_centrifugation("cksum mismatch")
        assert rc == 1, "Hashsum mismatch should lead to failure"
        self.make_bucket_index(
            acroot,
            bucket,
            files | {addendum},
            {addendum: self.calc_cksum(os.path.join(self.tmpdir, bucket, addendum))},
        )
        rc, _ = self.call_centrifugation("new file added")
        assert rc == 0
        self.retry_centrifugation()
        self.make_bucket_index(acroot, bucket, files, {})
        rc, _ = self.call_centrifugation("file removed")
        assert rc == 1, "File removal should lead to failure"
        alter = modify.pop()
        os.unlink(os.path.join(self.tmpdir, bucket, alter))
        shutil.copyfile(
            os.path.join(acroot, bucket, alter),
            os.path.join(self.tmpdir, bucket, alter),
        )
        with open(os.path.join(self.tmpdir, bucket, alter), "a") as fd:
            fd.write(LZ4_EMPTY_FRAME)
        self.make_bucket_index(
            acroot,
            bucket,
            files | {addendum, alter},
            {
                addendum: self.calc_cksum(os.path.join(self.tmpdir, bucket, addendum)),
                alter: self.calc_cksum(os.path.join(self.tmpdir, bucket, alter)),
            },
        )
        with self.conn, self.conn.cursor() as c:
            magic = self.get_magic_numbers(c, "{}/{}".format(bucket, alter))
        rc, _ = self.call_centrifugation("one altered file")
        assert rc == 0
        self.retry_centrifugation()
        with self.conn, self.conn.cursor() as c:
            self.assertEqual(
                magic, self.get_magic_numbers(c, "{}/{}".format(bucket, alter))
            )
    def get_magic_numbers(self, c, filename):
        """Return sorted (autoclaved_no, report_no, msm_no) id lists for a file."""
        c.execute(
            "SELECT autoclaved_no FROM autoclaved WHERE filename = %s", [filename]
        )
        autoclaved_no = sorted(_[0] for _ in c)
        c.execute(
            """SELECT report_no
            FROM autoclaved
            JOIN report USING (autoclaved_no)
            WHERE filename = %s""",
            [filename],
        )
        report_no = sorted(_[0] for _ in c)
        c.execute(
            """SELECT msm_no
            FROM autoclaved
            JOIN report USING (autoclaved_no)
            JOIN measurement USING (report_no)
            WHERE filename = %s""",
            [filename],
        )
        measurement_no = sorted(_[0] for _ in c)
        return autoclaved_no, report_no, measurement_no
    def retry_centrifugation(self):
        """A no-op re-run over the same bucket must succeed and be fast."""
        # re-try over same bucket once again
        rc, delay = self.call_centrifugation("last state retry")
        assert rc == 0, "centrifugation failed"
        assert delay < 1, "Ultra-slow retry"
    def call_centrifugation(self, msg):
        """Run ./centrifugation.py over the temp bucket; return (rc, seconds)."""
        print " ==> centrifugation.py:", msg
        start = time.time()
        rc = subprocess.call(
            [
                "./centrifugation.py",
                "--start",
                "2017-08-28T00:00:00",
                "--end",
                "2017-08-29T00:00:00",
                "--autoclaved-root",
                self.tmpdir,
                "--postgres",
                PG,
            ]
        )
        end = time.time()
        print " ^^^ centrifugation.py: rc = {:d}, duration = {:1f} sec".format(
            rc, end - start
        )
        return rc, end - start
    def make_bucket_index(self, acroot, bucket, files, cksum):
        """Write a filtered copy of the bucket's index.json.gz into tmpdir.

        Keeps only entries for `files`; for filenames present in `cksum`,
        rewrites the closing "/file" record with the supplied
        (size, crc32, sha1) triple (sha1 base64-encoded).
        """
        files = {"{}/{}".format(bucket, f) for f in files}
        cksum = {"{}/{}".format(bucket, f): v for f, v in cksum.iteritems()}
        with gzip.GzipFile(
            os.path.join(acroot, bucket, "index.json.gz"), "r"
        ) as src, gzip.GzipFile(
            os.path.join(self.tmpdir, bucket, "index.json.gz"), "w"
        ) as dst:
            use, filename = False, None
            for line in src:
                if 'file"' in line:
                    doc = json.loads(line)
                    if doc["type"] == "file":
                        filename = doc["filename"]
                        use = filename in files
                        if use:
                            dst.write(line)
                    elif doc["type"] == "/file":
                        if not use:
                            pass
                        elif filename in cksum:
                            doc["file_size"], doc["file_crc32"], doc[
                                "file_sha1"
                            ] = cksum[filename]
                            doc["file_sha1"] = b64encode(doc["file_sha1"])
                            dst.write(json.dumps(doc) + "\n")
                        else:
                            dst.write(line)
                        use, filename = False, None
                    else:
                        raise RuntimeError("BUG: malicious data not handled")
                elif use:
                    dst.write(line)
class TextExcHash(unittest.TestCase):
    """Tests for exc_hash: the hash must depend on the call stack shape but
    not on the exact line numbers of a re-raise.

    NOTE(review): the class name is presumably a typo for TestExcHash;
    unittest still collects it (discovery keys on TestCase subclassing),
    so it is left unchanged.
    """
    def deep_throw(self):
        # Raise a real, reproducible exception from a deterministic frame.
        import socket
        socket.create_connection(("127.126.125.124", 1)) # ECONREFUSED
    def test_stack_dependence(self):
        """Same exception raised through an extra lambda frame hashes differently."""
        try:
            self.deep_throw()
        except Exception:
            einfo1 = sys.exc_info()
        # NB: functools.partial does not add stack frame
        self.deep_throw = lambda fn=self.deep_throw: fn()
        try:
            self.deep_throw()
        except Exception:
            einfo2 = sys.exc_info()
        self.assertEqual(einfo1[0], einfo2[0]) # same type
        self.assertEqual(einfo1[1].args, einfo2[1].args) # almost same value
        self.assertNotEqual(exc_hash(einfo1), exc_hash(einfo2))
    def test_line_independence(self):
        """Two raises from identical stacks hash equally despite distinct tracebacks."""
        try:
            self.deep_throw()
        except Exception:
            einfo1 = sys.exc_info()
        try:
            self.deep_throw()
        except Exception:
            einfo2 = sys.exc_info()
        self.assertNotEqual(einfo1, einfo2)
        self.assertEqual(exc_hash(einfo1), exc_hash(einfo2))
class TestPopValues(unittest.TestCase):
    """Tests for pop_values() pruning of trailing empty dicts in lists."""

    def test_list_of_dicts(self):
        # (input, expected) pairs: a list of dicts is truncated after the
        # last dict that still carries a key; an all-empty list is dropped.
        cases = [
            ({"a": [{}, {}, {}]}, {}),
            ({"a": [{"q": None}, {}, {}]}, {"a": [{"q": None}]}),
            ({"a": [{}, {"q": None}, {}]}, {"a": [{}, {"q": None}]}),
            ({"a": [{}, {}, {"q": None}]}, {"a": [{}, {}, {"q": None}]}),
        ]
        for given, expected in cases:
            self.assertEqual(pop_values(given), expected)
if __name__ == "__main__":
unittest.main() | af/shovel/test_centrifugation.py |
import os
# This is needed to make daily_workflow work
os.environ["TZ"] = "UTC"
import gzip
import functools
import json
import random
import shutil
import string
import subprocess
import sys
import tempfile
import time
import unittest
from base64 import b64encode
import psycopg2
from centrifugation import httpt_body, exc_hash, pop_values, ChecksummingTee, NopTeeFd
from oonipl.pg import PGCopyFrom, pg_quote, _pg_unquote
def _httpt_body(body, te=None):
    """Wrap `body` in a minimal http-template dict and pass it to httpt_body().

    `te`, when given, becomes the Transfer-Encoding header value; the
    mixed-case header name deliberately exercises case-insensitive header
    lookup in the implementation under test.
    """
    d = {"body": body, "headers": {}}
    if te is not None:
        d["headers"]["TrAnSfEr-EnCoDiNg"] = te
    return httpt_body(d)
class TestChunked(unittest.TestCase):
    """Tests for httpt_body() de-chunking (Python 2 str/unicode semantics)."""

    def test_empty(self):
        self.assertEqual(_httpt_body(""), "")
        self.assertEqual(_httpt_body("0\r\n\r\n", "chunked"), "")

    def test_chunked(self):
        # Plain byte-string chunks concatenate in order.
        self.assertEqual(
            _httpt_body(
                "4\r\nasdf\r\n" "3\r\nqwe\r\n" "2\r\nzx\r\n" "0\r\n\r\n", "chunked"
            ),
            "asdfqwezx",
        )
        # Unicode input: chunk sizes count UTF-8 bytes, and the result is
        # returned re-encoded as a UTF-8 byte string.
        self.assertEqual(
            _httpt_body(
                u"2\r\nzx\r\n"
                u"8\r\nпсой\r\n"  # NB: 8 bytes for 4 symbols!
                u"0\r\n\r\n",
                "chunked",
            ),
            u"zxпсой".encode("utf-8"),
        )

    def test_broken(self):
        # Raw bytes with a non-UTF-8 byte (\xf2) still de-chunk correctly.
        raw = "2\r\nzx\r\n" "7\r\nFilast\xf2\r\n" "0\r\n\r\n"
        self.assertEqual(_httpt_body(raw, "chunked"), "zxFilast\xf2")
        # NB: can't be properly de-chunked after <meta/> charset decoding
        uni = raw.decode("ISO-8859-1")
        self.assertEqual(_httpt_body(uni, "chunked"), uni.encode("utf-8"))
class TestPGQuoting(unittest.TestCase):
    """Round-trip tests for pg_quote() / _pg_unquote() (COPY text format)."""

    def test_bool(self):
        self.assertEqual(pg_quote(True), "TRUE")
        self.assertEqual(pg_quote(False), "FALSE")

    def test_bits(self):
        # Every byte value 1..255 (as UTF-8) must survive a quote/unquote
        # round trip; NUL is not representable and is replaced with U+FFFD.
        blob = u"".join(map(unichr, xrange(1, 256))).encode("utf-8")
        self.assertEqual(blob, _pg_unquote(pg_quote(blob)))
        self.assertEqual(u"\ufffd".encode("utf-8"), pg_quote(u"\u0000"))
        self.assertEqual(u"\ufffd".encode("utf-8"), pg_quote("\0"))

    def test_ugly(self):
        # Literal backslashes followed by `n` must not be confused with a
        # newline escape.
        blob = r"\\n"
        self.assertEqual(blob, _pg_unquote(pg_quote(blob)))
PG = os.getenv("UNITTEST_PG")
class TestE2EQuoting(unittest.TestCase):
    """End-to-end checks that pg_quote() output survives a real COPY.

    Requires a live PostgreSQL; skipped unless UNITTEST_PG is set.
    """

    # Run like that:
    # $ UNITTEST_PG='host=spbmeta user=postgres' python test_centrifugation.py TestE2EQuoting
    def setUp(self):
        self.conn = psycopg2.connect(dsn=PG)
        # Random suffix so concurrent runs do not collide on the table name.
        self.table = "tmptbl" + "".join(
            random.choice(string.lowercase) for _ in xrange(6)
        )

    def tearDown(self):
        self.conn.close()

    @unittest.skipUnless(PG, "No PostgreSQL")
    def test_string(self):
        # Exercise both byte strings (chr) and unicode strings (unichr).
        for mapfn in (chr, unichr):
            with self.conn, self.conn.cursor() as c:
                c.execute(
                    "CREATE TEMPORARY TABLE {} (nnn int2, ccc text) ON COMMIT DROP".format(
                        self.table
                    )
                )
                # Tiny write buffer forces multiple COPY flushes.
                dest = PGCopyFrom(self.conn, self.table, wbufsize=64)
                for i in xrange(1, 128):  # postgresql does not support \x00 in body
                    dest.write("{:d}\t{}\n".format(i, pg_quote(mapfn(i))))
                dest.close()
                c.execute("SELECT * FROM {}".format(self.table))
                nrows = 0
                for nnn, ccc in c:
                    self.assertEqual(nnn, ord(ccc))
                    nrows += 1
                self.assertEqual(nrows, 127)

    @unittest.skipUnless(PG, "No PostgreSQL")
    def test_array(self):
        # Same round trip, but through text[] array quoting.
        for mapfn in (chr, unichr):
            with self.conn, self.conn.cursor() as c:
                c.execute(
                    "CREATE TEMPORARY TABLE {} (nnn int2, ccc text[]) ON COMMIT DROP".format(
                        self.table
                    )
                )
                dest = PGCopyFrom(self.conn, self.table, wbufsize=64)
                for i in xrange(1, 128):  # postgresql does not support \x00 in body
                    dest.write(
                        "{:d}\t{}\n".format(
                            i, pg_quote([mapfn(i), "a", mapfn(i), "b", mapfn(i)])
                        )
                    )
                dest.close()
                c.execute("SELECT * FROM {}".format(self.table))
                nrows = 0
                for nnn, ccc in c:
                    self.assertEqual(
                        ccc, [mapfn(nnn), "a", mapfn(nnn), "b", mapfn(nnn)]
                    )
                    nrows += 1
                self.assertEqual(nrows, 127)

    @unittest.skipUnless(PG, "No PostgreSQL")
    def test_badrow_sink(self):
        # Enumerate every good/bad pattern of up to 8 rows: `isgood` is the
        # binary expansion of `ver`, one digit per row ("1" = valid row,
        # "0" = row containing NUL that must be diverted to the bad sink).
        for rowno in xrange(9):
            for ver in xrange(1 << rowno):
                isgood = bin(ver)[2:].rjust(rowno, "0") if rowno else ""
                with self.conn, self.conn.cursor() as c:
                    c.execute(
                        "CREATE TEMPORARY TABLE good{} (t text) ON COMMIT DROP".format(
                            self.table
                        )
                    )
                    c.execute(
                        "CREATE TEMPORARY TABLE bad{} (tbl text, code_ver integer, datum bytea) ON COMMIT DROP".format(
                            self.table
                        )
                    )
                    bad = PGCopyFrom(self.conn, "bad" + self.table)
                    good = PGCopyFrom(self.conn, "good" + self.table, badsink=bad)
                    for ndx, digit in enumerate(isgood):
                        row = ("<>{}<>" if digit == "1" else "<>{}<\0>").format(ndx)
                        good.write(row + "\n")
                    good.close()
                    bad.close()
                    # okay, let's check
                    c.execute("SELECT t FROM good{}".format(self.table))
                    good_set = set(_[0] for _ in c)
                    c.execute("SELECT datum FROM bad{}".format(self.table))
                    bad_blob = "|".join(str(_[0]) for _ in c)
                    # print rowno, ver, repr(isgood), good_set, repr(bad_blob)
                    for ndx, digit in enumerate(isgood):
                        row = ("<>{}<>" if digit == "1" else "<>{}<\0>").format(ndx)
                        if digit == "1":
                            self.assertIn(row, good_set)
                        else:
                            # Bad rows land in the bytea sink; substring match
                            # because the sink stores the raw rejected datum.
                            self.assertIn(row, bad_blob)
                # COMMIT
class TestPartialReprocessing(unittest.TestCase):
# Run like that:
# $ UNITTEST_PG='host=spbmeta user=oopguser' python test_centrifugation.py TestPartialReprocessing
def setUp(self):
self.conn = psycopg2.connect(dsn=PG)
self.tmpdir = None
self.mkdtemp(None)
def tearDown(self):
self.conn.close()
shutil.rmtree(self.tmpdir)
self.tmpdir = None
def mkdtemp(self, bucket):
if self.tmpdir is not None:
shutil.rmtree(self.tmpdir)
self.tmpdir = None
self.tmpdir = tempfile.mkdtemp()
if bucket is not None:
os.mkdir(os.path.join(self.tmpdir, bucket))
@staticmethod
def take_part_of_bucket(acroot, bucket):
files = os.listdir(os.path.join(acroot, bucket))
# some filtering to speedup processing and reduce data volume
httpreq_blob = sorted(
_ for _ in files if "-http_requests-" in _ and _.endswith(".lz4")
)
webcon_blob = sorted(
_ for _ in files if "-web_connectivity-" in _ and _.endswith(".lz4")
)
webcon_tar = sorted(
_
for _ in files
if _.startswith("web_connectivity.") and _.endswith(".tar.lz4")
) # OONI Probe
questionable = set(httpreq_blob + webcon_blob + webcon_tar)
to_keep = set(webcon_blob[:2] + webcon_tar[:2] + httpreq_blob[:2])
modify = {webcon_tar[0], webcon_blob[0]}
add = {webcon_tar[2]}
files = {
_
for _ in files
if _.endswith(".lz4") and (_ not in questionable or _ in to_keep)
}
return files, modify, add
@staticmethod
def calc_cksum(fname):
with open(fname, "r") as fd:
fd = ChecksummingTee(fd, NopTeeFd)
for _ in iter(functools.partial(fd.read, 4096), ""):
pass
cksum = (fd.size, fd.crc32, fd.sha1)
return cksum
def test_reprocessing(self):
acroot, bucket = "/srv/autoclaved", "2017-08-28"
with self.conn, self.conn.cursor() as c:
# almost every table besides `fingerprint`
c.execute(
"TRUNCATE TABLE autoclaved, badmeta, badrow, dns_a, domain, http_control, http_request, http_request_fp, http_verdict, input, label, measurement, report, residual, software, tcp, vanilla_tor"
)
files, modify, add = self.take_part_of_bucket(acroot, bucket)
# create usual bucket
self.mkdtemp(bucket)
for f in files:
os.symlink(
os.path.join(acroot, bucket, f), os.path.join(self.tmpdir, bucket, f)
)
self.make_bucket_index(acroot, bucket, files, {})
rc, _ = self.call_centrifugation("basic import")
assert rc == 0, "centrifugation failed"
self.retry_centrifugation()
with self.conn, self.conn.cursor() as c:
c.execute(
"""UPDATE autoclaved SET code_ver = 0
WHERE filename = '2017-08-28/web_connectivity.00.tar.lz4'"""
)
magic = self.get_magic_numbers(c, "2017-08-28/web_connectivity.00.tar.lz4")
rc, _ = self.call_centrifugation("reprocess: UPDATE SET code_ver = 0")
assert rc == 0
self.retry_centrifugation()
with self.conn, self.conn.cursor() as c:
self.assertEqual(
magic,
self.get_magic_numbers(c, "2017-08-28/web_connectivity.00.tar.lz4"),
)
# TODO: there is no easy way to verify that no rows were touched as `xmin` is not naive "trx id"
LZ4_EMPTY_FRAME = "\x04\x22\x4d\x18\x64\x40\xa7\x00\x00\x00\x00\x05\x5d\xcc\x02"
addendum = add.pop()
shutil.copyfile(
os.path.join(acroot, bucket, addendum),
os.path.join(self.tmpdir, bucket, addendum),
)
with open(os.path.join(self.tmpdir, bucket, addendum), "a") as fd:
fd.write(LZ4_EMPTY_FRAME)
self.make_bucket_index(acroot, bucket, files | {addendum}, {})
rc, _ = self.call_centrifugation("cksum mismatch")
assert rc == 1, "Hashsum mismatch should lead to failure"
self.make_bucket_index(
acroot,
bucket,
files | {addendum},
{addendum: self.calc_cksum(os.path.join(self.tmpdir, bucket, addendum))},
)
rc, _ = self.call_centrifugation("new file added")
assert rc == 0
self.retry_centrifugation()
self.make_bucket_index(acroot, bucket, files, {})
rc, _ = self.call_centrifugation("file removed")
assert rc == 1, "File removal should lead to failure"
alter = modify.pop()
os.unlink(os.path.join(self.tmpdir, bucket, alter))
shutil.copyfile(
os.path.join(acroot, bucket, alter),
os.path.join(self.tmpdir, bucket, alter),
)
with open(os.path.join(self.tmpdir, bucket, alter), "a") as fd:
fd.write(LZ4_EMPTY_FRAME)
self.make_bucket_index(
acroot,
bucket,
files | {addendum, alter},
{
addendum: self.calc_cksum(os.path.join(self.tmpdir, bucket, addendum)),
alter: self.calc_cksum(os.path.join(self.tmpdir, bucket, alter)),
},
)
with self.conn, self.conn.cursor() as c:
magic = self.get_magic_numbers(c, "{}/{}".format(bucket, alter))
rc, _ = self.call_centrifugation("one altered file")
assert rc == 0
self.retry_centrifugation()
with self.conn, self.conn.cursor() as c:
self.assertEqual(
magic, self.get_magic_numbers(c, "{}/{}".format(bucket, alter))
)
def get_magic_numbers(self, c, filename):
c.execute(
"SELECT autoclaved_no FROM autoclaved WHERE filename = %s", [filename]
)
autoclaved_no = sorted(_[0] for _ in c)
c.execute(
"""SELECT report_no
FROM autoclaved
JOIN report USING (autoclaved_no)
WHERE filename = %s""",
[filename],
)
report_no = sorted(_[0] for _ in c)
c.execute(
"""SELECT msm_no
FROM autoclaved
JOIN report USING (autoclaved_no)
JOIN measurement USING (report_no)
WHERE filename = %s""",
[filename],
)
measurement_no = sorted(_[0] for _ in c)
return autoclaved_no, report_no, measurement_no
def retry_centrifugation(self):
# re-try over same bucket once again
rc, delay = self.call_centrifugation("last state retry")
assert rc == 0, "centrifugation failed"
assert delay < 1, "Ultra-slow retry"
def call_centrifugation(self, msg):
print " ==> centrifugation.py:", msg
start = time.time()
rc = subprocess.call(
[
"./centrifugation.py",
"--start",
"2017-08-28T00:00:00",
"--end",
"2017-08-29T00:00:00",
"--autoclaved-root",
self.tmpdir,
"--postgres",
PG,
]
)
end = time.time()
print " ^^^ centrifugation.py: rc = {:d}, duration = {:1f} sec".format(
rc, end - start
)
return rc, end - start
def make_bucket_index(self, acroot, bucket, files, cksum):
files = {"{}/{}".format(bucket, f) for f in files}
cksum = {"{}/{}".format(bucket, f): v for f, v in cksum.iteritems()}
with gzip.GzipFile(
os.path.join(acroot, bucket, "index.json.gz"), "r"
) as src, gzip.GzipFile(
os.path.join(self.tmpdir, bucket, "index.json.gz"), "w"
) as dst:
use, filename = False, None
for line in src:
if 'file"' in line:
doc = json.loads(line)
if doc["type"] == "file":
filename = doc["filename"]
use = filename in files
if use:
dst.write(line)
elif doc["type"] == "/file":
if not use:
pass
elif filename in cksum:
doc["file_size"], doc["file_crc32"], doc[
"file_sha1"
] = cksum[filename]
doc["file_sha1"] = b64encode(doc["file_sha1"])
dst.write(json.dumps(doc) + "\n")
else:
dst.write(line)
use, filename = False, None
else:
raise RuntimeError("BUG: malicious data not handled")
elif use:
dst.write(line)
class TextExcHash(unittest.TestCase):
def deep_throw(self):
import socket
socket.create_connection(("127.126.125.124", 1)) # ECONREFUSED
def test_stack_dependence(self):
try:
self.deep_throw()
except Exception:
einfo1 = sys.exc_info()
# NB: functools.partial does not add stack frame
self.deep_throw = lambda fn=self.deep_throw: fn()
try:
self.deep_throw()
except Exception:
einfo2 = sys.exc_info()
self.assertEqual(einfo1[0], einfo2[0]) # same type
self.assertEqual(einfo1[1].args, einfo2[1].args) # almost same value
self.assertNotEqual(exc_hash(einfo1), exc_hash(einfo2))
def test_line_independence(self):
try:
self.deep_throw()
except Exception:
einfo1 = sys.exc_info()
try:
self.deep_throw()
except Exception:
einfo2 = sys.exc_info()
self.assertNotEqual(einfo1, einfo2)
self.assertEqual(exc_hash(einfo1), exc_hash(einfo2))
class TestPopValues(unittest.TestCase):
def test_list_of_dicts(self):
self.assertEqual(pop_values({"a": [{}, {}, {}]}), {})
self.assertEqual(pop_values({"a": [{"q": None}, {}, {}]}), {"a": [{"q": None}]})
self.assertEqual(
pop_values({"a": [{}, {"q": None}, {}]}), {"a": [{}, {"q": None}]}
)
self.assertEqual(
pop_values({"a": [{}, {}, {"q": None}]}), {"a": [{}, {}, {"q": None}]}
)
if __name__ == "__main__":
unittest.main() | 0.307046 | 0.141815 |
import re
from ast import AST, parse
from dataclasses import dataclass
from typing import Dict, Iterator, Optional, Tuple
from breakfast.position import Position
WORD = re.compile(r"\w+|\W+")
@dataclass(order=True)
class Source:
    """Source text of one module plus a buffer of pending line edits.

    `lines` is the file split into lines; `changes` (set up in
    __post_init__) maps a 0-indexed row to its replacement text.  Rows and
    columns are 0-indexed throughout.
    """

    lines: Tuple[str, ...]
    module_name: str = "module"
    file_name: Optional[str] = None

    def __hash__(self) -> int:
        # The dataclass-generated __eq__ would otherwise make instances
        # unhashable; identity is the (module_name, file_name) pair.
        return hash((self.module_name, self.file_name))

    def __post_init__(self) -> None:
        # row -> replacement line; filled in by modify_line()/replace()
        self.changes: Dict[  # pylint: disable=attribute-defined-outside-init
            int, str
        ] = {}

    def __repr__(self) -> str:
        return (
            f"{self.__class__}(lines=[...], module_name={repr(self.module_name)}, "
            f"file_name={repr(self.file_name)})"
        )

    def position(self, row: int, column: int) -> Position:
        """Create a Position bound to this source."""
        return Position(source=self, row=row, column=column)

    def get_name_at(self, position: Position) -> str:
        """Return the word (or run of non-word chars) starting at `position`."""
        match = WORD.search(self.get_string_starting_at(position))
        assert match
        return match.group()

    def get_ast(self) -> AST:
        """Parse the *original* (unmodified) source into an AST."""
        return parse("\n".join(self.lines))

    def render(self) -> str:
        """Return the source text with all pending changes applied."""
        return "\n".join(self.changes.get(i, line) for i, line in enumerate(self.lines))

    def get_changes(self) -> Iterator[Tuple[int, str]]:
        """Yield (row, new_text) pairs in ascending row order."""
        for change in sorted(self.changes.items()):
            yield change

    def replace(self, position: Position, old: str, new: str) -> None:
        """Replace `old` (assumed to start at `position`) with `new`."""
        self.modify_line(start=position, end=position + len(old), new=new)

    def modify_line(self, start: Position, end: Position, new: str) -> None:
        """Splice `new` into row start.row between the two columns.

        Edits stack: the base line is taken from `changes` when that row
        was already modified.
        """
        line_number = start.row
        line = self.changes.get(line_number, self.lines[line_number])
        modified_line = line[: start.column] + new + line[end.column :]
        self.changes[line_number] = modified_line

    def find_after(self, name: str, start: Position) -> Position:
        """Return the position of the next whole-word occurrence of `name`
        at or after `start`.  Asserts that an occurrence exists.
        """
        # re.escape: `name` should be an identifier, but escaping keeps a
        # stray metacharacter from corrupting the pattern.
        regex = re.compile("\\b{}\\b".format(re.escape(name)))
        match = regex.search(self.get_string_starting_at(start))
        # Bound fixed from `start.row <= len(self.lines)`: the old bound
        # allowed stepping onto row == len(self.lines), which raised
        # IndexError in get_string_starting_at instead of reaching the
        # assertion below when the name is absent.
        while not match and start.row < len(self.lines) - 1:
            start = start.next_line()
            match = regex.search(self.get_string_starting_at(start))
        assert match
        return start + match.span()[0]

    def get_string_starting_at(self, position: Position) -> str:
        """Return the tail of the position's line from its column onward."""
        return self.lines[position.row][position.column :]
from ast import AST, parse
from dataclasses import dataclass
from typing import Dict, Iterator, Optional, Tuple
from breakfast.position import Position
WORD = re.compile(r"\w+|\W+")
@dataclass(order=True)
class Source:
lines: Tuple[str, ...]
module_name: str = "module"
file_name: Optional[str] = None
def __hash__(self) -> int:
return hash((self.module_name, self.file_name))
def __post_init__(self) -> None:
self.changes: Dict[ # pylint: disable=attribute-defined-outside-init
int, str
] = {}
def __repr__(self) -> str:
return (
f"{self.__class__}(lines=[...], module_name={repr(self.module_name)}, "
f"file_name={repr(self.file_name)})"
)
def position(self, row: int, column: int) -> Position:
return Position(source=self, row=row, column=column)
def get_name_at(self, position: Position) -> str:
match = WORD.search(self.get_string_starting_at(position))
assert match
return match.group()
def get_ast(self) -> AST:
return parse("\n".join(self.lines))
def render(self) -> str:
return "\n".join(self.changes.get(i, line) for i, line in enumerate(self.lines))
def get_changes(self) -> Iterator[Tuple[int, str]]:
for change in sorted(self.changes.items()):
yield change
def replace(self, position: Position, old: str, new: str) -> None:
self.modify_line(start=position, end=position + len(old), new=new)
def modify_line(self, start: Position, end: Position, new: str) -> None:
line_number = start.row
line = self.changes.get(line_number, self.lines[line_number])
modified_line = line[: start.column] + new + line[end.column :]
self.changes[line_number] = modified_line
def find_after(self, name: str, start: Position) -> Position:
regex = re.compile("\\b{}\\b".format(name))
match = regex.search(self.get_string_starting_at(start))
while start.row <= len(self.lines) and not match:
start = start.next_line()
match = regex.search(self.get_string_starting_at(start))
assert match
return start + match.span()[0]
def get_string_starting_at(self, position: Position) -> str:
return self.lines[position.row][position.column :] | 0.85959 | 0.258127 |
import numpy as np
from numpy import random as rnd
import random
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
import progressbar

from PopulationClasses import Population

# Status codes shared with PopulationClasses -- keep in sync.
SUS = 0  # susceptible
INF = 1  # infected
REC = 2  # recovered
DEAD = 3  # dead

# Age pyramid in 5-year buckets (percent of population), 0-4 ... 85+.
age_dist = np.array([6.0, 6.1, 6.5, 6.6, 6.6, 7.1, 6.7, 6.6, 6.1, 6.3,
                     6.4, 6.6, 6.3, 5.2, 4.1, 2.9, 1.9, 1.9])
# Re-bucket the pyramid into the intervals used for the death rates.
under20 = np.sum(age_dist[0:4])
twentyto45 = np.sum(age_dist[4:9])
forty5to55 = np.sum(age_dist[9:11])
fifty5to65 = np.sum(age_dist[11:13])
sixty5to75 = np.sum(age_dist[13:15])
seventy5to85 = np.sum(age_dist[15:17])
eighty5plus = np.sum(age_dist[17:len(age_dist)])
age_dist = np.array([under20, twentyto45, forty5to55, fifty5to65,
                     sixty5to75, seventy5to85, eighty5plus])
age_dist = age_dist / np.sum(age_dist)  # probability distribution over ages

# Create a population and simulate infection spread; one timestep == one day.
nDays = 90
size = 2000  # population of guilford (2016)
I0 = 5  # initially infected individuals
p_connect = 4 / size  # expected number of connections per person is size * p_connect
population = Population(size, age_dist, I0, p_connect)
population.prepSimulation(nDays)
p_infect = .05  # per-contact daily infection probability (est. from Diamond Princess)

population.showConnections()
plt.show()  # this just takes a lot of time in large population

widgets = [progressbar.Percentage(), progressbar.Bar()]
bar = progressbar.ProgressBar(widgets=widgets, maxval=nDays).start()
for day in range(1, nDays + 1):  # day 0 is the initial state
    bar.update(day)
    # 1. Advance existing infections and propagate to susceptible contacts.
    # Iterate over a SNAPSHOT of the pool: the live list is mutated inside
    # the loop (remove() on recovery/death, append() on new infection).
    # The original code iterated the live list, which made remove() skip
    # elements and needed an `infectCount == nInfected` break to stop the
    # loop from also stepping people infected today.  The snapshot gives
    # exactly the intended semantics: only those infected at the start of
    # the day take a step today.
    for infected in list(population.infectedPool):
        new_status = infected.step()
        if new_status != INF:
            # Recovered or died: drop from the pool and record the outcome.
            population.infectedPool.remove(infected)
            population.statuses[infected.id] = new_status
        else:
            # Still infected: each susceptible contact may catch it.
            for conn_idx in infected.connections:
                if population.people[conn_idx].status == SUS:
                    if rnd.rand() < p_infect:
                        population.people[conn_idx].getInfected()
                        population.infectedPool.append(population.people[conn_idx])
                        population.statuses[conn_idx] = INF
    # 2. Record the day's population statistics.
    population.nSus[day] = len(np.where(population.statuses == SUS)[0])
    population.nRec[day] = len(np.where(population.statuses == REC)[0])
    population.nInf[day] = len(np.where(population.statuses == INF)[0])
    population.nDead[day] = len(np.where(population.statuses == DEAD)[0])
    # 3. Test (demo of dynamic testing capacity ramp-up after day 30).
    if day < 30:
        n_tests = 50
    else:
        n_tests = 500
    population.test(n_tests, day)

plt.figure()
# TODO: choose the grid shape dynamically; (50, 40) assumes size == 2000.
sns.heatmap(population.statuses.reshape((50, 40)), cbar=False)
plt.title("Heatmap of Individual Outcomes for Average %i Interactions" % (int(size * p_connect)))
# plt.title("'\"Opening the Country Up\": Heatmap of Individual Outcomes for Average 3->10 Interactions")
population.plotStatistics(testing=True)
print("Dead:", population.nDead[-1])
plt.show()
from numpy import random as rnd
import random
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
import progressbar
from PopulationClasses import Population
# defining constant integers to make labeling easy
SUS = 0 # susceptible
INF = 1 # infected
REC = 2 # recovered
DEAD = 3 # dead
age_dist= np.array([6.0, 6.1, 6.5, 6.6, 6.6, 7.1, 6.7, 6.6, 6.1, 6.3, 6.4, 6.6, 6.3, 5.2, 4.1, 2.9, 1.9, 1.9])
under20 = np.sum(age_dist[0:4])
twentyto45 = np.sum(age_dist[4:9])
forty5to55 = np.sum(age_dist[9:11])
fifty5to65 = np.sum(age_dist[11:13])
sixty5to75 = np.sum(age_dist[13:15])
seventy5to85 = np.sum(age_dist[15:17])
eighty5plus = np.sum(age_dist[17:len(age_dist)])
#arranged ages according to the intervals set for death rates
age_dist = np.array([under20,twentyto45,forty5to55,fifty5to65,sixty5to75,seventy5to85,eighty5plus])
age_dist = age_dist / np.sum(age_dist) # probability distribution over ages
# create a population under some parameters and run a simulation of infection over time
# note that one timestep is a day
nDays = 90
size = 2000 # population of guilford (2016)
I0 = 5
p_connect = 4 / size # have connections with avg of 10 people
population = Population(size,age_dist,I0,p_connect)
population.prepSimulation(nDays)
p_infect = .05 # get this from diamond cruise ship
population.showConnections()
plt.show() # this just takes a lot of time in large population
widgets = [progressbar.Percentage(), progressbar.Bar()]
bar = progressbar.ProgressBar(widgets=widgets,maxval=nDays).start()
for day in range(1,nDays+1): # count 0 as initial day
bar.update(day)
# 1. propagate infection and advance infections
nInfected = len(population.infectedPool)
infectCount = 0
# draw a random vector of # interax of len(infectedPool) w/ Pois or Binomial noise
for infected in population.infectedPool: # use iter to get the index
infectCount += 1 # then we can also get rid of the infectCount
new_status = infected.step()
if new_status != INF: # change status
population.infectedPool.remove(infected) # take out of infected pool
population.statuses[infected.id] = new_status
else: # we're still infected
for conn_idx in infected.connections:
if population.people[conn_idx].status == SUS:
if rnd.rand() < p_infect:
population.people[conn_idx].getInfected()
population.infectedPool.append(population.people[conn_idx])
population.statuses[conn_idx] = INF
if infectCount == nInfected: # to prevent infinite infection recursion
break
# 2. record the new population statistics
population.nSus[day] = len(np.where(population.statuses == SUS)[0])
population.nRec[day] = len(np.where(population.statuses == REC)[0])
population.nInf[day] = len(np.where(population.statuses == INF)[0])
population.nDead[day] = len(np.where(population.statuses == DEAD)[0])
# 3. test (demo of dynamic testing)
if day < 30:
n_tests = 50
else:
n_tests = 500
population.test(n_tests,day)
plt.figure()
# should really get a line in here to dynamically reshape lol
sns.heatmap(population.statuses.reshape((50,40)),cbar = False)
plt.title("Heatmap of Individual Outcomes for Average %i Interactions"%(int(size * p_connect)))
# plt.title("'\"Opening the Country Up\": Heatmap of Individual Outcomes for Average 3->10 Interactions")
population.plotStatistics(testing = True)
print("Dead:",population.nDead[-1])
plt.show() | 0.303113 | 0.593491 |
import os
import collections
import itertools
from . import helpers
__all__ = ('Display',)
class Graphic:
    """Tracks a stack of terminal 'visuals' and redraws them in place.

    Each visual is stored in three forms: `dirty` (the caller's raw value),
    `ready` (dirty with newlines injected so no line exceeds the width),
    and `clean` (ready with escape sequences stripped, used for measuring).
    """

    _Visual = collections.namedtuple('Visual', 'dirty ready clean')

    __slots__ = ('_io', '_cursor', '_visuals', '_origin', '_width')

    def __init__(self, io, cursor):
        self._io = io          # output sink with .send()
        self._cursor = cursor  # terminal cursor controller
        self._visuals = []     # stack of _Visual entries, draw order
        self._origin = None    # starting column of the first visual (0-based)
        self._width = None     # drawable width in columns

    @property
    def visuals(self):
        return self._visuals

    def reset(self):
        """Forget all tracked visuals (does not touch the screen)."""
        self._visuals.clear()

    def resize(self, width):
        self._width = width

    def _locate(self):
        # Cursor reports 1-based column; keep a 0-based origin.
        (cy, cx) = self._cursor.locate()
        self._origin = cx - 1

    def locate(self):
        self._locate()

    def _originate(self, index):
        """Return the column at which visual `index + 1` would start,
        i.e. the rendered width after the last newline of visual `index`,
        recursing left while earlier visuals share that final line.
        """
        if index < 0:
            return self._origin
        visual = self._visuals[index]
        lines = visual.clean.rsplit(os.linesep, 1)
        origin = len(lines.pop())  # removes last
        if not lines:  # checks if empty
            # No newline in this visual: it continues the previous line.
            origin += self._originate(index - 1)
        return origin

    def _draw(self, index):
        # Re-emit every visual from `index` onward.
        visuals = self._visuals[index:]
        for visual in visuals:
            self._io.send(visual.ready)

    def _clear(self, index):
        # Move the cursor back to where visual `index` starts, then clear
        # everything from there to the end of the screen region.
        visuals = self._visuals[index:]
        ysize = 0
        for visual in visuals:
            ysize += visual.clean.count(os.linesep)
        self._cursor.last(ysize)
        xsize = self._originate(index - 1)
        self._cursor.right(xsize)
        self._cursor.clear()

    @staticmethod
    def _clean(value):
        # Strip escape sequences and control runes (newlines excluded) so
        # the remaining text measures true on-screen width.
        value = helpers.seq.clean(value)
        runes = helpers.clean(value, ignore = {os.linesep})
        value = ''.join(runes)
        return value

    def _format(self, index, value):
        """Return `value` with newlines injected so that, starting from the
        column where visual `index` begins, no rendered line exceeds width.
        """
        clean = self._clean(value)
        lines = clean.split(os.linesep)
        current = self._originate(index - 1)
        # injects \n whenever part of each
        # line is about to exceed the width
        step = self._width
        for (state, line) in enumerate(lines):
            index = step  # NOTE: deliberately shadows the parameter here
            if not state:
                # First line starts mid-row; it has less room available.
                index -= current
            for cycle in itertools.count():
                if not index < len(line):
                    break
                # `+ cycle` compensates for the newlines already injected.
                value = helpers.seq.inject(value, index + cycle, os.linesep)
                index += step
        return value

    def _build(self, index, dirty):
        ready = self._format(index, dirty)
        clean = self._clean(ready)
        visual = self._Visual(dirty, ready, clean)
        return visual

    def _insert(self, index, value):
        """Insert a visual at `index`, rebuild everything after it (their
        wrapping depends on where they start), and redraw from `index`.
        """
        visual = self._build(index, value)
        self._visuals.insert(index, visual)
        after = index + 1
        values = []
        # Pop all later visuals; they must be re-wrapped for new origins.
        while True:
            try:
                visual = self._visuals.pop(after)
            except IndexError:
                break
            values.append(visual.dirty)
        for (subindex, value) in enumerate(values, start = after):
            visual = self._build(subindex, value)
            self._visuals.insert(subindex, visual)
        self._draw(index)
        return visual

    def _create(self, index, value):
        visual = self._insert(index, value)
        return visual

    def create(self, value, index = None):
        """Insert a visual (appended when `index` is None) and draw it."""
        if index is None:
            index = len(self._visuals)
        return self._create(index, value)

    def _remove(self, index):
        # Clear the screen region first, then drop the bookkeeping entry.
        self._clear(index)
        visual = self._visuals.pop(index)
        return visual

    def _delete(self, index):
        visual = self._remove(index)
        self._draw(index)
        return visual

    def delete(self, index):
        """Remove visual `index` from the screen and redraw the rest."""
        return self._delete(index)

    def _update(self, index, value):
        self._remove(index)
        visual = self._insert(index, value)
        return visual

    def update(self, index, value):
        """Replace visual `index` with `value`, reflowing later visuals."""
        return self._update(index, value)
class Display:
    """High-level facade over Graphic: create, update and tear down the
    stack of visuals that make up an interactive prompt."""

    __slots__ = ('_graphic',)

    def __init__(self, io, cursor):
        self._graphic = Graphic(io, cursor)

    @property
    def _size(self):
        # Number of visuals currently tracked.
        return len(self._graphic.visuals)

    def locate(self, width):
        """Record the cursor origin and the drawable width."""
        self._graphic.locate()
        self._graphic.resize(width)

    def create(self, value, *rest, fall = 0):
        """Append one visual per value; `fall` appends that many newlines
        as a final spacer visual.  None values become empty visuals."""
        values = [value, *rest, fall * os.linesep]
        for value in values:
            if value is None:
                value = ''
            self._graphic.create(value)

    def update(self, *values):
        """Update visuals 1..len(values) in place; None entries are skipped.

        Raises ValueError when more values are given than updatable visuals
        (visual 0 is never updated through this method).
        """
        if len(values) > self._size - 1:
            raise ValueError('too many values')
        pairs = tuple(enumerate(values, start = 1))
        # NOTE(review): processed in reverse -- presumably so each update
        # reflows only the visuals after it once; confirm against Graphic.
        for (index, value) in reversed(pairs):
            if value is None:
                continue
            self._graphic.update(index, value)

    def finish(self, value, full = False):
        """Delete the tracked visuals (all of them when `full`, otherwise
        all but the first), optionally draw a final `value`, and reset."""
        enter = not full  # bool is an int: 0 keeps nothing, 1 keeps visual 0
        leave = self._size
        indexes = range(enter, leave)
        for index in reversed(indexes):
            self._graphic.delete(index)
        if value is not None:  # idiom fix: was `not value is None`
            self._graphic.create(value)
        self._graphic.reset()
import collections
import itertools
from . import helpers
__all__ = ('Display',)
class Graphic:
_Visual = collections.namedtuple('Visual', 'dirty ready clean')
__slots__ = ('_io', '_cursor', '_visuals', '_origin', '_width')
def __init__(self, io, cursor):
self._io = io
self._cursor = cursor
self._visuals = []
self._origin = None
self._width = None
@property
def visuals(self):
return self._visuals
def reset(self):
self._visuals.clear()
def resize(self, width):
self._width = width
def _locate(self):
(cy, cx) = self._cursor.locate()
self._origin = cx - 1
def locate(self):
self._locate()
def _originate(self, index):
if index < 0:
return self._origin
visual = self._visuals[index]
lines = visual.clean.rsplit(os.linesep, 1)
origin = len(lines.pop()) # removes last
if not lines: # checks if empty
origin += self._originate(index - 1)
return origin
def _draw(self, index):
visuals = self._visuals[index:]
for visual in visuals:
self._io.send(visual.ready)
def _clear(self, index):
visuals = self._visuals[index:]
ysize = 0
for visual in visuals:
ysize += visual.clean.count(os.linesep)
self._cursor.last(ysize)
xsize = self._originate(index - 1)
self._cursor.right(xsize)
self._cursor.clear()
@staticmethod
def _clean(value):
value = helpers.seq.clean(value)
runes = helpers.clean(value, ignore = {os.linesep})
value = ''.join(runes)
return value
def _format(self, index, value):
clean = self._clean(value)
lines = clean.split(os.linesep)
current = self._originate(index - 1)
# injects \n whenever part of each
# line is about to exceed the width
step = self._width
for (state, line) in enumerate(lines):
index = step
if not state:
index -= current
for cycle in itertools.count():
if not index < len(line):
break
value = helpers.seq.inject(value, index + cycle, os.linesep)
index += step
return value
def _build(self, index, dirty):
ready = self._format(index, dirty)
clean = self._clean(ready)
visual = self._Visual(dirty, ready, clean)
return visual
def _insert(self, index, value):
visual = self._build(index, value)
self._visuals.insert(index, visual)
after = index + 1
values = []
while True:
try:
visual = self._visuals.pop(after)
except IndexError:
break
values.append(visual.dirty)
for (subindex, value) in enumerate(values, start = after):
visual = self._build(subindex, value)
self._visuals.insert(subindex, visual)
self._draw(index)
return visual
def _create(self, index, value):
visual = self._insert(index, value)
return visual
def create(self, value, index = None):
if index is None:
index = len(self._visuals)
return self._create(index, value)
def _remove(self, index):
self._clear(index)
visual = self._visuals.pop(index)
return visual
def _delete(self, index):
visual = self._remove(index)
self._draw(index)
return visual
def delete(self, index):
return self._delete(index)
def _update(self, index, value):
self._remove(index)
visual = self._insert(index, value)
return visual
def update(self, index, value):
return self._update(index, value)
class Display:
__slots__ = ('_graphic',)
def __init__(self, io, cursor):
self._graphic = Graphic(io, cursor)
@property
def _size(self):
return len(self._graphic.visuals)
def locate(self, width):
self._graphic.locate()
self._graphic.resize(width)
def create(self, value, *rest, fall = 0):
values = [value, *rest, fall * os.linesep]
for value in values:
if value is None:
value = ''
self._graphic.create(value)
def update(self, *values):
if len(values) > self._size - 1:
raise ValueError('too many values')
pairs = tuple(enumerate(values, start = 1))
for (index, value) in reversed(pairs):
if value is None:
continue
self._graphic.update(index, value)
def finish(self, value, full = False):
enter = not full
leave = self._size
indexes = range(enter, leave)
for index in reversed(indexes):
self._graphic.delete(index)
if not value is None:
self._graphic.create(value)
self._graphic.reset() | 0.603231 | 0.355132 |
import pytest
import responses
import mock
from celery.exceptions import Retry
from django.core.cache import cache
from django.test import SimpleTestCase
from django.test import TestCase, override_settings
from pyfakefs.fake_filesystem_unittest import TestCase as FakeFsTestCase
from mii_rss.factories import FeedEntriesFactory, FeedFilterFactory
from mii_rss.logic import already_exists, match, get_or_create_downloading_object, get_dict_from_feeds
from mii_rss.models import FeedDownloaded, FeedEntries
from mii_rss.tasks import check_feed_and_download_torrents, recheck_feed_and_download_torrents, \
add_torrent_to_transmission, get_hashed_link
from mii_sorter.models import Season, Episode
from mii_sorter.models import Serie
class TestRSS(TestCase):
def test_match(self):
entry = {
'title': 'homeland s04e09 theres something else going on 1080i hdtv dd5 1 mpeg2-topkek [no rar]'
}
filters = {
'^homeland.*720p',
'^star.wars.rebels.*720p',
'^better.call.saul.*720p'
}
assert not match(entry, filters)[0]
entry = {
'title': 'homeland s04e09 theres something else going on 720p hdtv dd5 1 mpeg2-topkek [no rar]'
}
assert match(entry, filters)[0]
entry = {
'title': 'better call saul s01e01 720p hdtv x264-killers [no rar]'
}
assert match(entry, filters)[0]
def test_episode_does_not_already_exist(self):
db_name = 'Saitama'
title = 'Saitama.S01E01.mkv'
assert not already_exists(db_name, title)
def test_episode_already_exists(self):
serie = Serie.objects.create(name='Saitama')
season = Season.objects.create(number=1, serie=serie)
Episode.objects.create(number=1, season=season, file_size=100, file_path='')
db_name = 'Saitama'
title = 'Saitama.S01E01.mkv'
assert already_exists(db_name, title)
def test_season_does_not_exist(self):
db_name = 'Saitama'
title = 'Saitama.S01.rar'
assert already_exists(db_name, title)
def test_get_or_create_downloading_object_episode_create(self):
db_name = 'Saitama'
title = 'Saitama.S01E01.mkv'
assert get_or_create_downloading_object(db_name, title)
assert not get_or_create_downloading_object(db_name, title)
def test_get_or_create_downloading_object_episode_get(self):
db_name = 'Saitama'
title = 'Saitama.S01E01.mkv'
FeedDownloaded.objects.create(re_filter=db_name, episode=1, season=1)
assert not get_or_create_downloading_object(db_name, title)
def test_get_or_create_downloading_object_season_create(self):
db_name = 'Saitama'
title = 'Saitama.S01'
assert get_or_create_downloading_object(db_name, title)
assert not get_or_create_downloading_object(db_name, title)
def test_get_or_create_downloading_object_season_get(self):
db_name = 'Saitama'
title = 'Saitama.S01'
FeedDownloaded.objects.create(re_filter=db_name, season=1)
assert not get_or_create_downloading_object(db_name, title)
def test_get_or_create_downloading_object_season_get_blocks_episode(self):
db_name = 'Saitama'
title = 'Saitama.S01E01'
FeedDownloaded.objects.create(re_filter=db_name, season=1)
assert not get_or_create_downloading_object(db_name, title)
def test_get_entry_from_feed(self):
class Feed(object):
def __getitem__(self, item):
return item
list_of_feed = [Feed() for x in range(0, 5)]
resulting_dict = get_dict_from_feeds(list_of_feed)
assert resulting_dict == {'entries': [{'title': 'title', 'link': 'link'} for x in range(0, 5)]}
@override_settings(TORRENT_WATCHED_FOLDER='/')
class TestTask(FakeFsTestCase, TestCase):
def setUp(self):
self.setUpPyfakefs()
FeedFilterFactory.create(regex='non_matching', name='test_entry')
@mock.patch('mii_rss.tasks.logger')
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed_error(self, feedparser, logger):
feedparser.parse.return_value = {'status': 500}
check_feed_and_download_torrents()
assert logger.error.called
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed_dumping_entries(self, feedparser):
feedparser.parse.return_value = {'status': 200, 'entries': []}
check_feed_and_download_torrents()
assert FeedEntries.objects.all()
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed(self, feedparser):
feedparser.parse.return_value = {'status': 200, 'entries': [{'title': 'arrow', 'link': None}]}
check_feed_and_download_torrents()
@mock.patch('mii_rss.tasks.add_torrent_to_transmission')
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed_matching_already_exist(self, feedparser, add_torrent_to_transmission):
self.fs.CreateFile('/test.torrent')
feedparser.parse.return_value = {'status': 200,
'entries': [{'title': 'non_matching', 'link': '/test.torrent?'}]
}
check_feed_and_download_torrents()
assert not add_torrent_to_transmission.delay.called
@mock.patch('mii_rss.tasks.add_torrent_to_transmission')
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed_matching_downloading(self, feedparser, add_torrent_to_transmission):
feedparser.parse.return_value = {'status': 200,
'entries': [{'title': 'non_matching', 'link': '/test.torrent?'}]
}
check_feed_and_download_torrents()
assert add_torrent_to_transmission.delay.called
@mock.patch('mii_rss.tasks.add_torrent_to_transmission')
@mock.patch('mii_rss.tasks.feedparser')
@mock.patch('mii_rss.tasks.get_or_create_downloading_object')
def test_task_feed_matching_already_downloading(self, get_or_create, feedparser, add_torrent_to_transmission):
get_or_create.return_value = False
feedparser.parse.return_value = {'status': 200,
'entries': [{'title': 'non_matching', 'link': '/test.torrent?'}]
}
check_feed_and_download_torrents()
assert not add_torrent_to_transmission.delay.called
@mock.patch('mii_rss.tasks.process_feeds')
def test_recheck_feeds(self, process_feeds):
FeedEntriesFactory.create_batch(10)
recheck_feed_and_download_torrents()
assert process_feeds.called
@responses.activate
@override_settings(TRANSMISSION_RPC_URL='http://url/')
class TestTaskTransmission(SimpleTestCase):
def test_add_t_to_transmission_retry(self):
url_link = 'http://t_link'
responses.add(responses.GET, url_link,
body='base64,dummy_test', status=200,
content_type='application/text')
responses.add(responses.POST, 'http://url/',
status=409,
headers={'X-Transmission-Session-Id': 'special_key'})
res = add_torrent_to_transmission(url_link)
assert isinstance(res, Retry)
assert cache.get('X-Transmission-Session-Id') == 'special_key'
def test_with_header_and_content_success(self):
url_link = 'http://t_link'
cache.set(get_hashed_link(url_link), 'dummy')
cache.set('X-Transmission-Session-Id') == 'special_key'
responses.add(responses.POST, 'http://url/',
status=200,
json={'result': 'success'},
headers={'X-Transmission-Session-Id': 'special_key'})
def test_with_header_and_content_almost_success(self):
url_link = 'http://t_link'
cache.set(get_hashed_link(url_link), 'dummy')
cache.set('X-Transmission-Session-Id') == 'special_key'
responses.add(responses.POST, 'http://url/',
status=200,
json={'result': 'not a success'},
headers={'X-Transmission-Session-Id': 'special_key'})
with pytest.raises(Exception):
add_torrent_to_transmission(url_link)
def test_with_header_and_content_500(self):
url_link = 'http://t_link'
cache.set(get_hashed_link(url_link), 'dummy')
cache.set('X-Transmission-Session-Id') == 'special_key'
responses.add(responses.POST, 'http://url/',
status=500,
bode='FAILURE',
headers={'X-Transmission-Session-Id': 'special_key'})
with pytest.raises(Exception):
add_torrent_to_transmission(url_link)
def test_with_header_and_content_400(self):
url_link = 'http://t_link'
cache.set(get_hashed_link(url_link), 'dummy')
cache.set('X-Transmission-Session-Id') == 'special_key'
responses.add(responses.POST, 'http://url/',
status=400,
bode='FAILURE',
headers={'X-Transmission-Session-Id': 'special_key'})
with pytest.raises(Exception):
add_torrent_to_transmission(url_link) | tests/test_mii_rss.py | import pytest
import responses
import mock
from celery.exceptions import Retry
from django.core.cache import cache
from django.test import SimpleTestCase
from django.test import TestCase, override_settings
from pyfakefs.fake_filesystem_unittest import TestCase as FakeFsTestCase
from mii_rss.factories import FeedEntriesFactory, FeedFilterFactory
from mii_rss.logic import already_exists, match, get_or_create_downloading_object, get_dict_from_feeds
from mii_rss.models import FeedDownloaded, FeedEntries
from mii_rss.tasks import check_feed_and_download_torrents, recheck_feed_and_download_torrents, \
add_torrent_to_transmission, get_hashed_link
from mii_sorter.models import Season, Episode
from mii_sorter.models import Serie
class TestRSS(TestCase):
def test_match(self):
entry = {
'title': 'homeland s04e09 theres something else going on 1080i hdtv dd5 1 mpeg2-topkek [no rar]'
}
filters = {
'^homeland.*720p',
'^star.wars.rebels.*720p',
'^better.call.saul.*720p'
}
assert not match(entry, filters)[0]
entry = {
'title': 'homeland s04e09 theres something else going on 720p hdtv dd5 1 mpeg2-topkek [no rar]'
}
assert match(entry, filters)[0]
entry = {
'title': 'better call saul s01e01 720p hdtv x264-killers [no rar]'
}
assert match(entry, filters)[0]
def test_episode_does_not_already_exist(self):
db_name = 'Saitama'
title = 'Saitama.S01E01.mkv'
assert not already_exists(db_name, title)
def test_episode_already_exists(self):
serie = Serie.objects.create(name='Saitama')
season = Season.objects.create(number=1, serie=serie)
Episode.objects.create(number=1, season=season, file_size=100, file_path='')
db_name = 'Saitama'
title = 'Saitama.S01E01.mkv'
assert already_exists(db_name, title)
def test_season_does_not_exist(self):
db_name = 'Saitama'
title = 'Saitama.S01.rar'
assert already_exists(db_name, title)
def test_get_or_create_downloading_object_episode_create(self):
db_name = 'Saitama'
title = 'Saitama.S01E01.mkv'
assert get_or_create_downloading_object(db_name, title)
assert not get_or_create_downloading_object(db_name, title)
def test_get_or_create_downloading_object_episode_get(self):
db_name = 'Saitama'
title = 'Saitama.S01E01.mkv'
FeedDownloaded.objects.create(re_filter=db_name, episode=1, season=1)
assert not get_or_create_downloading_object(db_name, title)
def test_get_or_create_downloading_object_season_create(self):
db_name = 'Saitama'
title = 'Saitama.S01'
assert get_or_create_downloading_object(db_name, title)
assert not get_or_create_downloading_object(db_name, title)
def test_get_or_create_downloading_object_season_get(self):
db_name = 'Saitama'
title = 'Saitama.S01'
FeedDownloaded.objects.create(re_filter=db_name, season=1)
assert not get_or_create_downloading_object(db_name, title)
def test_get_or_create_downloading_object_season_get_blocks_episode(self):
db_name = 'Saitama'
title = 'Saitama.S01E01'
FeedDownloaded.objects.create(re_filter=db_name, season=1)
assert not get_or_create_downloading_object(db_name, title)
def test_get_entry_from_feed(self):
class Feed(object):
def __getitem__(self, item):
return item
list_of_feed = [Feed() for x in range(0, 5)]
resulting_dict = get_dict_from_feeds(list_of_feed)
assert resulting_dict == {'entries': [{'title': 'title', 'link': 'link'} for x in range(0, 5)]}
@override_settings(TORRENT_WATCHED_FOLDER='/')
class TestTask(FakeFsTestCase, TestCase):
def setUp(self):
self.setUpPyfakefs()
FeedFilterFactory.create(regex='non_matching', name='test_entry')
@mock.patch('mii_rss.tasks.logger')
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed_error(self, feedparser, logger):
feedparser.parse.return_value = {'status': 500}
check_feed_and_download_torrents()
assert logger.error.called
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed_dumping_entries(self, feedparser):
feedparser.parse.return_value = {'status': 200, 'entries': []}
check_feed_and_download_torrents()
assert FeedEntries.objects.all()
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed(self, feedparser):
feedparser.parse.return_value = {'status': 200, 'entries': [{'title': 'arrow', 'link': None}]}
check_feed_and_download_torrents()
@mock.patch('mii_rss.tasks.add_torrent_to_transmission')
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed_matching_already_exist(self, feedparser, add_torrent_to_transmission):
self.fs.CreateFile('/test.torrent')
feedparser.parse.return_value = {'status': 200,
'entries': [{'title': 'non_matching', 'link': '/test.torrent?'}]
}
check_feed_and_download_torrents()
assert not add_torrent_to_transmission.delay.called
@mock.patch('mii_rss.tasks.add_torrent_to_transmission')
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed_matching_downloading(self, feedparser, add_torrent_to_transmission):
feedparser.parse.return_value = {'status': 200,
'entries': [{'title': 'non_matching', 'link': '/test.torrent?'}]
}
check_feed_and_download_torrents()
assert add_torrent_to_transmission.delay.called
@mock.patch('mii_rss.tasks.add_torrent_to_transmission')
@mock.patch('mii_rss.tasks.feedparser')
@mock.patch('mii_rss.tasks.get_or_create_downloading_object')
def test_task_feed_matching_already_downloading(self, get_or_create, feedparser, add_torrent_to_transmission):
get_or_create.return_value = False
feedparser.parse.return_value = {'status': 200,
'entries': [{'title': 'non_matching', 'link': '/test.torrent?'}]
}
check_feed_and_download_torrents()
assert not add_torrent_to_transmission.delay.called
@mock.patch('mii_rss.tasks.process_feeds')
def test_recheck_feeds(self, process_feeds):
FeedEntriesFactory.create_batch(10)
recheck_feed_and_download_torrents()
assert process_feeds.called
@responses.activate
@override_settings(TRANSMISSION_RPC_URL='http://url/')
class TestTaskTransmission(SimpleTestCase):
def test_add_t_to_transmission_retry(self):
url_link = 'http://t_link'
responses.add(responses.GET, url_link,
body='base64,dummy_test', status=200,
content_type='application/text')
responses.add(responses.POST, 'http://url/',
status=409,
headers={'X-Transmission-Session-Id': 'special_key'})
res = add_torrent_to_transmission(url_link)
assert isinstance(res, Retry)
assert cache.get('X-Transmission-Session-Id') == 'special_key'
def test_with_header_and_content_success(self):
url_link = 'http://t_link'
cache.set(get_hashed_link(url_link), 'dummy')
cache.set('X-Transmission-Session-Id') == 'special_key'
responses.add(responses.POST, 'http://url/',
status=200,
json={'result': 'success'},
headers={'X-Transmission-Session-Id': 'special_key'})
def test_with_header_and_content_almost_success(self):
url_link = 'http://t_link'
cache.set(get_hashed_link(url_link), 'dummy')
cache.set('X-Transmission-Session-Id') == 'special_key'
responses.add(responses.POST, 'http://url/',
status=200,
json={'result': 'not a success'},
headers={'X-Transmission-Session-Id': 'special_key'})
with pytest.raises(Exception):
add_torrent_to_transmission(url_link)
def test_with_header_and_content_500(self):
url_link = 'http://t_link'
cache.set(get_hashed_link(url_link), 'dummy')
cache.set('X-Transmission-Session-Id') == 'special_key'
responses.add(responses.POST, 'http://url/',
status=500,
bode='FAILURE',
headers={'X-Transmission-Session-Id': 'special_key'})
with pytest.raises(Exception):
add_torrent_to_transmission(url_link)
def test_with_header_and_content_400(self):
url_link = 'http://t_link'
cache.set(get_hashed_link(url_link), 'dummy')
cache.set('X-Transmission-Session-Id') == 'special_key'
responses.add(responses.POST, 'http://url/',
status=400,
bode='FAILURE',
headers={'X-Transmission-Session-Id': 'special_key'})
with pytest.raises(Exception):
add_torrent_to_transmission(url_link) | 0.534127 | 0.284651 |
import argparse
from graph4nlp.pytorch.modules.config import get_basic_args
from graph4nlp.pytorch.modules.utils.config_utils import get_yaml_config, update_values
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset_yaml",
type=str,
default="examples/pytorch/semantic_parsing/graph2seq/config/new_dependency_gat_bi_sep.yaml",
help="",
)
# default="examples/pytorch/semantic_parsing/graph2seq/config/new_dynamic_refine.yaml", help="")
# default = "examples/pytorch/semantic_parsing/graph2seq/config/new_dynamic.yaml", help = "")
# default="examples/pytorch/semantic_parsing/graph2seq/config/new_constituency.yaml", help="")
parser.add_argument("--word-emb-size", type=int, default=300, help="")
parser.add_argument(
"--log-file", type=str, default="examples/pytorch/semantic_parsing/graph2seq/log/ggnn.txt"
)
parser.add_argument(
"--checkpoint-save-path",
type=str,
default="examples/pytorch/semantic_parsing/graph2seq/save",
)
parser.add_argument("--learning-rate", type=float, default=1e-3, help="")
parser.add_argument("--loss-display-step", type=int, default=3, help=" ")
parser.add_argument("--eval-display-number", type=int, default=3, help="")
parser.add_argument("--lr-start-decay-epoch", type=int, default=20, help="")
parser.add_argument("--lr-decay-rate", type=float, default=0.9)
parser.add_argument("--lr-decay-per-epoch", type=int, default=5, help="")
parser.add_argument("--min-lr", type=float, default=1e-3, help="")
parser.add_argument(
"--use-gpu", type=float, default=1, help="0 for don't use cuda, 1 for using cuda"
)
parser.add_argument(
"--num_works", type=int, default=0, help="The number of works for Dataloader."
)
parser.add_argument("--gpu", type=int, default=0, help="gpu id")
parser.add_argument("--seed", type=int, default=1236, help="")
# dataset config
parser.add_argument("--batch_size", type=int, default=24, help="the size of one mini-batch")
parser.add_argument("--share-vocab", type=bool, default=True, help="whether to share vocab")
parser.add_argument("--val_split_ratio", type=float, default=0, help="")
parser.add_argument("--pretrained_word_emb_name", type=str, default="6B", help="")
parser.add_argument("--pretrained_word_emb_url", type=str, default=None, help="")
parser.add_argument(
"--pretrained_word_emb_cache_dir", type=str, default=".vector_cache", help=""
)
parser.add_argument("--beam-size", type=int, default=4, help="the beam size of beam search")
cfg = parser.parse_args()
our_args = get_yaml_config(cfg.dataset_yaml)
template = get_basic_args(
graph_construction_name=our_args["graph_construction_name"],
graph_embedding_name=our_args["graph_embedding_name"],
decoder_name=our_args["decoder_name"],
)
update_values(to_args=template, from_args_list=[our_args, vars(cfg)])
return template | examples/pytorch/semantic_parsing/graph2seq/args.py | import argparse
from graph4nlp.pytorch.modules.config import get_basic_args
from graph4nlp.pytorch.modules.utils.config_utils import get_yaml_config, update_values
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset_yaml",
type=str,
default="examples/pytorch/semantic_parsing/graph2seq/config/new_dependency_gat_bi_sep.yaml",
help="",
)
# default="examples/pytorch/semantic_parsing/graph2seq/config/new_dynamic_refine.yaml", help="")
# default = "examples/pytorch/semantic_parsing/graph2seq/config/new_dynamic.yaml", help = "")
# default="examples/pytorch/semantic_parsing/graph2seq/config/new_constituency.yaml", help="")
parser.add_argument("--word-emb-size", type=int, default=300, help="")
parser.add_argument(
"--log-file", type=str, default="examples/pytorch/semantic_parsing/graph2seq/log/ggnn.txt"
)
parser.add_argument(
"--checkpoint-save-path",
type=str,
default="examples/pytorch/semantic_parsing/graph2seq/save",
)
parser.add_argument("--learning-rate", type=float, default=1e-3, help="")
parser.add_argument("--loss-display-step", type=int, default=3, help=" ")
parser.add_argument("--eval-display-number", type=int, default=3, help="")
parser.add_argument("--lr-start-decay-epoch", type=int, default=20, help="")
parser.add_argument("--lr-decay-rate", type=float, default=0.9)
parser.add_argument("--lr-decay-per-epoch", type=int, default=5, help="")
parser.add_argument("--min-lr", type=float, default=1e-3, help="")
parser.add_argument(
"--use-gpu", type=float, default=1, help="0 for don't use cuda, 1 for using cuda"
)
parser.add_argument(
"--num_works", type=int, default=0, help="The number of works for Dataloader."
)
parser.add_argument("--gpu", type=int, default=0, help="gpu id")
parser.add_argument("--seed", type=int, default=1236, help="")
# dataset config
parser.add_argument("--batch_size", type=int, default=24, help="the size of one mini-batch")
parser.add_argument("--share-vocab", type=bool, default=True, help="whether to share vocab")
parser.add_argument("--val_split_ratio", type=float, default=0, help="")
parser.add_argument("--pretrained_word_emb_name", type=str, default="6B", help="")
parser.add_argument("--pretrained_word_emb_url", type=str, default=None, help="")
parser.add_argument(
"--pretrained_word_emb_cache_dir", type=str, default=".vector_cache", help=""
)
parser.add_argument("--beam-size", type=int, default=4, help="the beam size of beam search")
cfg = parser.parse_args()
our_args = get_yaml_config(cfg.dataset_yaml)
template = get_basic_args(
graph_construction_name=our_args["graph_construction_name"],
graph_embedding_name=our_args["graph_embedding_name"],
decoder_name=our_args["decoder_name"],
)
update_values(to_args=template, from_args_list=[our_args, vars(cfg)])
return template | 0.538498 | 0.187356 |
from hashlib import sha256
from types import GeneratorType
import xmlsec
from django.http import HttpRequest
from lxml import etree # nosec
from lxml.etree import Element, SubElement # nosec
from structlog.stdlib import get_logger
from authentik.core.exceptions import PropertyMappingExpressionException
from authentik.events.models import Event, EventAction
from authentik.lib.utils.time import timedelta_from_string
from authentik.providers.saml.models import SAMLPropertyMapping, SAMLProvider
from authentik.providers.saml.processors.request_parser import AuthNRequest
from authentik.providers.saml.utils import get_random_id
from authentik.providers.saml.utils.time import get_time_string
from authentik.sources.ldap.auth import LDAP_DISTINGUISHED_NAME
from authentik.sources.saml.exceptions import UnsupportedNameIDFormat
from authentik.sources.saml.processors.constants import (
DIGEST_ALGORITHM_TRANSLATION_MAP,
NS_MAP,
NS_SAML_ASSERTION,
NS_SAML_PROTOCOL,
SAML_NAME_ID_FORMAT_EMAIL,
SAML_NAME_ID_FORMAT_PERSISTENT,
SAML_NAME_ID_FORMAT_TRANSIENT,
SAML_NAME_ID_FORMAT_UNSPECIFIED,
SAML_NAME_ID_FORMAT_WINDOWS,
SAML_NAME_ID_FORMAT_X509,
SIGN_ALGORITHM_TRANSFORM_MAP,
)
# Module-level structlog logger; not referenced in the visible code of this
# module — presumably kept for consistency with the rest of the codebase.
LOGGER = get_logger()
class AssertionProcessor:
    """Generate a SAML Response from an AuthNRequest.
    All validity timestamps and the assertion ID are computed once in
    ``__init__`` so every element of the response shares a consistent view
    of the assertion's validity window.
    """
    # Provider whose configuration (issuer, ACS URL, key pair, property mappings)
    # drives the contents of the response
    provider: SAMLProvider
    # Request for the user the assertion is issued for; user/session are read from it
    http_request: HttpRequest
    # Parsed AuthnRequest being answered; its `id` is echoed back as InResponseTo
    auth_n_request: AuthNRequest
    # Timestamps pre-rendered as strings for direct use in XML attributes
    _issue_instant: str
    # Random ID shared by the Assertion element, its SessionIndex, and the
    # signature reference URI
    _assertion_id: str
    _valid_not_before: str
    _session_not_on_or_after: str
    _valid_not_on_or_after: str
    def __init__(self, provider: SAMLProvider, request: HttpRequest, auth_n_request: AuthNRequest):
        """Capture provider/request context and freeze all time-dependent values."""
        self.provider = provider
        self.http_request = request
        self.auth_n_request = auth_n_request
        self._issue_instant = get_time_string()
        self._assertion_id = get_random_id()
        # Offsets are parsed from the provider's timedelta-string settings
        self._valid_not_before = get_time_string(
            timedelta_from_string(self.provider.assertion_valid_not_before)
        )
        self._session_not_on_or_after = get_time_string(
            timedelta_from_string(self.provider.session_valid_not_on_or_after)
        )
        self._valid_not_on_or_after = get_time_string(
            timedelta_from_string(self.provider.assertion_valid_not_on_or_after)
        )
    def get_attributes(self) -> Element:
        """Get AttributeStatement Element with Attributes from Property Mappings.
        Mappings that evaluate to ``None`` are skipped; evaluation errors are
        recorded as CONFIGURATION_ERROR events and the mapping is skipped.
        """
        # https://commons.lbl.gov/display/IDMgmt/Attribute+Definitions
        attribute_statement = Element(f"{{{NS_SAML_ASSERTION}}}AttributeStatement")
        user = self.http_request.user
        for mapping in self.provider.property_mappings.all().select_subclasses():
            # Only SAML-specific property mappings are applicable here
            if not isinstance(mapping, SAMLPropertyMapping):
                continue
            try:
                mapping: SAMLPropertyMapping
                value = mapping.evaluate(
                    user=user,
                    request=self.http_request,
                    provider=self.provider,
                )
                # A mapping returning None means "omit this attribute entirely"
                if value is None:
                    continue
                attribute = Element(f"{{{NS_SAML_ASSERTION}}}Attribute")
                if mapping.friendly_name and mapping.friendly_name != "":
                    attribute.attrib["FriendlyName"] = mapping.friendly_name
                attribute.attrib["Name"] = mapping.saml_name
                # Normalise scalars so single- and multi-valued mappings share
                # one AttributeValue emission path
                if not isinstance(value, (list, GeneratorType)):
                    value = [value]
                for value_item in value:
                    attribute_value = SubElement(
                        attribute, f"{{{NS_SAML_ASSERTION}}}AttributeValue"
                    )
                    # Element text must be a string; coerce anything else
                    if not isinstance(value_item, str):
                        value_item = str(value_item)
                    attribute_value.text = value_item
                attribute_statement.append(attribute)
            except (PropertyMappingExpressionException, ValueError) as exc:
                # Value error can be raised when assigning invalid data to an attribute
                Event.new(
                    EventAction.CONFIGURATION_ERROR,
                    message=f"Failed to evaluate property-mapping: {str(exc)}",
                    mapping=mapping,
                ).from_http(self.http_request)
                continue
        return attribute_statement
    def get_issuer(self) -> Element:
        """Get Issuer Element carrying the provider's configured issuer string."""
        issuer = Element(f"{{{NS_SAML_ASSERTION}}}Issuer", nsmap=NS_MAP)
        issuer.text = self.provider.issuer
        return issuer
    def get_assertion_auth_n_statement(self) -> Element:
        """Generate AuthnStatement with AuthnContext and ContextClassRef Elements."""
        auth_n_statement = Element(f"{{{NS_SAML_ASSERTION}}}AuthnStatement")
        auth_n_statement.attrib["AuthnInstant"] = self._valid_not_before
        # The assertion ID doubles as the SessionIndex
        auth_n_statement.attrib["SessionIndex"] = self._assertion_id
        auth_n_statement.attrib["SessionNotOnOrAfter"] = self._session_not_on_or_after
        auth_n_context = SubElement(auth_n_statement, f"{{{NS_SAML_ASSERTION}}}AuthnContext")
        auth_n_context_class_ref = SubElement(
            auth_n_context, f"{{{NS_SAML_ASSERTION}}}AuthnContextClassRef"
        )
        # Context class is fixed: password over a protected transport
        auth_n_context_class_ref.text = (
            "urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport"
        )
        return auth_n_statement
    def get_assertion_conditions(self) -> Element:
        """Generate Conditions with AudienceRestriction and Audience Elements."""
        conditions = Element(f"{{{NS_SAML_ASSERTION}}}Conditions")
        conditions.attrib["NotBefore"] = self._valid_not_before
        conditions.attrib["NotOnOrAfter"] = self._valid_not_on_or_after
        # An empty audience setting means no AudienceRestriction is emitted at all
        if self.provider.audience != "":
            audience_restriction = SubElement(
                conditions, f"{{{NS_SAML_ASSERTION}}}AudienceRestriction"
            )
            audience = SubElement(audience_restriction, f"{{{NS_SAML_ASSERTION}}}Audience")
            audience.text = self.provider.audience
        return conditions
    # pylint: disable=too-many-return-statements
    def get_name_id(self) -> Element:
        """Get NameID Element.
        Precedence: a configured ``name_id_mapping`` always wins, regardless of
        the SP's requested format; otherwise the format from the request's
        NameIDPolicy selects the value source. Raises UnsupportedNameIDFormat
        for formats not handled below.
        """
        name_id = Element(f"{{{NS_SAML_ASSERTION}}}NameID")
        name_id.attrib["Format"] = self.auth_n_request.name_id_policy
        # persistent is used as a fallback, so always generate it
        persistent = self.http_request.user.uid
        name_id.text = persistent
        # If name_id_mapping is set, we override the value, regardless of what the SP asks for
        if self.provider.name_id_mapping:
            try:
                value = self.provider.name_id_mapping.evaluate(
                    user=self.http_request.user,
                    request=self.http_request,
                    provider=self.provider,
                )
                if value is not None:
                    name_id.text = str(value)
                # A None result keeps the persistent fallback set above
                return name_id
            except PropertyMappingExpressionException as exc:
                # Record the failure and fall back to the persistent value
                Event.new(
                    EventAction.CONFIGURATION_ERROR,
                    message=f"Failed to evaluate property-mapping: {str(exc)}",
                    mapping=self.provider.name_id_mapping,
                ).from_http(self.http_request)
                return name_id
        if name_id.attrib["Format"] == SAML_NAME_ID_FORMAT_EMAIL:
            name_id.text = self.http_request.user.email
            return name_id
        # "unspecified" is treated the same as "persistent"
        if name_id.attrib["Format"] in [
            SAML_NAME_ID_FORMAT_PERSISTENT,
            SAML_NAME_ID_FORMAT_UNSPECIFIED,
        ]:
            name_id.text = persistent
            return name_id
        if name_id.attrib["Format"] == SAML_NAME_ID_FORMAT_X509:
            # This attribute is statically set by the LDAP source
            name_id.text = self.http_request.user.attributes.get(
                LDAP_DISTINGUISHED_NAME, persistent
            )
            return name_id
        if name_id.attrib["Format"] == SAML_NAME_ID_FORMAT_WINDOWS:
            # This attribute is statically set by the LDAP source
            name_id.text = self.http_request.user.attributes.get("upn", persistent)
            return name_id
        if name_id.attrib["Format"] == SAML_NAME_ID_FORMAT_TRANSIENT:
            # Use the hash of the user's session, which changes every session
            # NOTE(review): assumes a session key exists; encode() would fail on None
            session_key: str = self.http_request.session.session_key
            name_id.text = sha256(session_key.encode()).hexdigest()
            return name_id
        raise UnsupportedNameIDFormat(
            f"Assertion contains NameID with unsupported format {name_id.attrib['Format']}."
        )
    def get_assertion_subject(self) -> Element:
        """Generate Subject Element with NameID and SubjectConfirmation Objects."""
        subject = Element(f"{{{NS_SAML_ASSERTION}}}Subject")
        subject.append(self.get_name_id())
        subject_confirmation = SubElement(subject, f"{{{NS_SAML_ASSERTION}}}SubjectConfirmation")
        subject_confirmation.attrib["Method"] = "urn:oasis:names:tc:SAML:2.0:cm:bearer"
        subject_confirmation_data = SubElement(
            subject_confirmation, f"{{{NS_SAML_ASSERTION}}}SubjectConfirmationData"
        )
        # InResponseTo is only set when the incoming request carried an ID
        if self.auth_n_request.id:
            subject_confirmation_data.attrib["InResponseTo"] = self.auth_n_request.id
        subject_confirmation_data.attrib["NotOnOrAfter"] = self._valid_not_on_or_after
        subject_confirmation_data.attrib["Recipient"] = self.provider.acs_url
        return subject
    def get_assertion(self) -> Element:
        """Generate Main Assertion Element.
        When a signing keypair is configured, an empty signature template is
        inserted directly after the Issuer element; build_response() locates
        and fills it in later.
        """
        assertion = Element(f"{{{NS_SAML_ASSERTION}}}Assertion", nsmap=NS_MAP)
        assertion.attrib["Version"] = "2.0"
        assertion.attrib["ID"] = self._assertion_id
        assertion.attrib["IssueInstant"] = self._issue_instant
        assertion.append(self.get_issuer())
        if self.provider.signing_kp:
            # Fall back to RSA-SHA1 if the configured algorithm has no mapping
            sign_algorithm_transform = SIGN_ALGORITHM_TRANSFORM_MAP.get(
                self.provider.signature_algorithm, xmlsec.constants.TransformRsaSha1
            )
            signature = xmlsec.template.create(
                assertion,
                xmlsec.constants.TransformExclC14N,
                sign_algorithm_transform,
                ns="ds",  # type: ignore
            )
            assertion.append(signature)
        assertion.append(self.get_assertion_subject())
        assertion.append(self.get_assertion_conditions())
        assertion.append(self.get_assertion_auth_n_statement())
        assertion.append(self.get_attributes())
        return assertion
    def get_response(self) -> Element:
        """Generate Root response element wrapping a Success status and the assertion."""
        response = Element(f"{{{NS_SAML_PROTOCOL}}}Response", nsmap=NS_MAP)
        response.attrib["Version"] = "2.0"
        response.attrib["IssueInstant"] = self._issue_instant
        response.attrib["Destination"] = self.provider.acs_url
        # The Response element gets its own random ID, distinct from the assertion's
        response.attrib["ID"] = get_random_id()
        if self.auth_n_request.id:
            response.attrib["InResponseTo"] = self.auth_n_request.id
        response.append(self.get_issuer())
        status = SubElement(response, f"{{{NS_SAML_PROTOCOL}}}Status")
        status_code = SubElement(status, f"{{{NS_SAML_PROTOCOL}}}StatusCode")
        status_code.attrib["Value"] = "urn:oasis:names:tc:SAML:2.0:status:Success"
        response.append(self.get_assertion())
        return response
    def build_response(self) -> str:
        """Build string XML Response and sign if signing is enabled.
        Only the Assertion element is signed (enveloped signature referencing
        the assertion's ID), not the outer Response.
        """
        root_response = self.get_response()
        if self.provider.signing_kp:
            # Fall back to SHA1 if the configured digest has no mapping
            digest_algorithm_transform = DIGEST_ALGORITHM_TRANSLATION_MAP.get(
                self.provider.digest_algorithm, xmlsec.constants.TransformSha1
            )
            # Locate the assertion and the signature template placed by get_assertion()
            assertion = root_response.xpath("//saml:Assertion", namespaces=NS_MAP)[0]
            # Register "ID" as an ID-type attribute so the reference URI below resolves
            xmlsec.tree.add_ids(assertion, ["ID"])
            signature_node = xmlsec.tree.find_node(assertion, xmlsec.constants.NodeSignature)
            ref = xmlsec.template.add_reference(
                signature_node,
                digest_algorithm_transform,
                uri="#" + self._assertion_id,
            )
            xmlsec.template.add_transform(ref, xmlsec.constants.TransformEnveloped)
            xmlsec.template.add_transform(ref, xmlsec.constants.TransformExclC14N)
            # Embed the X509 certificate in KeyInfo alongside the signature
            key_info = xmlsec.template.ensure_key_info(signature_node)
            xmlsec.template.add_x509_data(key_info)
            ctx = xmlsec.SignatureContext()
            key = xmlsec.Key.from_memory(
                self.provider.signing_kp.key_data,
                xmlsec.constants.KeyDataFormatPem,
                None,
            )
            key.load_cert_from_memory(
                self.provider.signing_kp.certificate_data,
                xmlsec.constants.KeyDataFormatCertPem,
            )
            ctx.key = key
            ctx.sign(signature_node)
        return etree.tostring(root_response).decode("utf-8")  # nosec
from types import GeneratorType
import xmlsec
from django.http import HttpRequest
from lxml import etree # nosec
from lxml.etree import Element, SubElement # nosec
from structlog.stdlib import get_logger
from authentik.core.exceptions import PropertyMappingExpressionException
from authentik.events.models import Event, EventAction
from authentik.lib.utils.time import timedelta_from_string
from authentik.providers.saml.models import SAMLPropertyMapping, SAMLProvider
from authentik.providers.saml.processors.request_parser import AuthNRequest
from authentik.providers.saml.utils import get_random_id
from authentik.providers.saml.utils.time import get_time_string
from authentik.sources.ldap.auth import LDAP_DISTINGUISHED_NAME
from authentik.sources.saml.exceptions import UnsupportedNameIDFormat
from authentik.sources.saml.processors.constants import (
DIGEST_ALGORITHM_TRANSLATION_MAP,
NS_MAP,
NS_SAML_ASSERTION,
NS_SAML_PROTOCOL,
SAML_NAME_ID_FORMAT_EMAIL,
SAML_NAME_ID_FORMAT_PERSISTENT,
SAML_NAME_ID_FORMAT_TRANSIENT,
SAML_NAME_ID_FORMAT_UNSPECIFIED,
SAML_NAME_ID_FORMAT_WINDOWS,
SAML_NAME_ID_FORMAT_X509,
SIGN_ALGORITHM_TRANSFORM_MAP,
)
LOGGER = get_logger()
class AssertionProcessor:
    """Generate a SAML Response from an AuthNRequest"""

    provider: SAMLProvider
    http_request: HttpRequest
    auth_n_request: AuthNRequest

    # Timestamps and IDs are computed once at construction so that the
    # Response and its embedded Assertion stay mutually consistent.
    _issue_instant: str
    _assertion_id: str
    _valid_not_before: str
    _session_not_on_or_after: str
    _valid_not_on_or_after: str

    def __init__(self, provider: SAMLProvider, request: HttpRequest, auth_n_request: AuthNRequest):
        self.provider = provider
        self.http_request = request
        self.auth_n_request = auth_n_request
        self._issue_instant = get_time_string()
        self._assertion_id = get_random_id()
        self._valid_not_before = get_time_string(
            timedelta_from_string(self.provider.assertion_valid_not_before)
        )
        self._session_not_on_or_after = get_time_string(
            timedelta_from_string(self.provider.session_valid_not_on_or_after)
        )
        self._valid_not_on_or_after = get_time_string(
            timedelta_from_string(self.provider.assertion_valid_not_on_or_after)
        )

    def get_attributes(self) -> Element:
        """Get AttributeStatement Element with Attributes from Property Mappings."""
        # https://commons.lbl.gov/display/IDMgmt/Attribute+Definitions
        attribute_statement = Element(f"{{{NS_SAML_ASSERTION}}}AttributeStatement")
        user = self.http_request.user
        for mapping in self.provider.property_mappings.all().select_subclasses():
            # Only SAML property mappings produce attributes here.
            if not isinstance(mapping, SAMLPropertyMapping):
                continue
            try:
                mapping: SAMLPropertyMapping
                value = mapping.evaluate(
                    user=user,
                    request=self.http_request,
                    provider=self.provider,
                )
                # A mapping that evaluates to None is silently skipped.
                if value is None:
                    continue
                attribute = Element(f"{{{NS_SAML_ASSERTION}}}Attribute")
                if mapping.friendly_name and mapping.friendly_name != "":
                    attribute.attrib["FriendlyName"] = mapping.friendly_name
                attribute.attrib["Name"] = mapping.saml_name
                # Lists/generators become multiple AttributeValue children;
                # scalars are wrapped into a single-element list first.
                if not isinstance(value, (list, GeneratorType)):
                    value = [value]
                for value_item in value:
                    attribute_value = SubElement(
                        attribute, f"{{{NS_SAML_ASSERTION}}}AttributeValue"
                    )
                    if not isinstance(value_item, str):
                        value_item = str(value_item)
                    attribute_value.text = value_item
                attribute_statement.append(attribute)
            except (PropertyMappingExpressionException, ValueError) as exc:
                # Value error can be raised when assigning invalid data to an attribute
                Event.new(
                    EventAction.CONFIGURATION_ERROR,
                    message=f"Failed to evaluate property-mapping: {str(exc)}",
                    mapping=mapping,
                ).from_http(self.http_request)
                continue
        return attribute_statement

    def get_issuer(self) -> Element:
        """Get Issuer Element"""
        issuer = Element(f"{{{NS_SAML_ASSERTION}}}Issuer", nsmap=NS_MAP)
        issuer.text = self.provider.issuer
        return issuer

    def get_assertion_auth_n_statement(self) -> Element:
        """Generate AuthnStatement with AuthnContext and ContextClassRef Elements."""
        auth_n_statement = Element(f"{{{NS_SAML_ASSERTION}}}AuthnStatement")
        auth_n_statement.attrib["AuthnInstant"] = self._valid_not_before
        # The assertion ID doubles as the session index.
        auth_n_statement.attrib["SessionIndex"] = self._assertion_id
        auth_n_statement.attrib["SessionNotOnOrAfter"] = self._session_not_on_or_after
        auth_n_context = SubElement(auth_n_statement, f"{{{NS_SAML_ASSERTION}}}AuthnContext")
        auth_n_context_class_ref = SubElement(
            auth_n_context, f"{{{NS_SAML_ASSERTION}}}AuthnContextClassRef"
        )
        auth_n_context_class_ref.text = (
            "urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport"
        )
        return auth_n_statement

    def get_assertion_conditions(self) -> Element:
        """Generate Conditions with AudienceRestriction and Audience Elements."""
        conditions = Element(f"{{{NS_SAML_ASSERTION}}}Conditions")
        conditions.attrib["NotBefore"] = self._valid_not_before
        conditions.attrib["NotOnOrAfter"] = self._valid_not_on_or_after
        # An empty audience means no AudienceRestriction is emitted at all.
        if self.provider.audience != "":
            audience_restriction = SubElement(
                conditions, f"{{{NS_SAML_ASSERTION}}}AudienceRestriction"
            )
            audience = SubElement(audience_restriction, f"{{{NS_SAML_ASSERTION}}}Audience")
            audience.text = self.provider.audience
        return conditions

    # pylint: disable=too-many-return-statements
    def get_name_id(self) -> Element:
        """Get NameID Element

        :raises UnsupportedNameIDFormat: if the requested NameID policy is
            none of the formats handled below.
        """
        name_id = Element(f"{{{NS_SAML_ASSERTION}}}NameID")
        name_id.attrib["Format"] = self.auth_n_request.name_id_policy
        # persistent is used as a fallback, so always generate it
        persistent = self.http_request.user.uid
        name_id.text = persistent
        # If name_id_mapping is set, we override the value, regardless of what the SP asks for
        if self.provider.name_id_mapping:
            try:
                value = self.provider.name_id_mapping.evaluate(
                    user=self.http_request.user,
                    request=self.http_request,
                    provider=self.provider,
                )
                if value is not None:
                    name_id.text = str(value)
                return name_id
            except PropertyMappingExpressionException as exc:
                Event.new(
                    EventAction.CONFIGURATION_ERROR,
                    message=f"Failed to evaluate property-mapping: {str(exc)}",
                    mapping=self.provider.name_id_mapping,
                ).from_http(self.http_request)
                # fall back to the persistent value already set above
                return name_id
        if name_id.attrib["Format"] == SAML_NAME_ID_FORMAT_EMAIL:
            name_id.text = self.http_request.user.email
            return name_id
        if name_id.attrib["Format"] in [
            SAML_NAME_ID_FORMAT_PERSISTENT,
            SAML_NAME_ID_FORMAT_UNSPECIFIED,
        ]:
            name_id.text = persistent
            return name_id
        if name_id.attrib["Format"] == SAML_NAME_ID_FORMAT_X509:
            # This attribute is statically set by the LDAP source
            name_id.text = self.http_request.user.attributes.get(
                LDAP_DISTINGUISHED_NAME, persistent
            )
            return name_id
        if name_id.attrib["Format"] == SAML_NAME_ID_FORMAT_WINDOWS:
            # This attribute is statically set by the LDAP source
            name_id.text = self.http_request.user.attributes.get("upn", persistent)
            return name_id
        if name_id.attrib["Format"] == SAML_NAME_ID_FORMAT_TRANSIENT:
            # Use the hash of the user's session, which changes every session
            session_key: str = self.http_request.session.session_key
            name_id.text = sha256(session_key.encode()).hexdigest()
            return name_id
        raise UnsupportedNameIDFormat(
            f"Assertion contains NameID with unsupported format {name_id.attrib['Format']}."
        )

    def get_assertion_subject(self) -> Element:
        """Generate Subject Element with NameID and SubjectConfirmation Objects"""
        subject = Element(f"{{{NS_SAML_ASSERTION}}}Subject")
        subject.append(self.get_name_id())
        subject_confirmation = SubElement(subject, f"{{{NS_SAML_ASSERTION}}}SubjectConfirmation")
        subject_confirmation.attrib["Method"] = "urn:oasis:names:tc:SAML:2.0:cm:bearer"
        subject_confirmation_data = SubElement(
            subject_confirmation, f"{{{NS_SAML_ASSERTION}}}SubjectConfirmationData"
        )
        # InResponseTo is only set for SP-initiated flows (IdP-initiated
        # requests have no request ID).
        if self.auth_n_request.id:
            subject_confirmation_data.attrib["InResponseTo"] = self.auth_n_request.id
        subject_confirmation_data.attrib["NotOnOrAfter"] = self._valid_not_on_or_after
        subject_confirmation_data.attrib["Recipient"] = self.provider.acs_url
        return subject

    def get_assertion(self) -> Element:
        """Generate Main Assertion Element"""
        assertion = Element(f"{{{NS_SAML_ASSERTION}}}Assertion", nsmap=NS_MAP)
        assertion.attrib["Version"] = "2.0"
        assertion.attrib["ID"] = self._assertion_id
        assertion.attrib["IssueInstant"] = self._issue_instant
        assertion.append(self.get_issuer())
        if self.provider.signing_kp:
            sign_algorithm_transform = SIGN_ALGORITHM_TRANSFORM_MAP.get(
                self.provider.signature_algorithm, xmlsec.constants.TransformRsaSha1
            )
            # Insert an (as yet unsigned) Signature template; it is filled in
            # later by build_response() once the document is complete.
            signature = xmlsec.template.create(
                assertion,
                xmlsec.constants.TransformExclC14N,
                sign_algorithm_transform,
                ns="ds",  # type: ignore
            )
            assertion.append(signature)
        assertion.append(self.get_assertion_subject())
        assertion.append(self.get_assertion_conditions())
        assertion.append(self.get_assertion_auth_n_statement())
        assertion.append(self.get_attributes())
        return assertion

    def get_response(self) -> Element:
        """Generate Root response element"""
        response = Element(f"{{{NS_SAML_PROTOCOL}}}Response", nsmap=NS_MAP)
        response.attrib["Version"] = "2.0"
        response.attrib["IssueInstant"] = self._issue_instant
        response.attrib["Destination"] = self.provider.acs_url
        response.attrib["ID"] = get_random_id()
        if self.auth_n_request.id:
            response.attrib["InResponseTo"] = self.auth_n_request.id
        response.append(self.get_issuer())
        status = SubElement(response, f"{{{NS_SAML_PROTOCOL}}}Status")
        status_code = SubElement(status, f"{{{NS_SAML_PROTOCOL}}}StatusCode")
        status_code.attrib["Value"] = "urn:oasis:names:tc:SAML:2.0:status:Success"
        response.append(self.get_assertion())
        return response

    def build_response(self) -> str:
        """Build string XML Response and sign if signing is enabled."""
        root_response = self.get_response()
        if self.provider.signing_kp:
            digest_algorithm_transform = DIGEST_ALGORITHM_TRANSLATION_MAP.get(
                self.provider.digest_algorithm, xmlsec.constants.TransformSha1
            )
            # Locate the Assertion and the empty Signature template that
            # get_assertion() appended to it.
            assertion = root_response.xpath("//saml:Assertion", namespaces=NS_MAP)[0]
            xmlsec.tree.add_ids(assertion, ["ID"])
            signature_node = xmlsec.tree.find_node(assertion, xmlsec.constants.NodeSignature)
            # Reference the assertion by its ID, using an enveloped signature
            # with exclusive canonicalization.
            ref = xmlsec.template.add_reference(
                signature_node,
                digest_algorithm_transform,
                uri="#" + self._assertion_id,
            )
            xmlsec.template.add_transform(ref, xmlsec.constants.TransformEnveloped)
            xmlsec.template.add_transform(ref, xmlsec.constants.TransformExclC14N)
            key_info = xmlsec.template.ensure_key_info(signature_node)
            xmlsec.template.add_x509_data(key_info)
            ctx = xmlsec.SignatureContext()
            key = xmlsec.Key.from_memory(
                self.provider.signing_kp.key_data,
                xmlsec.constants.KeyDataFormatPem,
                None,
            )
            key.load_cert_from_memory(
                self.provider.signing_kp.certificate_data,
                xmlsec.constants.KeyDataFormatCertPem,
            )
            ctx.key = key
            ctx.sign(signature_node)
        return etree.tostring(root_response).decode("utf-8")  # nosec
import errno
import os
import sys
import _psutil_osx
import _psutil_posix
from psutil import _psposix
from psutil.error import AccessDenied, NoSuchProcess, TimeoutExpired
from psutil._compat import namedtuple
from psutil._common import *
__extra__all__ = []
# --- constants
NUM_CPUS = _psutil_osx.get_num_cpus()
BOOT_TIME = _psutil_osx.get_system_boot_time()
_TERMINAL_MAP = _psposix._get_terminal_map()
_cputimes_ntuple = namedtuple('cputimes', 'user nice system idle')
# --- functions
def phymem_usage():
    """Physical system memory as a (total, used, free) tuple."""
    total_mem = _psutil_osx.get_total_phymem()
    avail_mem = _psutil_osx.get_avail_phymem()
    # "used" is derived from the two values the C extension exposes.
    consumed = total_mem - avail_mem
    return nt_sysmeminfo(
        total_mem, consumed, avail_mem, usage_percent(consumed, total_mem, _round=1)
    )
def virtmem_usage():
    """Virtual system memory as a (total, used, free) tuple."""
    total_mem = _psutil_osx.get_total_virtmem()
    avail_mem = _psutil_osx.get_avail_virtmem()
    # "used" is derived from the two values the C extension exposes.
    consumed = total_mem - avail_mem
    return nt_sysmeminfo(
        total_mem, consumed, avail_mem, usage_percent(consumed, total_mem, _round=1)
    )
def get_system_cpu_times():
    """Return aggregate system CPU times as a namedtuple."""
    times = _psutil_osx.get_system_cpu_times()
    user, nice, system, idle = times
    return _cputimes_ntuple(user=user, nice=nice, system=system, idle=idle)
def get_system_per_cpu_times():
    """Return system CPU times as a list of namedtuples, one per logical CPU."""
    # Idiomatic list comprehension instead of the manual append loop;
    # result is identical.
    return [
        _cputimes_ntuple(user, nice, system, idle)
        for user, nice, system, idle in _psutil_osx.get_system_per_cpu_times()
    ]
def disk_partitions(all=False):
    """Return mounted disk partitions as a list of nt_partition tuples."""
    results = []
    for device, mountpoint, fstype, opts in _psutil_osx.get_disk_partitions():
        if device == 'none':
            device = ''
        # Unless `all` was requested, skip entries whose device is not an
        # existing absolute filesystem path.
        if not all and (not os.path.isabs(device) or not os.path.exists(device)):
            continue
        results.append(nt_partition(device, mountpoint, fstype, opts))
    return results
def get_system_users():
    """Return currently logged-in users as a list of nt_user tuples."""
    users = []
    for user, tty, hostname, tstamp in _psutil_osx.get_system_users():
        # '~' tty entries are reboot/shutdown records; entries without a
        # timestamp are skipped as well.
        if tty == '~' or not tstamp:
            continue
        users.append(nt_user(user, tty or None, hostname or None, tstamp))
    return users
get_pid_list = _psutil_osx.get_pid_list
pid_exists = _psposix.pid_exists
get_disk_usage = _psposix.get_disk_usage
network_io_counters = _psutil_osx.get_network_io_counters
disk_io_counters = _psutil_osx.get_disk_io_counters
# --- decorator
def wrap_exceptions(callable):
    """Decorator which translates OSErrors raised by the wrapped Process
    method into psutil exceptions:

    - ESRCH          -> NoSuchProcess
    - EPERM / EACCES -> AccessDenied

    Any other OSError is re-raised unchanged.
    """
    import functools

    # functools.wraps preserves the wrapped function's __name__/__doc__,
    # which the original wrapper lost.
    @functools.wraps(callable)
    def wrapper(self, *args, **kwargs):
        try:
            return callable(self, *args, **kwargs)
        except OSError:
            # sys.exc_info() instead of "except ... as" matches the
            # Python-2-compatible style used throughout this module.
            err = sys.exc_info()[1]
            if err.errno == errno.ESRCH:
                raise NoSuchProcess(self.pid, self._process_name)
            if err.errno in (errno.EPERM, errno.EACCES):
                raise AccessDenied(self.pid, self._process_name)
            raise
    return wrapper
_status_map = {
_psutil_osx.SIDL : STATUS_IDLE,
_psutil_osx.SRUN : STATUS_RUNNING,
_psutil_osx.SSLEEP : STATUS_SLEEPING,
_psutil_osx.SSTOP : STATUS_STOPPED,
_psutil_osx.SZOMB : STATUS_ZOMBIE,
}
class Process(object):
    """Wrapper class around underlying C implementation."""

    # __slots__ keeps per-instance memory low: only the pid and the cached
    # process name (used in error messages) are stored.
    __slots__ = ["pid", "_process_name"]

    def __init__(self, pid):
        self.pid = pid
        self._process_name = None

    @wrap_exceptions
    def get_process_name(self):
        """Return process name as a string of limited len (15)."""
        return _psutil_osx.get_process_name(self.pid)

    def get_process_exe(self):
        # no such thing as "exe" on OS X; it will maybe be determined
        # later from cmdline[0]
        if not pid_exists(self.pid):
            raise NoSuchProcess(self.pid, self._process_name)
        return ""

    @wrap_exceptions
    def get_process_cmdline(self):
        """Return process cmdline as a list of arguments."""
        if not pid_exists(self.pid):
            raise NoSuchProcess(self.pid, self._process_name)
        return _psutil_osx.get_process_cmdline(self.pid)

    @wrap_exceptions
    def get_process_ppid(self):
        """Return process parent pid."""
        return _psutil_osx.get_process_ppid(self.pid)

    @wrap_exceptions
    def get_process_uids(self):
        """Return a (real, effective, saved) uids namedtuple."""
        real, effective, saved = _psutil_osx.get_process_uids(self.pid)
        return nt_uids(real, effective, saved)

    @wrap_exceptions
    def get_process_gids(self):
        """Return a (real, effective, saved) gids namedtuple."""
        real, effective, saved = _psutil_osx.get_process_gids(self.pid)
        return nt_gids(real, effective, saved)

    @wrap_exceptions
    def get_process_terminal(self):
        """Return the terminal the process is attached to, or None."""
        tty_nr = _psutil_osx.get_process_tty_nr(self.pid)
        try:
            return _TERMINAL_MAP[tty_nr]
        except KeyError:
            # tty number not present in the terminal map -> no terminal
            return None

    @wrap_exceptions
    def get_memory_info(self):
        """Return a tuple with the process' RSS and VMS size."""
        rss, vms = _psutil_osx.get_memory_info(self.pid)
        return nt_meminfo(rss, vms)

    @wrap_exceptions
    def get_cpu_times(self):
        """Return a (user, system) CPU times namedtuple."""
        user, system = _psutil_osx.get_cpu_times(self.pid)
        return nt_cputimes(user, system)

    @wrap_exceptions
    def get_process_create_time(self):
        """Return the start time of the process as a number of seconds since
        the epoch."""
        return _psutil_osx.get_process_create_time(self.pid)

    @wrap_exceptions
    def get_process_num_threads(self):
        """Return the number of threads belonging to the process."""
        return _psutil_osx.get_process_num_threads(self.pid)

    @wrap_exceptions
    def get_open_files(self):
        """Return files opened by process."""
        # pid 0 (kernel) is special-cased to an empty list.
        if self.pid == 0:
            return []
        files = []
        rawlist = _psutil_osx.get_process_open_files(self.pid)
        for path, fd in rawlist:
            # only regular files are reported
            if os.path.isfile(path):
                ntuple = nt_openfile(path, fd)
                files.append(ntuple)
        return files

    @wrap_exceptions
    def get_connections(self, kind='inet'):
        """Return network connections opened by a process as a list of
        namedtuples.
        """
        if kind not in conn_tmap:
            raise ValueError("invalid %r kind argument; choose between %s"
                             % (kind, ', '.join([repr(x) for x in conn_tmap])))
        families, types = conn_tmap[kind]
        ret = _psutil_osx.get_process_connections(self.pid, families, types)
        return [nt_connection(*conn) for conn in ret]

    @wrap_exceptions
    def get_num_fds(self):
        """Return the number of file descriptors opened by this process."""
        if self.pid == 0:
            return 0
        return _psutil_osx.get_process_num_fds(self.pid)

    @wrap_exceptions
    def process_wait(self, timeout=None):
        """Wait for process termination; re-raise TimeoutExpired with
        pid/name context attached."""
        try:
            return _psposix.wait_pid(self.pid, timeout)
        except TimeoutExpired:
            raise TimeoutExpired(self.pid, self._process_name)

    @wrap_exceptions
    def get_process_nice(self):
        """Return the process niceness (priority)."""
        return _psutil_posix.getpriority(self.pid)

    @wrap_exceptions
    def set_process_nice(self, value):
        """Set the process niceness (priority)."""
        return _psutil_posix.setpriority(self.pid, value)

    @wrap_exceptions
    def get_process_status(self):
        """Return the process status as one of the STATUS_* constants."""
        code = _psutil_osx.get_process_status(self.pid)
        if code in _status_map:
            return _status_map[code]
        # unknown status codes are reported as "?"
        return constant(-1, "?")

    @wrap_exceptions
    def get_process_threads(self):
        """Return threads belonging to the process as a list of namedtuples."""
        rawlist = _psutil_osx.get_process_threads(self.pid)
        retlist = []
        for thread_id, utime, stime in rawlist:
            ntuple = nt_thread(thread_id, utime, stime)
            retlist.append(ntuple)
        return retlist

    # Return-type namedtuples for get_memory_maps (grouped vs extended form).
    nt_mmap_grouped = namedtuple('mmap',
        'path rss private swapped dirtied ref_count shadow_depth')
    nt_mmap_ext = namedtuple('mmap',
        'addr perms path rss private swapped dirtied ref_count shadow_depth')

    @wrap_exceptions
    def get_memory_maps(self):
        """Return this process' memory mappings."""
        return _psutil_osx.get_process_memory_maps(self.pid)

import errno
import os
import sys
import _psutil_osx
import _psutil_posix
from psutil import _psposix
from psutil.error import AccessDenied, NoSuchProcess, TimeoutExpired
from psutil._compat import namedtuple
from psutil._common import *
__extra__all__ = []
# --- constants
NUM_CPUS = _psutil_osx.get_num_cpus()
BOOT_TIME = _psutil_osx.get_system_boot_time()
_TERMINAL_MAP = _psposix._get_terminal_map()
_cputimes_ntuple = namedtuple('cputimes', 'user nice system idle')
# --- functions
def phymem_usage():
    """Physical system memory as a (total, used, free) tuple."""
    total = _psutil_osx.get_total_phymem()
    free = _psutil_osx.get_avail_phymem()
    # "used" is derived; the C extension only exposes total and available.
    used = total - free
    percent = usage_percent(used, total, _round=1)
    return nt_sysmeminfo(total, used, free, percent)
def virtmem_usage():
    """Virtual system memory as a (total, used, free) tuple."""
    total = _psutil_osx.get_total_virtmem()
    free = _psutil_osx.get_avail_virtmem()
    # "used" is derived; the C extension only exposes total and available.
    used = total - free
    percent = usage_percent(used, total, _round=1)
    return nt_sysmeminfo(total, used, free, percent)
def get_system_cpu_times():
    """Return system CPU times as a namedtuple."""
    # The C extension returns a plain 4-tuple; repack it as a namedtuple.
    user, nice, system, idle = _psutil_osx.get_system_cpu_times()
    return _cputimes_ntuple(user, nice, system, idle)
def get_system_per_cpu_times():
    """Return system CPU times as a list of namedtuples, one per logical CPU."""
    # Idiomatic list comprehension instead of the manual append loop;
    # result is identical.
    return [
        _cputimes_ntuple(user, nice, system, idle)
        for user, nice, system, idle in _psutil_osx.get_system_per_cpu_times()
    ]
def disk_partitions(all=False):
    """Return mounted disk partitions as a list of nt_partition tuples."""
    retlist = []
    partitions = _psutil_osx.get_disk_partitions()
    for partition in partitions:
        device, mountpoint, fstype, opts = partition
        if device == 'none':
            device = ''
        # Unless `all` was requested, skip entries whose device is not an
        # existing absolute filesystem path.
        if not all:
            if not os.path.isabs(device) \
                    or not os.path.exists(device):
                continue
        ntuple = nt_partition(device, mountpoint, fstype, opts)
        retlist.append(ntuple)
    return retlist
def get_system_users():
    """Return currently logged-in users as a list of nt_user tuples."""
    retlist = []
    rawlist = _psutil_osx.get_system_users()
    for item in rawlist:
        user, tty, hostname, tstamp = item
        if tty == '~':
            continue  # reboot or shutdown
        # entries without a timestamp are skipped as well
        if not tstamp:
            continue
        nt = nt_user(user, tty or None, hostname or None, tstamp)
        retlist.append(nt)
    return retlist
get_pid_list = _psutil_osx.get_pid_list
pid_exists = _psposix.pid_exists
get_disk_usage = _psposix.get_disk_usage
network_io_counters = _psutil_osx.get_network_io_counters
disk_io_counters = _psutil_osx.get_disk_io_counters
# --- decorator
def wrap_exceptions(callable):
    """Decorator which translates OSErrors raised by the wrapped Process
    method into psutil exceptions:

    - ESRCH          -> NoSuchProcess
    - EPERM / EACCES -> AccessDenied

    Any other OSError is re-raised unchanged.
    """
    import functools

    # functools.wraps preserves the wrapped function's __name__/__doc__,
    # which the original wrapper lost.
    @functools.wraps(callable)
    def wrapper(self, *args, **kwargs):
        try:
            return callable(self, *args, **kwargs)
        except OSError:
            # sys.exc_info() instead of "except ... as" matches the
            # Python-2-compatible style used throughout this module.
            err = sys.exc_info()[1]
            if err.errno == errno.ESRCH:
                raise NoSuchProcess(self.pid, self._process_name)
            if err.errno in (errno.EPERM, errno.EACCES):
                raise AccessDenied(self.pid, self._process_name)
            raise
    return wrapper
_status_map = {
_psutil_osx.SIDL : STATUS_IDLE,
_psutil_osx.SRUN : STATUS_RUNNING,
_psutil_osx.SSLEEP : STATUS_SLEEPING,
_psutil_osx.SSTOP : STATUS_STOPPED,
_psutil_osx.SZOMB : STATUS_ZOMBIE,
}
class Process(object):
    """Wrapper class around underlying C implementation."""

    # __slots__ keeps per-instance memory low: only the pid and the cached
    # process name (used in error messages) are stored.
    __slots__ = ["pid", "_process_name"]

    def __init__(self, pid):
        self.pid = pid
        self._process_name = None

    @wrap_exceptions
    def get_process_name(self):
        """Return process name as a string of limited len (15)."""
        return _psutil_osx.get_process_name(self.pid)

    def get_process_exe(self):
        # no such thing as "exe" on OS X; it will maybe be determined
        # later from cmdline[0]
        if not pid_exists(self.pid):
            raise NoSuchProcess(self.pid, self._process_name)
        return ""

    @wrap_exceptions
    def get_process_cmdline(self):
        """Return process cmdline as a list of arguments."""
        if not pid_exists(self.pid):
            raise NoSuchProcess(self.pid, self._process_name)
        return _psutil_osx.get_process_cmdline(self.pid)

    @wrap_exceptions
    def get_process_ppid(self):
        """Return process parent pid."""
        return _psutil_osx.get_process_ppid(self.pid)

    @wrap_exceptions
    def get_process_uids(self):
        """Return a (real, effective, saved) uids namedtuple."""
        real, effective, saved = _psutil_osx.get_process_uids(self.pid)
        return nt_uids(real, effective, saved)

    @wrap_exceptions
    def get_process_gids(self):
        """Return a (real, effective, saved) gids namedtuple."""
        real, effective, saved = _psutil_osx.get_process_gids(self.pid)
        return nt_gids(real, effective, saved)

    @wrap_exceptions
    def get_process_terminal(self):
        """Return the terminal the process is attached to, or None."""
        tty_nr = _psutil_osx.get_process_tty_nr(self.pid)
        try:
            return _TERMINAL_MAP[tty_nr]
        except KeyError:
            # tty number not present in the terminal map -> no terminal
            return None

    @wrap_exceptions
    def get_memory_info(self):
        """Return a tuple with the process' RSS and VMS size."""
        rss, vms = _psutil_osx.get_memory_info(self.pid)
        return nt_meminfo(rss, vms)

    @wrap_exceptions
    def get_cpu_times(self):
        """Return a (user, system) CPU times namedtuple."""
        user, system = _psutil_osx.get_cpu_times(self.pid)
        return nt_cputimes(user, system)

    @wrap_exceptions
    def get_process_create_time(self):
        """Return the start time of the process as a number of seconds since
        the epoch."""
        return _psutil_osx.get_process_create_time(self.pid)

    @wrap_exceptions
    def get_process_num_threads(self):
        """Return the number of threads belonging to the process."""
        return _psutil_osx.get_process_num_threads(self.pid)

    @wrap_exceptions
    def get_open_files(self):
        """Return files opened by process."""
        # pid 0 (kernel) is special-cased to an empty list.
        if self.pid == 0:
            return []
        files = []
        rawlist = _psutil_osx.get_process_open_files(self.pid)
        for path, fd in rawlist:
            # only regular files are reported
            if os.path.isfile(path):
                ntuple = nt_openfile(path, fd)
                files.append(ntuple)
        return files

    @wrap_exceptions
    def get_connections(self, kind='inet'):
        """Return network connections opened by a process as a list of
        namedtuples.
        """
        if kind not in conn_tmap:
            raise ValueError("invalid %r kind argument; choose between %s"
                             % (kind, ', '.join([repr(x) for x in conn_tmap])))
        families, types = conn_tmap[kind]
        ret = _psutil_osx.get_process_connections(self.pid, families, types)
        return [nt_connection(*conn) for conn in ret]

    @wrap_exceptions
    def get_num_fds(self):
        """Return the number of file descriptors opened by this process."""
        if self.pid == 0:
            return 0
        return _psutil_osx.get_process_num_fds(self.pid)

    @wrap_exceptions
    def process_wait(self, timeout=None):
        """Wait for process termination; re-raise TimeoutExpired with
        pid/name context attached."""
        try:
            return _psposix.wait_pid(self.pid, timeout)
        except TimeoutExpired:
            raise TimeoutExpired(self.pid, self._process_name)

    @wrap_exceptions
    def get_process_nice(self):
        """Return the process niceness (priority)."""
        return _psutil_posix.getpriority(self.pid)

    @wrap_exceptions
    def set_process_nice(self, value):
        """Set the process niceness (priority)."""
        return _psutil_posix.setpriority(self.pid, value)

    @wrap_exceptions
    def get_process_status(self):
        """Return the process status as one of the STATUS_* constants."""
        code = _psutil_osx.get_process_status(self.pid)
        if code in _status_map:
            return _status_map[code]
        # unknown status codes are reported as "?"
        return constant(-1, "?")

    @wrap_exceptions
    def get_process_threads(self):
        """Return threads belonging to the process as a list of namedtuples."""
        rawlist = _psutil_osx.get_process_threads(self.pid)
        retlist = []
        for thread_id, utime, stime in rawlist:
            ntuple = nt_thread(thread_id, utime, stime)
            retlist.append(ntuple)
        return retlist

    # Return-type namedtuples for get_memory_maps (grouped vs extended form).
    nt_mmap_grouped = namedtuple('mmap',
        'path rss private swapped dirtied ref_count shadow_depth')
    nt_mmap_ext = namedtuple('mmap',
        'addr perms path rss private swapped dirtied ref_count shadow_depth')

    @wrap_exceptions
    def get_memory_maps(self):
        """Return this process' memory mappings."""
        return _psutil_osx.get_process_memory_maps(self.pid)
import logging
from pathlib import Path
import numpy as np
from PIL import Image
from proteus.models.base import BaseModel
from proteus.models.base.modelconfigs import (
BaseModelConfig,
BatchingModelConfig,
QuantizationModelConfig,
TritonOptimizationModelConfig,
)
from resizeimage import resizeimage
folder_path = Path(__file__).parent
logger = logging.getLogger(__name__)
class ModelConfig(
    BaseModelConfig,
    TritonOptimizationModelConfig,
    BatchingModelConfig,
    QuantizationModelConfig,  # this will require ONNX opset 11
):
    """Configuration mixin stack used by the SuperResolution model."""
    pass
class SuperResolution(BaseModel):
    """Proteus wrapper around the ONNX sub-pixel CNN super-resolution model."""

    DESCRIPTION = (
        "Implementation of Sub-Pixel CNN (2016) - https://arxiv.org/abs/1609.05158"
    )
    MODEL_URL = "https://github.com/onnx/models/raw/master/vision/super_resolution/sub_pixel_cnn_2016/model/super-resolution-10.onnx"
    CONFIG_PATH = f"{folder_path}/config.template"
    INPUT_NAME = "input"
    OUTPUT_NAMES = ["output"]
    DTYPE = "FP32"
    MODEL_CONFIG = ModelConfig

    @classmethod
    def preprocess(cls, img):
        """
        Pre-process an image to meet the size, type and format
        requirements specified by the parameters.

        :param img: Pillow image
        :returns:
            - model_input: input as required by the model
            - extra_data: dict of data that is needed by the postprocess function
        """
        extra_data = {}
        # Crop-resize to the fixed 224x224 input the network expects.
        img = resizeimage.resize_cover(img, [224, 224], validate=False)
        # Only the luma (Y) channel is fed to the network; the chroma
        # channels are stashed in extra_data and re-attached in postprocess.
        img_ycbcr = img.convert("YCbCr")
        img_y_0, img_cb, img_cr = img_ycbcr.split()
        img_ndarray = np.asarray(img_y_0)
        img_4 = np.expand_dims(img_ndarray, axis=0)  # prepend axis -> (1, H, W)
        model_input = img_4.astype(np.float32) / 255.0  # scale to [0, 1]
        # Save some parts in the PREDICTION_DATA store for postprocess
        extra_data["img_cb"] = img_cb
        extra_data["img_cr"] = img_cr
        return model_input, extra_data

    @classmethod
    def postprocess(cls, results, extra_data, batch_size, batching):
        """
        Post-process results to return valid outputs.

        :param results: model outputs (read via ``results.as_numpy``)
        :param extra_data: dict of data that is needed by the postprocess function
        :param batch_size: number of items in the batch (unused here)
        :param batching: boolean flag indicating if batching (unused here)
        :returns: upscaled RGB Pillow image
        """
        # Fetch from the PREDICTION_DATA store
        img_cb = extra_data["img_cb"]
        img_cr = extra_data["img_cr"]
        output_name = cls.OUTPUT_NAMES[0]
        results = results.as_numpy(output_name)
        logger.debug(results)
        # Scale the luma plane back to 8-bit range, clamp, and rebuild a
        # grayscale image from it.
        img_out_y = Image.fromarray(
            np.uint8((results[0] * 255.0).clip(0, 255)[0]), mode="L"
        )
        # Recombine with bicubic-upscaled chroma channels.
        # NOTE(review): Image.BICUBIC is deprecated in recent Pillow in favour
        # of Image.Resampling.BICUBIC - confirm against the pinned version.
        final_img = Image.merge(
            "YCbCr",
            [
                img_out_y,
                img_cb.resize(img_out_y.size, Image.BICUBIC),
                img_cr.resize(img_out_y.size, Image.BICUBIC),
            ],
        ).convert("RGB")
        logger.debug(final_img)
        return final_img

import logging
from pathlib import Path
import numpy as np
from PIL import Image
from proteus.models.base import BaseModel
from proteus.models.base.modelconfigs import (
BaseModelConfig,
BatchingModelConfig,
QuantizationModelConfig,
TritonOptimizationModelConfig,
)
from resizeimage import resizeimage
folder_path = Path(__file__).parent
logger = logging.getLogger(__name__)
class ModelConfig(
    BaseModelConfig,
    TritonOptimizationModelConfig,
    BatchingModelConfig,
    QuantizationModelConfig,  # this will require ONNX opset 11
):
    """Configuration mixin stack used by the SuperResolution model."""
    pass
class SuperResolution(BaseModel):
    """Proteus wrapper around the ONNX sub-pixel CNN super-resolution model."""

    DESCRIPTION = (
        "Implementation of Sub-Pixel CNN (2016) - https://arxiv.org/abs/1609.05158"
    )
    MODEL_URL = "https://github.com/onnx/models/raw/master/vision/super_resolution/sub_pixel_cnn_2016/model/super-resolution-10.onnx"
    CONFIG_PATH = f"{folder_path}/config.template"
    INPUT_NAME = "input"
    OUTPUT_NAMES = ["output"]
    DTYPE = "FP32"
    MODEL_CONFIG = ModelConfig

    @classmethod
    def preprocess(cls, img):
        """
        Pre-process an image to meet the size, type and format
        requirements specified by the parameters.

        :param img: Pillow image
        :returns:
            - model_input: input as required by the model
            - extra_data: dict of data that is needed by the postprocess function
        """
        extra_data = {}
        # Crop-resize to the fixed 224x224 input the network expects.
        img = resizeimage.resize_cover(img, [224, 224], validate=False)
        # Only the luma (Y) channel is fed to the network; the chroma
        # channels are stashed in extra_data and re-attached in postprocess.
        img_ycbcr = img.convert("YCbCr")
        img_y_0, img_cb, img_cr = img_ycbcr.split()
        img_ndarray = np.asarray(img_y_0)
        img_4 = np.expand_dims(img_ndarray, axis=0)  # prepend axis -> (1, H, W)
        model_input = img_4.astype(np.float32) / 255.0  # scale to [0, 1]
        # Save some parts in the PREDICTION_DATA store for postprocess
        extra_data["img_cb"] = img_cb
        extra_data["img_cr"] = img_cr
        return model_input, extra_data

    @classmethod
    def postprocess(cls, results, extra_data, batch_size, batching):
        """
        Post-process results to return valid outputs.

        :param results: model outputs (read via ``results.as_numpy``)
        :param extra_data: dict of data that is needed by the postprocess function
        :param batch_size: number of items in the batch (unused here)
        :param batching: boolean flag indicating if batching (unused here)
        :returns: upscaled RGB Pillow image
        """
        # Fetch from the PREDICTION_DATA store
        img_cb = extra_data["img_cb"]
        img_cr = extra_data["img_cr"]
        output_name = cls.OUTPUT_NAMES[0]
        results = results.as_numpy(output_name)
        logger.debug(results)
        # Scale the luma plane back to 8-bit range, clamp, and rebuild a
        # grayscale image from it.
        img_out_y = Image.fromarray(
            np.uint8((results[0] * 255.0).clip(0, 255)[0]), mode="L"
        )
        # Recombine with bicubic-upscaled chroma channels.
        # NOTE(review): Image.BICUBIC is deprecated in recent Pillow in favour
        # of Image.Resampling.BICUBIC - confirm against the pinned version.
        final_img = Image.merge(
            "YCbCr",
            [
                img_out_y,
                img_cb.resize(img_out_y.size, Image.BICUBIC),
                img_cr.resize(img_out_y.size, Image.BICUBIC),
            ],
        ).convert("RGB")
        logger.debug(final_img)
        return final_img
import os
import json
import time
from textwrap import dedent
from .. import run_nbgrader
from .base import BaseTestApp
from .conftest import notwindows
@notwindows
class TestNbGraderList(BaseTestApp):
def _release(self, assignment, exchange, cache, course_dir, course="abc101"):
self._copy_file(os.path.join("files", "test.ipynb"), os.path.join(course_dir, "release", assignment, "p1.ipynb"))
run_nbgrader([
"release", assignment,
"--course", course,
"--TransferApp.cache_directory={}".format(cache),
"--TransferApp.exchange_directory={}".format(exchange)
])
def _fetch(self, assignment, exchange, cache, course="abc101", flags=None):
cmd = [
"fetch", assignment,
"--course", course,
"--TransferApp.cache_directory={}".format(cache),
"--TransferApp.exchange_directory={}".format(exchange)
]
if flags is not None:
cmd.extend(flags)
run_nbgrader(cmd)
def _submit(self, assignment, exchange, cache, course="abc101", flags=None):
cmd = [
"submit", assignment,
"--course", course,
"--TransferApp.cache_directory={}".format(cache),
"--TransferApp.exchange_directory={}".format(exchange)
]
if flags is not None:
cmd.extend(flags)
run_nbgrader(cmd)
def _list(self, exchange, cache, assignment=None, flags=None, retcode=0):
cmd = [
"list",
"--TransferApp.cache_directory={}".format(cache),
"--TransferApp.exchange_directory={}".format(exchange),
]
if flags is not None:
cmd.extend(flags)
if assignment is not None:
cmd.append(assignment)
if flags and '--json' in flags:
stdout = True
else:
stdout = False
return run_nbgrader(cmd, retcode=retcode, stdout=stdout)
    def test_help(self):
        """Does the help display without error?"""
        # Smoke test: "nbgrader list --help-all" must exit cleanly.
        run_nbgrader(["list", "--help-all"])
    def test_list_released(self, exchange, cache, course_dir):
        """Released assignments are listed, optionally filtered by --course."""
        self._release("ps1", exchange, cache, course_dir)
        self._release("ps1", exchange, cache, course_dir, course="xyz200")
        # With a course filter, only that course's assignments appear.
        output = self._list(exchange, cache, "ps1", flags=["--course", "abc101"])
        assert output == dedent(
            """
            [ListApp | INFO] Released assignments:
            [ListApp | INFO] abc101 ps1
            """
        ).lstrip()
        assert self._list(exchange, cache, "ps1", flags=["--course", "xyz200"]) == dedent(
            """
            [ListApp | INFO] Released assignments:
            [ListApp | INFO] xyz200 ps1
            """
        ).lstrip()
        # Without a course filter, assignments from every course are listed.
        assert self._list(exchange, cache, "ps1") == dedent(
            """
            [ListApp | INFO] Released assignments:
            [ListApp | INFO] abc101 ps1
            [ListApp | INFO] xyz200 ps1
            """
        ).lstrip()
        self._release("ps2", exchange, cache, course_dir)
        self._release("ps2", exchange, cache, course_dir, course="xyz200")
        assert self._list(exchange, cache, "ps2") == dedent(
            """
            [ListApp | INFO] Released assignments:
            [ListApp | INFO] abc101 ps2
            [ListApp | INFO] xyz200 ps2
            """
        ).lstrip()
        # Omitting the assignment name lists everything.
        assert self._list(exchange, cache) == dedent(
            """
            [ListApp | INFO] Released assignments:
            [ListApp | INFO] abc101 ps1
            [ListApp | INFO] abc101 ps2
            [ListApp | INFO] xyz200 ps1
            [ListApp | INFO] xyz200 ps2
            """
        ).lstrip()
    def test_list_fetched(self, exchange, cache, course_dir):
        """Already-fetched assignments are marked '(already downloaded)'."""
        self._release("ps1", exchange, cache, course_dir)
        self._release("ps2", exchange, cache, course_dir)
        self._fetch("ps1", exchange, cache)
        assert self._list(exchange, cache) == dedent(
            """
            [ListApp | INFO] Released assignments:
            [ListApp | INFO] abc101 ps1 (already downloaded)
            [ListApp | INFO] abc101 ps2
            """
        ).lstrip()
def test_list_remove_outbound(self, exchange, cache, course_dir):
    """``list --remove`` deletes released assignments from the outbound exchange."""
    for assignment in ("ps1", "ps2"):
        self._release(assignment, exchange, cache, course_dir)
    # Removing ps1 leaves only ps2 in the released listing.
    self._list(exchange, cache, "ps1", flags=["--remove"])
    only_ps2 = dedent(
        """
        [ListApp | INFO] Released assignments:
        [ListApp | INFO] abc101 ps2
        """
    ).lstrip()
    assert self._list(exchange, cache) == only_ps2
    # Removing ps2 as well empties the listing.
    self._list(exchange, cache, "ps2", flags=["--remove"])
    empty_listing = dedent(
        """
        [ListApp | INFO] Released assignments:
        """
    ).lstrip()
    assert self._list(exchange, cache, "ps2") == empty_listing
def test_list_inbound(self, exchange, cache, course_dir):
    """Check that submitted assignments appear in the --inbound listing."""
    self._release("ps1", exchange, cache, course_dir)
    # Nothing submitted yet, so the inbound listing is empty.
    assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent(
        """
        [ListApp | INFO] Submitted assignments:
        """
    ).lstrip()
    self._fetch("ps1", exchange, cache)
    self._submit("ps1", exchange, cache)
    # Inbound submissions are named <user>+<assignment>+<timestamp>.
    filename, = os.listdir(os.path.join(exchange, "abc101", "inbound"))
    timestamp = filename.split("+")[2]
    assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent(
        """
        [ListApp | INFO] Submitted assignments:
        [ListApp | INFO] abc101 {} ps1 {}
        """.format(os.environ["USER"], timestamp)
    ).lstrip()
    # Sleep so the second submission gets a distinct timestamp.
    time.sleep(1)
    self._submit("ps1", exchange, cache)
    filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound")))
    timestamps = [x.split("+")[2] for x in filenames]
    # Both submissions should now be listed.
    assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent(
        """
        [ListApp | INFO] Submitted assignments:
        [ListApp | INFO] abc101 {} ps1 {}
        [ListApp | INFO] abc101 {} ps1 {}
        """.format(os.environ["USER"], timestamps[0], os.environ["USER"], timestamps[1])
    ).lstrip()
def test_list_cached(self, exchange, cache, course_dir):
    """Check that submissions show up in the local-cache (--cached) listing."""
    self._release("ps1", exchange, cache, course_dir)
    # Nothing submitted yet, so the cached listing is empty.
    assert self._list(exchange, cache, "ps1", flags=["--cached"]) == dedent(
        """
        [ListApp | INFO] Submitted assignments:
        """
    ).lstrip()
    self._fetch("ps1", exchange, cache)
    self._submit("ps1", exchange, cache)
    # Cached submissions are named <user>+<assignment>+<timestamp>.
    filename, = os.listdir(os.path.join(cache, "abc101"))
    timestamp = filename.split("+")[2]
    assert self._list(exchange, cache, "ps1", flags=["--cached"]) == dedent(
        """
        [ListApp | INFO] Submitted assignments:
        [ListApp | INFO] abc101 {} ps1 {}
        """.format(os.environ["USER"], timestamp)
    ).lstrip()
    # Sleep so the second submission gets a distinct timestamp.
    time.sleep(1)
    self._submit("ps1", exchange, cache)
    # Removing from the inbound exchange must not touch the local cache.
    self._list(exchange, cache, "ps1", flags=["--inbound", "--remove"])
    filenames = sorted(os.listdir(os.path.join(cache, "abc101")))
    timestamps = [x.split("+")[2] for x in filenames]
    assert self._list(exchange, cache, "ps1", flags=["--cached"]) == dedent(
        """
        [ListApp | INFO] Submitted assignments:
        [ListApp | INFO] abc101 {} ps1 {}
        [ListApp | INFO] abc101 {} ps1 {}
        """.format(os.environ["USER"], timestamps[0], os.environ["USER"], timestamps[1])
    ).lstrip()
def test_list_remove_inbound(self, exchange, cache, course_dir):
    """Check that --inbound --remove deletes submissions from the exchange."""
    self._release("ps1", exchange, cache, course_dir)
    self._fetch("ps1", exchange, cache)
    self._release("ps2", exchange, cache, course_dir)
    self._fetch("ps2", exchange, cache)
    self._submit("ps1", exchange, cache)
    self._submit("ps2", exchange, cache)
    # Filenames are <user>+<assignment>+<timestamp>; same user, so sorting
    # puts ps1's submission first and ps2's second.
    filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound")))
    timestamps = [x.split("+")[2] for x in filenames]
    # Removing ps1 leaves only the ps2 submission in the exchange.
    self._list(exchange, cache, "ps1", flags=["--inbound", "--remove"])
    assert self._list(exchange, cache, flags=["--inbound"]) == dedent(
        """
        [ListApp | INFO] Submitted assignments:
        [ListApp | INFO] abc101 {} ps2 {}
        """.format(os.environ["USER"], timestamps[1])
    ).lstrip()
    assert len(os.listdir(os.path.join(exchange, "abc101", "inbound"))) == 1
    # Removing ps2 as well empties the inbound directory entirely.
    self._list(exchange, cache, "ps2", flags=["--inbound", "--remove"])
    assert self._list(exchange, cache, flags=["--inbound"]) == dedent(
        """
        [ListApp | INFO] Submitted assignments:
        """
    ).lstrip()
    assert len(os.listdir(os.path.join(exchange, "abc101", "inbound"))) == 0
def test_list_remove_cached(self, exchange, cache, course_dir):
    """``list --cached --remove`` deletes submissions from the local cache."""
    for assignment in ("ps1", "ps2"):
        self._release(assignment, exchange, cache, course_dir)
        self._fetch(assignment, exchange, cache)
    self._submit("ps1", exchange, cache)
    self._submit("ps2", exchange, cache)
    # Cached names are <user>+<assignment>+<timestamp>; ps2's sorts second.
    cached_names = sorted(os.listdir(os.path.join(cache, "abc101")))
    cached_stamps = [name.split("+")[2] for name in cached_names]
    # Drop ps1 from the cache; only the ps2 submission should remain.
    self._list(exchange, cache, "ps1", flags=["--cached", "--remove"])
    remaining = dedent(
        """
        [ListApp | INFO] Submitted assignments:
        [ListApp | INFO] abc101 {} ps2 {}
        """.format(os.environ["USER"], cached_stamps[1])
    ).lstrip()
    assert self._list(exchange, cache, flags=["--cached"]) == remaining
    assert len(os.listdir(os.path.join(cache, "abc101"))) == 1
    # Drop ps2 as well; the cache directory should now be empty.
    self._list(exchange, cache, "ps2", flags=["--cached", "--remove"])
    empty_listing = dedent(
        """
        [ListApp | INFO] Submitted assignments:
        """
    ).lstrip()
    assert self._list(exchange, cache, flags=["--cached"]) == empty_listing
    assert len(os.listdir(os.path.join(cache, "abc101"))) == 0
def test_list_cached_and_inbound(self, exchange, cache):
    """--inbound and --cached together must fail with a non-zero exit code."""
    conflicting_flags = ["--inbound", "--cached"]
    self._list(exchange, cache, flags=conflicting_flags, retcode=1)
def test_list_json(self, exchange, cache, course_dir):
    """Walk one assignment through released -> fetched -> submitted -> removed
    and check both the human-readable listing and the --json output at each step."""
    self._release("ps1", exchange, cache, course_dir)
    assert self._list(exchange, cache) == dedent(
        """
        [ListApp | INFO] Released assignments:
        [ListApp | INFO] abc101 ps1
        """
    ).lstrip()
    # Released: the path points into the outbound exchange directory.
    assert json.loads(self._list(exchange, cache, flags=["--json"])) == [
        {
            "assignment_id": "ps1",
            "status": "released",
            "course_id": "abc101",
            "path": os.path.join(exchange, "abc101", "outbound", "ps1"),
            "notebooks": [
                {
                    "path": os.path.join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"),
                    "notebook_id": "p1"
                }
            ]
        }
    ]
    self._fetch("ps1", exchange, cache)
    assert self._list(exchange, cache) == dedent(
        """
        [ListApp | INFO] Released assignments:
        [ListApp | INFO] abc101 ps1 (already downloaded)
        """
    ).lstrip()
    # Fetched: the path points at the local working copy.
    assert json.loads(self._list(exchange, cache, flags=["--json"])) == [
        {
            "assignment_id": "ps1",
            "status": "fetched",
            "course_id": "abc101",
            "path": os.path.abspath("ps1"),
            "notebooks": [
                {
                    "path": os.path.abspath(os.path.join("ps1", "p1.ipynb")),
                    "notebook_id": "p1"
                }
            ]
        }
    ]
    self._submit("ps1", exchange, cache)
    filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound")))
    timestamps = [x.split("+")[2] for x in filenames]
    # BUG FIX: the three inbound listings below previously omitted the
    # ``cache`` argument (self._list(exchange, "ps1", ...)), which passed
    # "ps1" as the cache directory and left the assignment filter unset.
    assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent(
        """
        [ListApp | INFO] Submitted assignments:
        [ListApp | INFO] abc101 {} ps1 {}
        """.format(os.environ["USER"], timestamps[0])
    ).lstrip()
    submission = "{}+ps1+{}".format(os.environ["USER"], timestamps[0])
    # Submitted: the path points at the inbound exchange copy.
    assert json.loads(self._list(exchange, cache, "ps1", flags=["--inbound", "--json"])) == [
        {
            "assignment_id": "ps1",
            "status": "submitted",
            "course_id": "abc101",
            "student_id": os.environ["USER"],
            "timestamp": timestamps[0],
            "path": os.path.join(exchange, "abc101", "inbound", submission),
            "notebooks": [
                {
                    "path": os.path.join(exchange, "abc101", "inbound", submission, "p1.ipynb"),
                    "notebook_id": "p1"
                }
            ]
        }
    ]
    # Removal reports the same record with status "removed".
    assert json.loads(self._list(exchange, cache, "ps1", flags=["--remove", "--inbound", "--json"])) == [
        {
            "assignment_id": "ps1",
            "status": "removed",
            "course_id": "abc101",
            "student_id": os.environ["USER"],
            "timestamp": timestamps[0],
            "path": os.path.join(exchange, "abc101", "inbound", submission),
            "notebooks": [
                {
                    "path": os.path.join(exchange, "abc101", "inbound", submission, "p1.ipynb"),
                    "notebook_id": "p1"
                }
            ]
        }
    ]
    # The cached copy survives removal from the inbound exchange.
    assert self._list(exchange, cache, flags=["--cached"]) == dedent(
        """
        [ListApp | INFO] Submitted assignments:
        [ListApp | INFO] abc101 {} ps1 {}
        """.format(os.environ["USER"], timestamps[0])
    ).lstrip()
    submission = "{}+ps1+{}".format(os.environ["USER"], timestamps[0])
    assert json.loads(self._list(exchange, cache, flags=["--cached", "--json"])) == [
        {
            "assignment_id": "ps1",
            "status": "submitted",
            "course_id": "abc101",
            "student_id": os.environ["USER"],
            "timestamp": timestamps[0],
            "path": os.path.join(cache, "abc101", submission),
            "notebooks": [
                {
                    "path": os.path.join(cache, "abc101", submission, "p1.ipynb"),
                    "notebook_id": "p1"
                }
            ]
        }
    ]
    assert json.loads(self._list(exchange, cache, flags=["--remove", "--cached", "--json"])) == [
        {
            "assignment_id": "ps1",
            "status": "removed",
            "course_id": "abc101",
            "student_id": os.environ["USER"],
            "timestamp": timestamps[0],
            "path": os.path.join(cache, "abc101", submission),
            "notebooks": [
                {
                    "path": os.path.join(cache, "abc101", submission, "p1.ipynb"),
                    "notebook_id": "p1"
                }
            ]
        }
    ]
def test_list_json_multiple_courses(self, exchange, cache, course_dir):
    """Check --json output when the same assignment exists in two courses."""
    self._release("ps1", exchange, cache, course_dir, course="abc101")
    self._release("ps1", exchange, cache, course_dir, course="abc102")
    # path_includes_course makes fetched copies land in <course>/<assignment>.
    assert json.loads(self._list(exchange, cache, flags=["--json", "--TransferApp.path_includes_course=True"])) == [
        {
            "assignment_id": "ps1",
            "status": "released",
            "course_id": "abc101",
            "path": os.path.join(exchange, "abc101", "outbound", "ps1"),
            "notebooks": [
                {
                    "path": os.path.join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"),
                    "notebook_id": "p1"
                }
            ]
        },
        {
            "assignment_id": "ps1",
            "status": "released",
            "course_id": "abc102",
            "path": os.path.join(exchange, "abc102", "outbound", "ps1"),
            "notebooks": [
                {
                    "path": os.path.join(exchange, "abc102", "outbound", "ps1", "p1.ipynb"),
                    "notebook_id": "p1"
                }
            ]
        }
    ]
    self._fetch("ps1", exchange, cache, course="abc101", flags=["--TransferApp.path_includes_course=True"])
    self._fetch("ps1", exchange, cache, course="abc102", flags=["--TransferApp.path_includes_course=True"])
    # After fetching, each course's copy reports its own local path.
    assert json.loads(self._list(exchange, cache, flags=["--json", "--TransferApp.path_includes_course=True"])) == [
        {
            "assignment_id": "ps1",
            "status": "fetched",
            "course_id": "abc101",
            "path": os.path.abspath(os.path.join("abc101", "ps1")),
            "notebooks": [
                {
                    "path": os.path.abspath(os.path.join("abc101", "ps1", "p1.ipynb")),
                    "notebook_id": "p1"
                }
            ]
        },
        {
            "assignment_id": "ps1",
            "status": "fetched",
            "course_id": "abc102",
            "path": os.path.abspath(os.path.join("abc102", "ps1")),
            "notebooks": [
                {
                    "path": os.path.abspath(os.path.join("abc102", "ps1", "p1.ipynb")),
                    "notebook_id": "p1"
                }
            ]
        }
]
import os
import json
import time
from textwrap import dedent
from .. import run_nbgrader
from .base import BaseTestApp
from .conftest import notwindows
@notwindows
class TestNbGraderList(BaseTestApp):
def _release(self, assignment, exchange, cache, course_dir, course="abc101"):
self._copy_file(os.path.join("files", "test.ipynb"), os.path.join(course_dir, "release", assignment, "p1.ipynb"))
run_nbgrader([
"release", assignment,
"--course", course,
"--TransferApp.cache_directory={}".format(cache),
"--TransferApp.exchange_directory={}".format(exchange)
])
def _fetch(self, assignment, exchange, cache, course="abc101", flags=None):
cmd = [
"fetch", assignment,
"--course", course,
"--TransferApp.cache_directory={}".format(cache),
"--TransferApp.exchange_directory={}".format(exchange)
]
if flags is not None:
cmd.extend(flags)
run_nbgrader(cmd)
def _submit(self, assignment, exchange, cache, course="abc101", flags=None):
cmd = [
"submit", assignment,
"--course", course,
"--TransferApp.cache_directory={}".format(cache),
"--TransferApp.exchange_directory={}".format(exchange)
]
if flags is not None:
cmd.extend(flags)
run_nbgrader(cmd)
def _list(self, exchange, cache, assignment=None, flags=None, retcode=0):
cmd = [
"list",
"--TransferApp.cache_directory={}".format(cache),
"--TransferApp.exchange_directory={}".format(exchange),
]
if flags is not None:
cmd.extend(flags)
if assignment is not None:
cmd.append(assignment)
if flags and '--json' in flags:
stdout = True
else:
stdout = False
return run_nbgrader(cmd, retcode=retcode, stdout=stdout)
def test_help(self):
"""Does the help display without error?"""
run_nbgrader(["list", "--help-all"])
def test_list_released(self, exchange, cache, course_dir):
self._release("ps1", exchange, cache, course_dir)
self._release("ps1", exchange, cache, course_dir, course="xyz200")
output = self._list(exchange, cache, "ps1", flags=["--course", "abc101"])
assert output == dedent(
"""
[ListApp | INFO] Released assignments:
[ListApp | INFO] abc101 ps1
"""
).lstrip()
assert self._list(exchange, cache, "ps1", flags=["--course", "xyz200"]) == dedent(
"""
[ListApp | INFO] Released assignments:
[ListApp | INFO] xyz200 ps1
"""
).lstrip()
assert self._list(exchange, cache, "ps1") == dedent(
"""
[ListApp | INFO] Released assignments:
[ListApp | INFO] abc101 ps1
[ListApp | INFO] xyz200 ps1
"""
).lstrip()
self._release("ps2", exchange, cache, course_dir)
self._release("ps2", exchange, cache, course_dir, course="xyz200")
assert self._list(exchange, cache, "ps2") == dedent(
"""
[ListApp | INFO] Released assignments:
[ListApp | INFO] abc101 ps2
[ListApp | INFO] xyz200 ps2
"""
).lstrip()
assert self._list(exchange, cache) == dedent(
"""
[ListApp | INFO] Released assignments:
[ListApp | INFO] abc101 ps1
[ListApp | INFO] abc101 ps2
[ListApp | INFO] xyz200 ps1
[ListApp | INFO] xyz200 ps2
"""
).lstrip()
def test_list_fetched(self, exchange, cache, course_dir):
self._release("ps1", exchange, cache, course_dir)
self._release("ps2", exchange, cache, course_dir)
self._fetch("ps1", exchange, cache)
assert self._list(exchange, cache) == dedent(
"""
[ListApp | INFO] Released assignments:
[ListApp | INFO] abc101 ps1 (already downloaded)
[ListApp | INFO] abc101 ps2
"""
).lstrip()
def test_list_remove_outbound(self, exchange, cache, course_dir):
self._release("ps1", exchange, cache, course_dir)
self._release("ps2", exchange, cache, course_dir)
self._list(exchange, cache, "ps1", flags=["--remove"])
assert self._list(exchange, cache) == dedent(
"""
[ListApp | INFO] Released assignments:
[ListApp | INFO] abc101 ps2
"""
).lstrip()
self._list(exchange, cache, "ps2", flags=["--remove"])
assert self._list(exchange, cache, "ps2") == dedent(
"""
[ListApp | INFO] Released assignments:
"""
).lstrip()
def test_list_inbound(self, exchange, cache, course_dir):
self._release("ps1", exchange, cache, course_dir)
assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent(
"""
[ListApp | INFO] Submitted assignments:
"""
).lstrip()
self._fetch("ps1", exchange, cache)
self._submit("ps1", exchange, cache)
filename, = os.listdir(os.path.join(exchange, "abc101", "inbound"))
timestamp = filename.split("+")[2]
assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent(
"""
[ListApp | INFO] Submitted assignments:
[ListApp | INFO] abc101 {} ps1 {}
""".format(os.environ["USER"], timestamp)
).lstrip()
time.sleep(1)
self._submit("ps1", exchange, cache)
filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound")))
timestamps = [x.split("+")[2] for x in filenames]
assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent(
"""
[ListApp | INFO] Submitted assignments:
[ListApp | INFO] abc101 {} ps1 {}
[ListApp | INFO] abc101 {} ps1 {}
""".format(os.environ["USER"], timestamps[0], os.environ["USER"], timestamps[1])
).lstrip()
def test_list_cached(self, exchange, cache, course_dir):
self._release("ps1", exchange, cache, course_dir)
assert self._list(exchange, cache, "ps1", flags=["--cached"]) == dedent(
"""
[ListApp | INFO] Submitted assignments:
"""
).lstrip()
self._fetch("ps1", exchange, cache)
self._submit("ps1", exchange, cache)
filename, = os.listdir(os.path.join(cache, "abc101"))
timestamp = filename.split("+")[2]
assert self._list(exchange, cache, "ps1", flags=["--cached"]) == dedent(
"""
[ListApp | INFO] Submitted assignments:
[ListApp | INFO] abc101 {} ps1 {}
""".format(os.environ["USER"], timestamp)
).lstrip()
time.sleep(1)
self._submit("ps1", exchange, cache)
self._list(exchange, cache, "ps1", flags=["--inbound", "--remove"])
filenames = sorted(os.listdir(os.path.join(cache, "abc101")))
timestamps = [x.split("+")[2] for x in filenames]
assert self._list(exchange, cache, "ps1", flags=["--cached"]) == dedent(
"""
[ListApp | INFO] Submitted assignments:
[ListApp | INFO] abc101 {} ps1 {}
[ListApp | INFO] abc101 {} ps1 {}
""".format(os.environ["USER"], timestamps[0], os.environ["USER"], timestamps[1])
).lstrip()
def test_list_remove_inbound(self, exchange, cache, course_dir):
self._release("ps1", exchange, cache, course_dir)
self._fetch("ps1", exchange, cache)
self._release("ps2", exchange, cache, course_dir)
self._fetch("ps2", exchange, cache)
self._submit("ps1", exchange, cache)
self._submit("ps2", exchange, cache)
filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound")))
timestamps = [x.split("+")[2] for x in filenames]
self._list(exchange, cache, "ps1", flags=["--inbound", "--remove"])
assert self._list(exchange, cache, flags=["--inbound"]) == dedent(
"""
[ListApp | INFO] Submitted assignments:
[ListApp | INFO] abc101 {} ps2 {}
""".format(os.environ["USER"], timestamps[1])
).lstrip()
assert len(os.listdir(os.path.join(exchange, "abc101", "inbound"))) == 1
self._list(exchange, cache, "ps2", flags=["--inbound", "--remove"])
assert self._list(exchange, cache, flags=["--inbound"]) == dedent(
"""
[ListApp | INFO] Submitted assignments:
"""
).lstrip()
assert len(os.listdir(os.path.join(exchange, "abc101", "inbound"))) == 0
def test_list_remove_cached(self, exchange, cache, course_dir):
self._release("ps1", exchange, cache, course_dir)
self._fetch("ps1", exchange, cache)
self._release("ps2", exchange, cache, course_dir)
self._fetch("ps2", exchange, cache)
self._submit("ps1", exchange, cache)
self._submit("ps2", exchange, cache)
filenames = sorted(os.listdir(os.path.join(cache, "abc101")))
timestamps = [x.split("+")[2] for x in filenames]
self._list(exchange, cache, "ps1", flags=["--cached", "--remove"])
assert self._list(exchange, cache, flags=["--cached"]) == dedent(
"""
[ListApp | INFO] Submitted assignments:
[ListApp | INFO] abc101 {} ps2 {}
""".format(os.environ["USER"], timestamps[1])
).lstrip()
assert len(os.listdir(os.path.join(cache, "abc101"))) == 1
self._list(exchange, cache, "ps2", flags=["--cached", "--remove"])
assert self._list(exchange, cache, flags=["--cached"]) == dedent(
"""
[ListApp | INFO] Submitted assignments:
"""
).lstrip()
assert len(os.listdir(os.path.join(cache, "abc101"))) == 0
def test_list_cached_and_inbound(self, exchange, cache):
self._list(exchange, cache, flags=["--inbound", "--cached"], retcode=1)
def test_list_json(self, exchange, cache, course_dir):
self._release("ps1", exchange, cache, course_dir)
assert self._list(exchange, cache) == dedent(
"""
[ListApp | INFO] Released assignments:
[ListApp | INFO] abc101 ps1
"""
).lstrip()
assert json.loads(self._list(exchange, cache, flags=["--json"])) == [
{
"assignment_id": "ps1",
"status": "released",
"course_id": "abc101",
"path": os.path.join(exchange, "abc101", "outbound", "ps1"),
"notebooks": [
{
"path": os.path.join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"),
"notebook_id": "p1"
}
]
}
]
self._fetch("ps1", exchange, cache)
assert self._list(exchange, cache) == dedent(
"""
[ListApp | INFO] Released assignments:
[ListApp | INFO] abc101 ps1 (already downloaded)
"""
).lstrip()
assert json.loads(self._list(exchange, cache, flags=["--json"])) == [
{
"assignment_id": "ps1",
"status": "fetched",
"course_id": "abc101",
"path": os.path.abspath("ps1"),
"notebooks": [
{
"path": os.path.abspath(os.path.join("ps1", "p1.ipynb")),
"notebook_id": "p1"
}
]
}
]
self._submit("ps1", exchange, cache)
filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound")))
timestamps = [x.split("+")[2] for x in filenames]
assert self._list(exchange, "ps1", flags=["--inbound"]) == dedent(
"""
[ListApp | INFO] Submitted assignments:
[ListApp | INFO] abc101 {} ps1 {}
""".format(os.environ["USER"], timestamps[0])
).lstrip()
submission = "{}+ps1+{}".format(os.environ["USER"], timestamps[0])
assert json.loads(self._list(exchange, "ps1", flags=["--inbound", "--json"])) == [
{
"assignment_id": "ps1",
"status": "submitted",
"course_id": "abc101",
"student_id": os.environ["USER"],
"timestamp": timestamps[0],
"path": os.path.join(exchange, "abc101", "inbound", submission),
"notebooks": [
{
"path": os.path.join(exchange, "abc101", "inbound", submission, "p1.ipynb"),
"notebook_id": "p1"
}
]
}
]
assert json.loads(self._list(exchange, "ps1", flags=["--remove", "--inbound", "--json"])) == [
{
"assignment_id": "ps1",
"status": "removed",
"course_id": "abc101",
"student_id": os.environ["USER"],
"timestamp": timestamps[0],
"path": os.path.join(exchange, "abc101", "inbound", submission),
"notebooks": [
{
"path": os.path.join(exchange, "abc101", "inbound", submission, "p1.ipynb"),
"notebook_id": "p1"
}
]
}
]
assert self._list(exchange, cache, flags=["--cached"]) == dedent(
"""
[ListApp | INFO] Submitted assignments:
[ListApp | INFO] abc101 {} ps1 {}
""".format(os.environ["USER"], timestamps[0])
).lstrip()
submission = "{}+ps1+{}".format(os.environ["USER"], timestamps[0])
assert json.loads(self._list(exchange, cache, flags=["--cached", "--json"])) == [
{
"assignment_id": "ps1",
"status": "submitted",
"course_id": "abc101",
"student_id": os.environ["USER"],
"timestamp": timestamps[0],
"path": os.path.join(cache, "abc101", submission),
"notebooks": [
{
"path": os.path.join(cache, "abc101", submission, "p1.ipynb"),
"notebook_id": "p1"
}
]
}
]
assert json.loads(self._list(exchange, cache, flags=["--remove", "--cached", "--json"])) == [
{
"assignment_id": "ps1",
"status": "removed",
"course_id": "abc101",
"student_id": os.environ["USER"],
"timestamp": timestamps[0],
"path": os.path.join(cache, "abc101", submission),
"notebooks": [
{
"path": os.path.join(cache, "abc101", submission, "p1.ipynb"),
"notebook_id": "p1"
}
]
}
]
def test_list_json_multiple_courses(self, exchange, cache, course_dir):
self._release("ps1", exchange, cache, course_dir, course="abc101")
self._release("ps1", exchange, cache, course_dir, course="abc102")
assert json.loads(self._list(exchange, cache, flags=["--json", "--TransferApp.path_includes_course=True"])) == [
{
"assignment_id": "ps1",
"status": "released",
"course_id": "abc101",
"path": os.path.join(exchange, "abc101", "outbound", "ps1"),
"notebooks": [
{
"path": os.path.join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"),
"notebook_id": "p1"
}
]
},
{
"assignment_id": "ps1",
"status": "released",
"course_id": "abc102",
"path": os.path.join(exchange, "abc102", "outbound", "ps1"),
"notebooks": [
{
"path": os.path.join(exchange, "abc102", "outbound", "ps1", "p1.ipynb"),
"notebook_id": "p1"
}
]
}
]
self._fetch("ps1", exchange, cache, course="abc101", flags=["--TransferApp.path_includes_course=True"])
self._fetch("ps1", exchange, cache, course="abc102", flags=["--TransferApp.path_includes_course=True"])
assert json.loads(self._list(exchange, cache, flags=["--json", "--TransferApp.path_includes_course=True"])) == [
{
"assignment_id": "ps1",
"status": "fetched",
"course_id": "abc101",
"path": os.path.abspath(os.path.join("abc101", "ps1")),
"notebooks": [
{
"path": os.path.abspath(os.path.join("abc101", "ps1", "p1.ipynb")),
"notebook_id": "p1"
}
]
},
{
"assignment_id": "ps1",
"status": "fetched",
"course_id": "abc102",
"path": os.path.abspath(os.path.join("abc102", "ps1")),
"notebooks": [
{
"path": os.path.abspath(os.path.join("abc102", "ps1", "p1.ipynb")),
"notebook_id": "p1"
}
]
}
]
import pandas as pd
import os
from pydub import AudioSegment

# TODO: Make this applicable by terminal (``data_dir`` and ``out_dir`` are
# expected to be defined by the caller; they should become CLI arguments).

# Every non-CSV file in the data directory is assumed to be an audio file.
filenames = os.listdir(data_dir)
audio_filenames = []
for filename in filenames:
    if ".csv" not in (filename.lower()):
        audio_filenames.append(filename)

# Map each audio file to its list of (onset, offset, class) annotations,
# with onset/offset in seconds.
annotation_dict = {}
for audio_filename in audio_filenames:
    # csv should have same name as audio file except extension
    csv_filename = audio_filename.split(".")[0] + ".csv"
    # Using C engine because it's supposed to be faster; requires the
    # delimiter to be commas.
    # BUG FIX: use os.path.join instead of manual "/" concatenation so the
    # script works whether or not data_dir carries a trailing separator.
    data_frame = pd.read_csv(os.path.join(data_dir, csv_filename), engine="c")
    annotation_dict[audio_filename] = []
    for _, row in data_frame.iterrows():
        annotation_dict[audio_filename].append(
            (float(row["onset"]),
             float(row["offset"]),
             str(row["class"]))
        )

# Split each recording into 10 s windows with a 5 s hop, clipping the
# annotations to each window and re-expressing them in window-local seconds.
for audio_filename in annotation_dict:
    print(audio_filename)
    new_record_start = 0
    new_record_length = 10 * 1000  # window length in ms (pydub works in ms)
    hop_length = 5 * 1000          # hop size in ms
    # BUG FIX: the original concatenated data_dir + audio_filename without a
    # path separator (inconsistent with the CSV read above).
    record = AudioSegment.from_wav(os.path.join(data_dir, audio_filename))
    i = 0
    last = False
    while not last:
        # Final window: anchor it to the end of the recording.
        if (new_record_start + new_record_length) > len(record):
            # max() guards against recordings shorter than one window, which
            # previously produced a negative start index.
            new_record_start = max(0, len(record) - new_record_length)
            last = True
        new_record = record[new_record_start:new_record_start + new_record_length]
        new_annotations = []
        for annotation in annotation_dict[audio_filename]:
            # Annotation times are in seconds, window bounds in ms; skip
            # annotations lying entirely outside this window.
            if (annotation[0] * 1000) > (new_record_start + new_record_length):
                continue
            elif (annotation[1] * 1000) < new_record_start:
                continue
            else:
                # Clip to the window and convert to window-relative seconds.
                onset = (max(annotation[0] * 1000, new_record_start) - new_record_start) / 1000
                offset = (min(annotation[1] * 1000, new_record_start + new_record_length) - new_record_start) / 1000
                new_annotations.append((onset, offset, annotation[2]))
        # Only export windows that contain at least one annotation.
        if len(new_annotations) > 0:
            new_filename = audio_filename.split(".")[0] + "_" + str(i)
            print(new_filename)
            i += 1
            new_record.export(out_f=os.path.join(out_dir, new_filename + ".wav"), format="wav")
            new_df = pd.DataFrame(new_annotations, columns=["onset", "offset", "class"])
            new_df.to_csv(os.path.join(out_dir, new_filename + ".csv"))
        new_record_start += hop_length
import os
from pydub import AudioSegment
#TODO: Make this applicable by terminal
filenames = os.listdir(data_dir)
audio_filenames = []
for filename in filenames:
if ".csv" not in (filename.lower()):
audio_filenames.append(filename)
annotation_dict = {}
for audio_filename in audio_filenames:
#csv should have same name as audio file except extension
csv_filename = audio_filename.split(".")[0] + ".csv"
#Using C engine because it's supposed to be faster, requires delimeter to be commas
data_frame = pd.read_csv(data_dir + "/" + csv_filename, engine="c")
#Storing all annotations in dictionary
annotation_dict[audio_filename]=[]
for _, row in data_frame.iterrows():
annotation_dict[audio_filename].append(
(float(row["onset"]),
float(row["offset"]),
str(row["class"]))
)
for audio_filename in annotation_dict:
print(audio_filename)
annotations = annotation_dict[audio_filename]
new_record_start = 0
new_record_length = 10*1000
hop_length = 5 * 1000
record = AudioSegment.from_wav(data_dir + audio_filename)
i = 0
last = False
while not last:
#=> last new record from old record
if (new_record_start + new_record_length) > len(record):
new_record_start = len(record) - new_record_length
last = True
new_record = record[new_record_start:new_record_start + new_record_length]
new_annotations = []
for annotation in annotation_dict[audio_filename]:
if (annotation[0]*1000) > (new_record_start + new_record_length):
continue
elif (annotation[1]*1000) < new_record_start:
continue
else:
onset = (max(annotation[0]*1000, new_record_start) - new_record_start)/1000
offset = (min(annotation[1]*1000, new_record_start+new_record_length) - new_record_start)/1000
new_annotations.append((onset,offset,annotation[2]))
if len(new_annotations) > 0:
new_filename = audio_filename.split(".")[0] + "_" + str(i)
print(new_filename)
i+=1
new_record.export(out_f=out_dir + new_filename + ".wav", format="wav")
new_df = pd.DataFrame(new_annotations, columns=["onset", "offset", "class"])
new_df.to_csv(out_dir + new_filename + ".csv")
new_record_start += hop_length
from huaweicloud_sis.client.asr_client import AsrCustomizationClient
from huaweicloud_sis.bean.asr_request import AsrCustomShortRequest
from huaweicloud_sis.utils import io_utils
import json
from huaweicloud_sis.client.tts_client import TtsCustomizationClient
from huaweicloud_sis.bean.tts_request import TtsCustomRequest
from huaweicloud_sis.bean.sis_config import SisConfig
from huaweicloud_sis.exception.exceptions import ClientException
from huaweicloud_sis.exception.exceptions import ServerException
# Authentication parameters.
# NOTE(review): hard-coded AK/SK credentials are a security risk -- they
# should be loaded from the environment or a config file, not committed.
sys_ak = '1HYJCSYF2PKVMFRWRHBP'  # see https://support.huaweicloud.com/sdkreference-sis/sis_05_0003.html
sys_sk = '321gMCTHf2pou1PiLzMeZV1PW4osIoHIG23uhaxX'  # see https://support.huaweicloud.com/sdkreference-sis/sis_05_0003.html
sys_region = 'cn-north-4'  # region, e.g. cn-north-4
sys_project_id = '07a919eae80025272f94c019ddb71922'  # one-to-one with the region, see https://support.huaweicloud.com/api-sis/sis_03_0008.html
# The string block below is the original author's note (kept verbatim): the
# audio format and the model property string must both match the actual input
# file and its sample rate, otherwise the service returns "audio_format is
# invalid" or "'audio_format' is not match model".
"""
todo 请正确填写音频格式和模型属性字符串
1. 音频格式一定要相匹配.
例如文件或者obs url是xx.wav, 则在一句话识别是wav格式,在录音文件识别是auto。具体参考api文档。
例如音频是pcm格式,并且采样率为8k,则格式填写pcm8k16bit。
如果返回audio_format is invalid 说明该文件格式不支持。具体支持哪些音频格式,需要参考api文档。
2. 音频采样率要与属性字符串的采样率要匹配。
例如格式选择pcm16k16bit,属性字符串却选择chinese_8k_common, 则会返回'audio_format' is not match model
例如wav本身是16k采样率,属性选择chinese_8k_common, 同样会返回'audio_format' is not match model
"""
# User-adjustable section: input recording and synthesized-output paths.
input_path = 'E:/speech_test/input5.wav'
output_path = 'E:/speech_test/output.wav'
# Command list; every new command needs a matching feedback entry below.
order_list = ['打开药盒。', '关闭药盒。']
# Feedback list; entries correspond positionally to the commands above.
feedback_list = ['打开成功', '关闭成功']
# Command -> feedback lookup table built from the two parallel lists.
of_dict = dict((order, feedback) for order, feedback in zip(order_list, feedback_list))
# Commented-out earlier variant (kept verbatim): building the mapping from
# sets, which would lose the positional pairing and is therefore unused.
'''
order_set = set(order_list)
feedback_set = set(feedback_list)
# 指令-反馈(OF)字典
of_dict = dict((order, feedback) for order, feedback in zip(order_set, feedback_set))
'''
# Fallback reply when the recognized text is not a known command.
err_output = '我不明白您的意思,请再说一遍'
# Module-level state filled in by the __main__ block below.
input_result = ''
input_text = ''
output_text = ''
def asrc_short_example():
    """Run short-sentence ASR on ``input_path`` and return the response as a JSON string."""
    # Credentials and endpoint come from the module-level sys_* constants
    # (see https://support.huaweicloud.com/sdkreference-sis/sis_05_0003.html).
    access_key = sys_ak
    secret_key = sys_sk
    region = sys_region
    project_id = sys_project_id
    audio_path = input_path
    # Audio format and model property string; both must match the input file
    # (format/sample-rate mismatches are rejected by the service).
    audio_format = 'wav'
    model_property = 'chinese_16k_common'
    # Step 1: build the client with explicit timeouts.
    config = SisConfig()
    config.set_connect_timeout(5)  # connection timeout, seconds
    config.set_read_timeout(10)    # read timeout, seconds
    # A proxy could be configured here via config.set_proxy([host, port]) or
    # config.set_proxy([host, port, username, password]) if required.
    asr_client = AsrCustomizationClient(access_key, secret_key, region, project_id, sis_config=config)
    # Step 2: build the request from the encoded audio payload.
    payload = io_utils.encode_file(audio_path)
    asr_request = AsrCustomShortRequest(audio_format, model_property, payload)
    # Ask the service to add punctuation (the default is 'no').
    asr_request.set_add_punc('yes')
    # A hot-word vocabulary id could be set here if one exists.
    # Step 3: send the request and serialize the JSON response for the caller.
    result = asr_client.get_short_response(asr_request)
    return json.dumps(result, indent=2, ensure_ascii=False)
def ttsc_example():
    """Customized text-to-speech demo: synthesize ``output_text`` to ``output_path``."""
    ak = sys_ak  # see https://support.huaweicloud.com/sdkreference-sis/sis_05_0003.html
    sk = sys_sk  # see https://support.huaweicloud.com/sdkreference-sis/sis_05_0003.html
    region = sys_region  # region, e.g. cn-north-4
    project_id = sys_project_id  # one-to-one with the region, see https://support.huaweicloud.com/api-sis/sis_03_0008.html
    text = output_text  # text to synthesize, at most 500 characters
    path = output_path  # save path, e.g. D:/test.wav; saving can be disabled below
    # Step 1: initialize the client.
    config = SisConfig()
    config.set_connect_timeout(5)  # connection timeout, seconds
    config.set_read_timeout(10)  # read timeout, seconds
    # A proxy can be configured before use; format [host, port] or
    # [host, port, username, password].
    # config.set_proxy(proxy)
    ttsc_client = TtsCustomizationClient(ak, sk, region, project_id, sis_config=config)
    # Step 2: build the request.
    ttsc_request = TtsCustomRequest(text)
    # All parameters below are optional; defaults are used when unset.
    # Property string language_speaker_domain, default chinese_xiaoyan_common.
    ttsc_request.set_property('chinese_xiaoyan_common')
    # Audio format: wav (default), mp3 or pcm.
    ttsc_request.set_audio_format('wav')
    # Sample rate: 8000 or 16000, default 8000.
    ttsc_request.set_sample_rate('8000')
    # Volume in [0, 100], default 50.
    ttsc_request.set_volume(50)
    # Pitch in [-500, 500], default 0.
    ttsc_request.set_pitch(0)
    # Speed in [-500, 500], default 0.
    ttsc_request.set_speed(0)
    # Whether to save the audio locally, default False.
    ttsc_request.set_saved(True)
    # Save path; only effective when saving is enabled.
    ttsc_request.set_saved_path(path)
    # Step 3: send the request; with saving enabled the audio appears at ``path``.
    result = ttsc_client.get_ttsc_response(ttsc_request)
    # print(json.dumps(result, indent=2, ensure_ascii=False))
if __name__ == '__main__':
    # Recognize the spoken command from the input recording.
    input_result = asrc_short_example()
    input_text = json.loads(input_result)['result']['text']
    # Map the recognized command to its feedback phrase; of_dict's keys are
    # exactly order_list, so a lookup with a default replaces the original
    # membership test. Unknown input falls back to the "please repeat" reply.
    output_text = of_dict.get(input_text, err_output)
ttsc_example()
from huaweicloud_sis.client.asr_client import AsrCustomizationClient
from huaweicloud_sis.bean.asr_request import AsrCustomShortRequest
from huaweicloud_sis.utils import io_utils
import json
from huaweicloud_sis.client.tts_client import TtsCustomizationClient
from huaweicloud_sis.bean.tts_request import TtsCustomRequest
from huaweicloud_sis.bean.sis_config import SisConfig
from huaweicloud_sis.exception.exceptions import ClientException
from huaweicloud_sis.exception.exceptions import ServerException
# Authentication parameters.
# SECURITY WARNING: real access credentials are hard-coded below and have been
# committed to source control. Rotate these keys and load them from an
# environment variable or a secrets manager instead of embedding them here.
sys_ak = '1HYJCSYF2PKVMFRWRHBP'  # Access key; see https://support.huaweicloud.com/sdkreference-sis/sis_05_0003.html
sys_sk = '321gMCTHf2pou1PiLzMeZV1PW4osIoHIG23uhaxX'  # Secret key; see https://support.huaweicloud.com/sdkreference-sis/sis_05_0003.html
sys_region = 'cn-north-4'  # Region, e.g. cn-north-4
sys_project_id = '07a919eae80025272f94c019ddb71922'  # Project id tied to the region; see https://support.huaweicloud.com/api-sis/sis_03_0008.html
"""
todo 请正确填写音频格式和模型属性字符串
1. 音频格式一定要相匹配.
例如文件或者obs url是xx.wav, 则在一句话识别是wav格式,在录音文件识别是auto。具体参考api文档。
例如音频是pcm格式,并且采样率为8k,则格式填写pcm8k16bit。
如果返回audio_format is invalid 说明该文件格式不支持。具体支持哪些音频格式,需要参考api文档。
2. 音频采样率要与属性字符串的采样率要匹配。
例如格式选择pcm16k16bit,属性字符串却选择chinese_8k_common, 则会返回'audio_format' is not match model
例如wav本身是16k采样率,属性选择chinese_8k_common, 同样会返回'audio_format' is not match model
"""
# User-adjustable section.
input_path = 'E:/speech_test/input5.wav'  # Audio file to recognize
output_path = 'E:/speech_test/output.wav'  # Where the synthesized reply is saved
# Command list; every new command needs a matching feedback entry below.
order_list = ['打开药盒。', '关闭药盒。']
# Feedback list; entries correspond position-wise to the commands above.
feedback_list = ['打开成功', '关闭成功']
# Command -> feedback lookup table.
of_dict = dict((order, feedback) for order, feedback in zip(order_list, feedback_list))
'''
order_set = set(order_list)
feedback_set = set(feedback_list)
# 指令-反馈(OF)字典
of_dict = dict((order, feedback) for order, feedback in zip(order_set, feedback_set))
'''
# Fallback reply ("I don't understand, please repeat") for unknown commands.
err_output = '我不明白您的意思,请再说一遍'
# Module-level slots written by the __main__ block below.
input_result = ''
input_text = ''
output_text = ''
def asrc_short_example():
    """Short-audio speech recognition (ASR) demo.

    Sends the audio file at the module-level global ``input_path`` to the
    Huawei Cloud SIS short-sentence recognition API and returns the JSON
    response as a pretty-printed string.
    """
    ak = sys_ak  # Access key; see https://support.huaweicloud.com/sdkreference-sis/sis_05_0003.html
    sk = sys_sk  # Secret key; see https://support.huaweicloud.com/sdkreference-sis/sis_05_0003.html
    region = sys_region  # Region, e.g. cn-north-4
    project_id = sys_project_id  # Must match the region; see https://support.huaweicloud.com/api-sis/sis_03_0008.html
    path = input_path  # Full path to the audio file, e.g. D:/test.wav
    # Audio format; normally left unchanged.
    path_audio_format = 'wav'  # Audio format such as wav; see the API docs
    path_property = 'chinese_16k_common'  # language_sampleRate_domain, e.g. chinese_8k_common; see the API docs
    # step1: initialize the client
    config = SisConfig()
    config.set_connect_timeout(5)  # Connect timeout, in seconds
    config.set_read_timeout(10)  # Read timeout, in seconds
    # Proxy settings: make sure the proxy works before enabling it.
    # Accepted formats: [host, port] or [host, port, username, password]
    # config.set_proxy(proxy)
    asr_client = AsrCustomizationClient(ak, sk, region, project_id, sis_config=config)
    # step2: build the request
    data = io_utils.encode_file(path)
    asr_request = AsrCustomShortRequest(path_audio_format, path_property, data)
    # All parameters below are optional; defaults are used when unset.
    # Whether to add punctuation: 'yes' or 'no', default 'no'
    asr_request.set_add_punc('yes')
    # Hot-word vocabulary id; omit when not used
    # asr_request.set_vocabulary_id(None)
    # step3: send the request; the result comes back as JSON
    result = asr_client.get_short_response(asr_request)
    # print(json.dumps(result))
    return(json.dumps(result, indent=2, ensure_ascii=False))
def ttsc_example():
    """Customized text-to-speech (TTS) demo.

    Synthesizes the module-level global ``output_text`` to an audio file at
    ``output_path`` via the Huawei Cloud SIS TTS customization API. The
    service response is not returned; because ``set_saved(True)`` is used,
    the SDK writes the synthesized audio to disk.
    """
    ak = sys_ak  # Access key; see https://support.huaweicloud.com/sdkreference-sis/sis_05_0003.html
    sk = sys_sk  # Secret key; see https://support.huaweicloud.com/sdkreference-sis/sis_05_0003.html
    region = sys_region  # Region, e.g. cn-north-4
    project_id = sys_project_id  # Must match the region; see https://support.huaweicloud.com/api-sis/sis_03_0008.html
    text = output_text  # Text to synthesize (at most 500 characters)
    path = output_path  # Save path, e.g. D:/test.wav; saving can be disabled below
    # step1: initialize the client
    config = SisConfig()
    config.set_connect_timeout(5)  # Connect timeout, in seconds
    config.set_read_timeout(10)  # Read timeout, in seconds
    # Proxy settings: make sure the proxy works before enabling it.
    # Accepted formats: [host, port] or [host, port, username, password]
    # config.set_proxy(proxy)
    ttsc_client = TtsCustomizationClient(ak, sk, region, project_id, sis_config=config)
    # step2: build the request
    ttsc_request = TtsCustomRequest(text)
    # All parameters below are optional; defaults are used when unset.
    # Property string: language_speaker_domain, default chinese_xiaoyan_common (see API docs)
    ttsc_request.set_property('chinese_xiaoyan_common')
    # Audio format: default wav; mp3 and pcm are also supported
    ttsc_request.set_audio_format('wav')
    # Sample rate: 8000 or 16000, default 8000
    ttsc_request.set_sample_rate('8000')
    # Volume: [0, 100], default 50
    ttsc_request.set_volume(50)
    # Pitch: [-500, 500], default 0
    ttsc_request.set_pitch(0)
    # Speed: [-500, 500], default 0
    ttsc_request.set_speed(0)
    # Whether to save the audio locally, default False
    ttsc_request.set_saved(True)
    # Save path; only takes effect when saving is enabled
    ttsc_request.set_saved_path(path)
    # step3: send the request. When saving is enabled, the audio appears at `path`.
    result = ttsc_client.get_ttsc_response(ttsc_request)
    # print(json.dumps(result, indent=2, ensure_ascii=False))
if __name__ == '__main__':
    # Recognize the spoken command from the input audio file.
    input_result = asrc_short_example()
    input_text = json.loads(input_result)['result']['text']
    # Map the recognized command to its feedback phrase; unknown commands
    # fall back to the generic "please repeat" prompt.
    output_text = of_dict.get(input_text, err_output)
    # Synthesize the feedback phrase to the output audio file.
    ttsc_example()
# .................................................................................................................
# Level "cheese": shoot the four switches to open the exit; the center
# stone blocking the line of sight must first be moved with the bomb.
level_dict["cheese"] = {
    "scheme": "yellow_scheme",
    "size": (11,12,7),
    "intro": "cheese",
    # Two help pages shown to the player.
    "help": (
        "$scale(1.5)mission:\nactivate the exit!\n\n" + \
        "to activate the exit,\nactivate the 4 switches\n\n" + \
        "to activate the switches,\nshoot them\n\n" + \
        "to be able to shoot the switches,\nmove the center stone",
        "to move the center stone,\n\nuse the bomb.\n\n" + \
        "the bomb will detonate if you shoot it"
    ),
    "player": {
        "coordinates": (3, 4, 3),
        "nostatus": 0,
    },
    "exits": [
        {
            "name": "exit",
            "active": 0,
            "position": (-1,0,0),
        },
    ],
    # Level construction script executed by the engine.
    "create":
        """
        s = world.getSize ()
        h = 0
        # bomb and stones
        for i in [1, 2,]:
            world.addObjectAtPos (KikiWall(), KikiPos (1, i, 1))
            world.addObjectAtPos (KikiWall(), KikiPos (1, i, 3))
            world.addObjectAtPos (KikiWall(), KikiPos (2, i, 1))
            world.addObjectAtPos (KikiWall(), KikiPos (2, i, 2))
            world.addObjectAtPos (KikiWall(), KikiPos (2, i, 5))
            world.addObjectAtPos (KikiWall(), KikiPos (3, i, 1))
            world.addObjectAtPos (KikiWall(), KikiPos (3, i, 2))
            world.addObjectAtPos (KikiWall(), KikiPos (3, i, 4))
            world.addObjectAtPos (KikiWall(), KikiPos (3, i, 5))
            world.addObjectAtPos (KikiWall(), KikiPos (5, i, 0))
            world.addObjectAtPos (KikiWall(), KikiPos (5, i, 2))
            world.addObjectAtPos (KikiWall(), KikiPos (5, i, 3))
            world.addObjectAtPos (KikiWall(), KikiPos (5, i, 4))
            world.addObjectAtPos (KikiWall(), KikiPos (6, i, 1))
            world.addObjectAtPos (KikiWall(), KikiPos (6, i, 2))
            world.addObjectAtPos (KikiWall(), KikiPos (7, i, 2))
            world.addObjectAtPos (KikiWall(), KikiPos (7, i, 4))
            world.addObjectAtPos (KikiWall(), KikiPos (7, i, 5))
            world.addObjectAtPos (KikiWall(), KikiPos (8, i, 0))
            world.addObjectAtPos (KikiWall(), KikiPos (8, i, 2))
            world.addObjectAtPos (KikiWall(), KikiPos (8, i, 4))
            world.addObjectAtPos (KikiWall(), KikiPos (8, i, 5))
            world.addObjectAtPos (KikiWall(), KikiPos (9, i, 2))
            world.addObjectAtPos (KikiWall(), KikiPos (9, i, 4))
            world.addObjectAtPos (KikiWall(), KikiPos (10, i, 3))
        for i in range(0,s.x):
            for j in range(0, s.z):
                world.addObjectAtPos (KikiStone(), KikiPos(i,2,j))
        world.switch_counter = 0
        def switched (switch):
            world.switch_counter += switch.isActive() and 1 or -1
            exit = kikiObjectToGate(world.getObjectWithName("exit"))
            exit.setActive(world.switch_counter == 4)
        switch1 = KikiSwitch()
        switch1.getEventWithName("switched").addAction (continuous (lambda s=switch1: switched(s)))
        switch2 = KikiSwitch()
        switch2.getEventWithName("switched").addAction (continuous (lambda s=switch2: switched(s)))
        switch3 = KikiSwitch()
        switch3.getEventWithName("switched").addAction (continuous (lambda s=switch3: switched(s)))
        switch4 = KikiSwitch()
        switch4.getEventWithName("switched").addAction (continuous (lambda s=switch4: switched(s)))
        world.addObjectAtPos (switch1, KikiPos (1, 0 ,2))
        world.addObjectAtPos (switch2, KikiPos ( 7, 1, 0))
        world.addObjectAtPos (switch3, KikiPos (9, 0, 0))
        world.addObjectAtPos (switch4, KikiPos(9, 1, 5))
        """,
}
# .................................................................................................................
# Level "cheese": shoot the four switches to open the exit; the center
# stone blocking the line of sight must first be moved with the bomb.
level_dict["cheese"] = {
    "scheme": "yellow_scheme",
    "size": (11,12,7),
    "intro": "cheese",
    # Two help pages shown to the player.
    "help": (
        "$scale(1.5)mission:\nactivate the exit!\n\n" + \
        "to activate the exit,\nactivate the 4 switches\n\n" + \
        "to activate the switches,\nshoot them\n\n" + \
        "to be able to shoot the switches,\nmove the center stone",
        "to move the center stone,\n\nuse the bomb.\n\n" + \
        "the bomb will detonate if you shoot it"
    ),
    "player": {
        "coordinates": (3, 4, 3),
        "nostatus": 0,
    },
    "exits": [
        {
            "name": "exit",
            "active": 0,
            "position": (-1,0,0),
        },
    ],
    # Level construction script executed by the engine.
    "create":
        """
        s = world.getSize ()
        h = 0
        # bomb and stones
        for i in [1, 2,]:
            world.addObjectAtPos (KikiWall(), KikiPos (1, i, 1))
            world.addObjectAtPos (KikiWall(), KikiPos (1, i, 3))
            world.addObjectAtPos (KikiWall(), KikiPos (2, i, 1))
            world.addObjectAtPos (KikiWall(), KikiPos (2, i, 2))
            world.addObjectAtPos (KikiWall(), KikiPos (2, i, 5))
            world.addObjectAtPos (KikiWall(), KikiPos (3, i, 1))
            world.addObjectAtPos (KikiWall(), KikiPos (3, i, 2))
            world.addObjectAtPos (KikiWall(), KikiPos (3, i, 4))
            world.addObjectAtPos (KikiWall(), KikiPos (3, i, 5))
            world.addObjectAtPos (KikiWall(), KikiPos (5, i, 0))
            world.addObjectAtPos (KikiWall(), KikiPos (5, i, 2))
            world.addObjectAtPos (KikiWall(), KikiPos (5, i, 3))
            world.addObjectAtPos (KikiWall(), KikiPos (5, i, 4))
            world.addObjectAtPos (KikiWall(), KikiPos (6, i, 1))
            world.addObjectAtPos (KikiWall(), KikiPos (6, i, 2))
            world.addObjectAtPos (KikiWall(), KikiPos (7, i, 2))
            world.addObjectAtPos (KikiWall(), KikiPos (7, i, 4))
            world.addObjectAtPos (KikiWall(), KikiPos (7, i, 5))
            world.addObjectAtPos (KikiWall(), KikiPos (8, i, 0))
            world.addObjectAtPos (KikiWall(), KikiPos (8, i, 2))
            world.addObjectAtPos (KikiWall(), KikiPos (8, i, 4))
            world.addObjectAtPos (KikiWall(), KikiPos (8, i, 5))
            world.addObjectAtPos (KikiWall(), KikiPos (9, i, 2))
            world.addObjectAtPos (KikiWall(), KikiPos (9, i, 4))
            world.addObjectAtPos (KikiWall(), KikiPos (10, i, 3))
        for i in range(0,s.x):
            for j in range(0, s.z):
                world.addObjectAtPos (KikiStone(), KikiPos(i,2,j))
        world.switch_counter = 0
        def switched (switch):
            world.switch_counter += switch.isActive() and 1 or -1
            exit = kikiObjectToGate(world.getObjectWithName("exit"))
            exit.setActive(world.switch_counter == 4)
        switch1 = KikiSwitch()
        switch1.getEventWithName("switched").addAction (continuous (lambda s=switch1: switched(s)))
        switch2 = KikiSwitch()
        switch2.getEventWithName("switched").addAction (continuous (lambda s=switch2: switched(s)))
        switch3 = KikiSwitch()
        switch3.getEventWithName("switched").addAction (continuous (lambda s=switch3: switched(s)))
        switch4 = KikiSwitch()
        switch4.getEventWithName("switched").addAction (continuous (lambda s=switch4: switched(s)))
        world.addObjectAtPos (switch1, KikiPos (1, 0 ,2))
        world.addObjectAtPos (switch2, KikiPos ( 7, 1, 0))
        world.addObjectAtPos (switch3, KikiPos (9, 0, 0))
        world.addObjectAtPos (switch4, KikiPos(9, 1, 5))
        """,
}
import abc
from common.models.model import Model
from common.models.utils import make_rnn_cell
import tensorflow as tf
import numpy as np
"""
Abstract base class for generative sequence models
"""
class SequenceGenerativeModel(Model):
    """Abstract base class for generative sequence models."""
    # NOTE(review): `__metaclass__` is the Python 2 mechanism; under Python 3
    # this attribute is ignored and `metaclass=abc.ABCMeta` would be needed.
    __metaclass__ = abc.ABCMeta

    def __init__(self, hparams, sequence_encoder):
        super(SequenceGenerativeModel, self).__init__(hparams)
        self.sequence_encoder = sequence_encoder
        self._rnn_cell = None  # built lazily by rnn_cell()

    @classmethod
    def from_file(cls, filename, sequence_encoder):
        """Alternate constructor: read hyperparameters from `filename`."""
        hparams = Model.hparams_from_file(filename)
        return cls(hparams, sequence_encoder)

    @property
    def timeslice_size(self):
        return self.sequence_encoder.encoded_timeslice_size

    @property
    def rnn_input_size(self):
        return self.sequence_encoder.rnn_input_size

    @property
    def condition_shapes(self):
        """Names and shapes of all the conditioning data this model expects in its condition dicts."""
        return self.sequence_encoder.condition_shapes

    def rnn_cell(self):
        """Build the sub-graph for the RNN cell.

        The result is cached, so the same sub-graph is re-used on later calls.
        """
        if self._rnn_cell is None:
            self._rnn_cell = make_rnn_cell(self.hparams.rnn_layer_sizes,
                dropout_keep_prob=self.hparams.dropout_keep_prob,
                attn_length=self.hparams.attn_length)
        return self._rnn_cell

    def initial_state(self, batch_size):
        """Get an RNN initial (zero) state vector for a given batch size."""
        return self.rnn_cell().zero_state(batch_size, tf.float32)

    def default_initial_timeslice(self):
        """Initial timeslice to use as input in the absence of any priming inputs.

        By default, this uses the encoder's empty timeslice (which is a zero vector).
        """
        return self.sequence_encoder.timeslice_encoder.empty_timeslice

    def batch_to_condition_dict(self, batch):
        """Copy the conditioning fields of a training batch into a new dict."""
        return { name: batch[name] for name in self.condition_shapes.keys() }

    def next_rnn_input(self, timeslice_history, condition_dict):
        """Build the next RNN input vector from the timeslice history and conditioning info."""
        index = len(timeslice_history) - 1
        return self.sequence_encoder.rnn_input_for_timeslice(timeslice_history, index, condition_dict)

    def run_rnn(self, initial_state, rnn_inputs):
        """Run the RNN cell over `rnn_inputs`, starting from `initial_state`.

        Returns a (final_state, outputs) pair.
        """
        cell = self.rnn_cell()
        outputs, final_state = tf.nn.dynamic_rnn(
            cell, rnn_inputs, initial_state=initial_state, parallel_iterations=1,
            swap_memory=True)
        return final_state, outputs

    @abc.abstractmethod
    def get_step_dist(self, rnn_outputs, condition_dict):
        """
        Given the output(s) from the RNN, compute the distribution over time slice(s)
        Arguments:
            - rnn_outputs: a 3D tensor (shape is [batch, time, depth])
            - condition_dict: a dictionary of tensors that provide extra conditioning info for the
                distribution.
        Return value:
            - A Distribution object. Collapses [batch, time] into one dimension and models entries as IID.
                When used for training, batch will be e.g. 128 and time will be the maximum sequence length in the batch.
                When used for sampling, batch will typically be 1 (or more, for e.g. SMC), and time will be 1.
        """

    @abc.abstractmethod
    def eval_factor_function(self, sample, condition):
        """
        Given the sample for the current timeslice and a condition dictionary, return a score in log-space.
        Sampling algorithms, such as particle filtering, can take this into account.
        By default, returns 0. Subclasses can override this behavior.
        Condition is an array of 1s, 0s, and -1s that specifies what the sample should be.
        """

    def training_loss(self, batch):
        """Average negative log-likelihood per timeslice for a training batch.

        Override of method from Model class. Assumes `batch` contains
        'lengths' and 'outputs' fields. NOTE: during training, timeslices
        plus conditioning info have already been processed into a single,
        unified RNN input vector, provided as the 'inputs' field of the
        batch; conditioning info is still separately available for building
        timeslice distributions.
        """
        inputs = batch['inputs']
        targets = batch['outputs']
        lengths = batch['lengths']
        batch_size = tf.shape(targets)[0]
        _, rnn_outputs = self.run_rnn(self.initial_state(batch_size), inputs)
        dist = self.get_step_dist(rnn_outputs, self.batch_to_condition_dict(batch))
        targets_flat = tf.reshape(targets, [-1, self.timeslice_size])
        # Mask out the stuff that was past the end of each training sequence (due to padding)
        mask_flat = tf.reshape(tf.sequence_mask(lengths, dtype=tf.float32), [-1])
        # Compute log probability (We assume that this gives a vector of probabilities, one for each
        # timeslice entry)
        log_prob = dist.log_prob(targets_flat)
        # Sum across timeslice entries, then across time+batch
        log_prob = tf.reduce_sum(log_prob, 1)
        num_time_slices = tf.to_float(tf.reduce_sum(lengths))
        log_prob = tf.reduce_sum(mask_flat * log_prob) / num_time_slices
        return -log_prob
from common.models.model import Model
from common.models.utils import make_rnn_cell
import tensorflow as tf
import numpy as np
"""
Abstract base class for generative sequence models
"""
class SequenceGenerativeModel(Model):
    """Abstract base class for generative sequence models."""
    # NOTE(review): `__metaclass__` is the Python 2 mechanism; under Python 3
    # this attribute is ignored and `metaclass=abc.ABCMeta` would be needed.
    __metaclass__ = abc.ABCMeta

    def __init__(self, hparams, sequence_encoder):
        super(SequenceGenerativeModel, self).__init__(hparams)
        self.sequence_encoder = sequence_encoder
        self._rnn_cell = None  # built lazily by rnn_cell()

    @classmethod
    def from_file(cls, filename, sequence_encoder):
        """Alternate constructor: read hyperparameters from `filename`."""
        hparams = Model.hparams_from_file(filename)
        return cls(hparams, sequence_encoder)

    @property
    def timeslice_size(self):
        return self.sequence_encoder.encoded_timeslice_size

    @property
    def rnn_input_size(self):
        return self.sequence_encoder.rnn_input_size

    @property
    def condition_shapes(self):
        """Names and shapes of all the conditioning data this model expects in its condition dicts."""
        return self.sequence_encoder.condition_shapes

    def rnn_cell(self):
        """Build the sub-graph for the RNN cell.

        The result is cached, so the same sub-graph is re-used on later calls.
        """
        if self._rnn_cell is None:
            self._rnn_cell = make_rnn_cell(self.hparams.rnn_layer_sizes,
                dropout_keep_prob=self.hparams.dropout_keep_prob,
                attn_length=self.hparams.attn_length)
        return self._rnn_cell

    def initial_state(self, batch_size):
        """Get an RNN initial (zero) state vector for a given batch size."""
        return self.rnn_cell().zero_state(batch_size, tf.float32)

    def default_initial_timeslice(self):
        """Initial timeslice to use as input in the absence of any priming inputs.

        By default, this uses the encoder's empty timeslice (which is a zero vector).
        """
        return self.sequence_encoder.timeslice_encoder.empty_timeslice

    def batch_to_condition_dict(self, batch):
        """Copy the conditioning fields of a training batch into a new dict."""
        return { name: batch[name] for name in self.condition_shapes.keys() }

    def next_rnn_input(self, timeslice_history, condition_dict):
        """Build the next RNN input vector from the timeslice history and conditioning info."""
        index = len(timeslice_history) - 1
        return self.sequence_encoder.rnn_input_for_timeslice(timeslice_history, index, condition_dict)

    def run_rnn(self, initial_state, rnn_inputs):
        """Run the RNN cell over `rnn_inputs`, starting from `initial_state`.

        Returns a (final_state, outputs) pair.
        """
        cell = self.rnn_cell()
        outputs, final_state = tf.nn.dynamic_rnn(
            cell, rnn_inputs, initial_state=initial_state, parallel_iterations=1,
            swap_memory=True)
        return final_state, outputs

    @abc.abstractmethod
    def get_step_dist(self, rnn_outputs, condition_dict):
        """
        Given the output(s) from the RNN, compute the distribution over time slice(s)
        Arguments:
            - rnn_outputs: a 3D tensor (shape is [batch, time, depth])
            - condition_dict: a dictionary of tensors that provide extra conditioning info for the
                distribution.
        Return value:
            - A Distribution object. Collapses [batch, time] into one dimension and models entries as IID.
                When used for training, batch will be e.g. 128 and time will be the maximum sequence length in the batch.
                When used for sampling, batch will typically be 1 (or more, for e.g. SMC), and time will be 1.
        """

    @abc.abstractmethod
    def eval_factor_function(self, sample, condition):
        """
        Given the sample for the current timeslice and a condition dictionary, return a score in log-space.
        Sampling algorithms, such as particle filtering, can take this into account.
        By default, returns 0. Subclasses can override this behavior.
        Condition is an array of 1s, 0s, and -1s that specifies what the sample should be.
        """

    def training_loss(self, batch):
        """Average negative log-likelihood per timeslice for a training batch.

        Override of method from Model class. Assumes `batch` contains
        'lengths' and 'outputs' fields. NOTE: during training, timeslices
        plus conditioning info have already been processed into a single,
        unified RNN input vector, provided as the 'inputs' field of the
        batch; conditioning info is still separately available for building
        timeslice distributions.
        """
        inputs = batch['inputs']
        targets = batch['outputs']
        lengths = batch['lengths']
        batch_size = tf.shape(targets)[0]
        _, rnn_outputs = self.run_rnn(self.initial_state(batch_size), inputs)
        dist = self.get_step_dist(rnn_outputs, self.batch_to_condition_dict(batch))
        targets_flat = tf.reshape(targets, [-1, self.timeslice_size])
        # Mask out the stuff that was past the end of each training sequence (due to padding)
        mask_flat = tf.reshape(tf.sequence_mask(lengths, dtype=tf.float32), [-1])
        # Compute log probability (We assume that this gives a vector of probabilities, one for each
        # timeslice entry)
        log_prob = dist.log_prob(targets_flat)
        # Sum across timeslice entries, then across time+batch
        log_prob = tf.reduce_sum(log_prob, 1)
        num_time_slices = tf.to_float(tf.reduce_sum(lengths))
        log_prob = tf.reduce_sum(mask_flat * log_prob) / num_time_slices
        return -log_prob
from collections import Counter
from dataclasses import dataclass
from typing import Tuple, Union
class BitExpr:
    """Base class for bit-level boolean expressions (Bit, And, Xor).

    Ordering by repr gives expressions a deterministic, canonical sort
    order, used when sorting the children tuples of And/Xor nodes.
    """
    def __lt__(self, other):
        return repr(self) < repr(other)
@dataclass(frozen=True)
class Bit(BitExpr):
    """A single bit: the constant 0 or 1, or a named symbolic bit.

    Operators simplify eagerly using boolean identities over GF(2);
    OR is rewritten as x | y == x ^ y ^ (x & y).
    """
    value: Union[int, str] = 0  # Value should be 0, 1, or a unique name.
    def __str__(self):
        return str(self.value)
    def __invert__(self) -> BitExpr:
        # ~0 == 1, ~1 == 0; for a named bit, ~x == 1 ^ x.
        if self.value == 0:
            return Bit(1)
        if self.value == 1:
            return Bit(0)
        return Xor.new(Bit(1), self)
    def __and__(self, other: BitExpr) -> BitExpr:
        # Constant folding: 0 & x == 0, 1 & x == x, x & x == x.
        if self.value == 0:
            return self
        if self.value == 1:
            return other
        if isinstance(other, Bit):
            if other.value == 0:
                return other
            if other.value == 1 or self == other:
                return self
        if isinstance(other, Xor):
            # Distribute AND over XOR.
            return Xor.new(*[self & c for c in other.children])
        return And.new(self, other)
    def __rand__(self, other: BitExpr) -> BitExpr:
        return self & other
    def __xor__(self, other: BitExpr) -> BitExpr:
        # 0 ^ x == x, 1 ^ x == ~x; otherwise build a canonical Xor node.
        if self.value == 0:
            return other
        if self.value == 1:
            return ~other
        if isinstance(other, Bit):
            if other.value == 0:
                return self
            if other.value == 1:
                return ~self
        return Xor.new(self, other)
    def __rxor__(self, other: BitExpr) -> BitExpr:
        return self ^ other
    def __or__(self, other: BitExpr) -> BitExpr:
        # 0 | x == x, 1 | x == 1; otherwise x | y == x ^ y ^ (x & y).
        if self.value == 0:
            return other
        if self.value == 1:
            return self
        if isinstance(other, Bit):
            if other.value == 0:
                return self
            if other.value == 1:
                return other
        return Xor.new(self, other, self & other)
    def __ror__(self, other: BitExpr) -> BitExpr:
        return self | other
@dataclass(frozen=True)
class And(BitExpr):
    """Binary AND, represented with `&`."""
    # Conjuncts in canonical sorted order; constants 0/1 are never stored.
    children: Tuple[BitExpr, ...]
    def __str__(self):
        return "&".join(str(n) for n in self.children)
    @staticmethod
    def new(*children) -> BitExpr:
        """Canonicalizing constructor: flattens nested ANDs, folds the
        constants 0/1, and deduplicates children (x & x == x)."""
        # Flatten any direct children that are also ANDs of something.
        new_children = set()
        for child in children:
            if isinstance(child, And):
                new_children.update(child.children)
            elif isinstance(child, Bit):
                # 0 & anything == 0; the constant 1 is the identity and is dropped.
                if child.value == 0:
                    return Bit(0)
                if child.value != 1:
                    new_children.add(child)
            else:
                raise NotImplementedError
        assert Bit(0) not in new_children and Bit(1) not in new_children
        if len(new_children) == 1:
            return new_children.pop()
        # Otherwise aggregate all children that are being ANDed together.
        return And(children=tuple(sorted(new_children)))
    def __invert__(self) -> BitExpr:
        # ~x == 1 ^ x.
        return Xor.new(Bit(1), self)
    def __and__(self, other):
        if isinstance(other, Bit):
            return other & self
        if isinstance(other, And):
            return And.new(*(self.children + other.children))
        if isinstance(other, Xor):
            # Distribute AND over XOR.
            return Xor.new(*[c & self for c in other.children])
        raise NotImplementedError
    def __rand__(self, other: BitExpr) -> BitExpr:
        return self & other
    def __xor__(self, other: BitExpr) -> BitExpr:
        return Xor.new(self, other)
    def __rxor__(self, other: BitExpr) -> BitExpr:
        return self ^ other
    def __or__(self, other: BitExpr) -> BitExpr:
        if isinstance(other, Bit):
            return other | self
        # x | y == x ^ y ^ (x & y) over GF(2).
        return Xor.new(self, other, self & other)
    def __ror__(self, other: BitExpr) -> BitExpr:
        return self | other
@dataclass(frozen=True)
class Xor(BitExpr):
    """Binary XOR, represented with `^`."""
    # XOR-ed sub-expressions in canonical sorted order.
    children: Tuple[BitExpr, ...]
    def __str__(self) -> str:
        return "^".join(str(c) for c in self.children)
    @staticmethod
    def new(*children):
        """Canonicalizing constructor: flattens nested XORs, drops the
        constant 0, and cancels duplicate pairs (x ^ x == 0)."""
        child_counts = Counter()
        for child in children:
            if isinstance(child, Xor):
                child_counts.update(child.children)
            elif child != Bit(0):
                child_counts[child] += 1
        # Children that match each other cancel out, so we can keep just the count % 2.
        new_children = [child for child, count in child_counts.items() if count % 2]
        if not new_children:
            return Bit(0)
        if len(new_children) == 1:
            return new_children[0]
        return Xor(children=tuple(sorted(new_children)))
    def __invert__(self) -> BitExpr:
        # ~x == 1 ^ x.
        return Xor.new(Bit(1), *self.children)
    def __and__(self, other: BitExpr) -> BitExpr:
        if isinstance(other, Bit) or isinstance(other, And):
            return other & self
        # XOR & XOR: distribute AND over both sums.
        return Xor.new(*[a & b for a in self.children for b in other.children])
    def __rand__(self, other: BitExpr) -> BitExpr:
        # Reflected variants added for consistency with Bit and And.
        return self & other
    def __xor__(self, other: BitExpr) -> BitExpr:
        if isinstance(other, Bit) or isinstance(other, And):
            return other ^ self
        return Xor.new(*(self.children + other.children))
    def __rxor__(self, other: BitExpr) -> BitExpr:
        return self ^ other
    def __or__(self, other: BitExpr) -> BitExpr:
        if isinstance(other, Bit) or isinstance(other, And):
            return other | self
        # x | y == x ^ y ^ (x & y) over GF(2).
        return Xor.new(self, other, self & other)
    def __ror__(self, other: BitExpr) -> BitExpr:
        return self | other
from dataclasses import dataclass
from typing import Tuple, Union
class BitExpr:
    """Common base for bit expressions; supplies a canonical ordering."""
    def __lt__(self, other):
        """Order expressions lexicographically by their repr strings."""
        self_key = repr(self)
        other_key = repr(other)
        return self_key < other_key
@dataclass(frozen=True)
class Bit(BitExpr):
    """A single bit: the constant 0 or 1, or a named symbolic bit.

    Operators simplify eagerly using boolean identities over GF(2);
    OR is rewritten as x | y == x ^ y ^ (x & y).
    """
    value: Union[int, str] = 0  # Value should be 0, 1, or a unique name.
    def __str__(self):
        return str(self.value)
    def __invert__(self) -> BitExpr:
        # ~0 == 1, ~1 == 0; for a named bit, ~x == 1 ^ x.
        if self.value == 0:
            return Bit(1)
        if self.value == 1:
            return Bit(0)
        return Xor.new(Bit(1), self)
    def __and__(self, other: BitExpr) -> BitExpr:
        # Constant folding: 0 & x == 0, 1 & x == x, x & x == x.
        if self.value == 0:
            return self
        if self.value == 1:
            return other
        if isinstance(other, Bit):
            if other.value == 0:
                return other
            if other.value == 1 or self == other:
                return self
        if isinstance(other, Xor):
            # Distribute AND over XOR.
            return Xor.new(*[self & c for c in other.children])
        return And.new(self, other)
    def __rand__(self, other: BitExpr) -> BitExpr:
        return self & other
    def __xor__(self, other: BitExpr) -> BitExpr:
        # 0 ^ x == x, 1 ^ x == ~x; otherwise build a canonical Xor node.
        if self.value == 0:
            return other
        if self.value == 1:
            return ~other
        if isinstance(other, Bit):
            if other.value == 0:
                return self
            if other.value == 1:
                return ~self
        return Xor.new(self, other)
    def __rxor__(self, other: BitExpr) -> BitExpr:
        return self ^ other
    def __or__(self, other: BitExpr) -> BitExpr:
        # 0 | x == x, 1 | x == 1; otherwise x | y == x ^ y ^ (x & y).
        if self.value == 0:
            return other
        if self.value == 1:
            return self
        if isinstance(other, Bit):
            if other.value == 0:
                return self
            if other.value == 1:
                return other
        return Xor.new(self, other, self & other)
    def __ror__(self, other: BitExpr) -> BitExpr:
        return self | other
@dataclass(frozen=True)
class And(BitExpr):
    """Binary AND, represented with `&`."""
    # Conjuncts in canonical sorted order; constants 0/1 are never stored.
    children: Tuple[BitExpr, ...]
    def __str__(self):
        return "&".join(str(n) for n in self.children)
    @staticmethod
    def new(*children) -> BitExpr:
        """Canonicalizing constructor: flattens nested ANDs, folds the
        constants 0/1, and deduplicates children (x & x == x)."""
        # Flatten any direct children that are also ANDs of something.
        new_children = set()
        for child in children:
            if isinstance(child, And):
                new_children.update(child.children)
            elif isinstance(child, Bit):
                # 0 & anything == 0; the constant 1 is the identity and is dropped.
                if child.value == 0:
                    return Bit(0)
                if child.value != 1:
                    new_children.add(child)
            else:
                raise NotImplementedError
        assert Bit(0) not in new_children and Bit(1) not in new_children
        if len(new_children) == 1:
            return new_children.pop()
        # Otherwise aggregate all children that are being ANDed together.
        return And(children=tuple(sorted(new_children)))
    def __invert__(self) -> BitExpr:
        # ~x == 1 ^ x.
        return Xor.new(Bit(1), self)
    def __and__(self, other):
        if isinstance(other, Bit):
            return other & self
        if isinstance(other, And):
            return And.new(*(self.children + other.children))
        if isinstance(other, Xor):
            # Distribute AND over XOR.
            return Xor.new(*[c & self for c in other.children])
        raise NotImplementedError
    def __rand__(self, other: BitExpr) -> BitExpr:
        return self & other
    def __xor__(self, other: BitExpr) -> BitExpr:
        return Xor.new(self, other)
    def __rxor__(self, other: BitExpr) -> BitExpr:
        return self ^ other
    def __or__(self, other: BitExpr) -> BitExpr:
        if isinstance(other, Bit):
            return other | self
        # x | y == x ^ y ^ (x & y) over GF(2).
        return Xor.new(self, other, self & other)
    def __ror__(self, other: BitExpr) -> BitExpr:
        return self | other
@dataclass(frozen=True)
class Xor(BitExpr):
    """Binary XOR, represented with `^`."""
    # XOR-ed sub-expressions in canonical sorted order.
    children: Tuple[BitExpr, ...]
    def __str__(self) -> str:
        return "^".join(str(c) for c in self.children)
    @staticmethod
    def new(*children):
        """Canonicalizing constructor: flattens nested XORs, drops the
        constant 0, and cancels duplicate pairs (x ^ x == 0)."""
        child_counts = Counter()
        for child in children:
            if isinstance(child, Xor):
                child_counts.update(child.children)
            elif child != Bit(0):
                child_counts[child] += 1
        # Children that match each other cancel out, so we can keep just the count % 2.
        new_children = [child for child, count in child_counts.items() if count % 2]
        if not new_children:
            return Bit(0)
        if len(new_children) == 1:
            return new_children[0]
        return Xor(children=tuple(sorted(new_children)))
    def __invert__(self) -> BitExpr:
        # ~x == 1 ^ x.
        return Xor.new(Bit(1), *self.children)
    def __and__(self, other: BitExpr) -> BitExpr:
        if isinstance(other, Bit) or isinstance(other, And):
            return other & self
        # XOR & XOR: distribute AND over both sums.
        return Xor.new(*[a & b for a in self.children for b in other.children])
    def __rand__(self, other: BitExpr) -> BitExpr:
        # Reflected variants added for consistency with Bit and And.
        return self & other
    def __xor__(self, other: BitExpr) -> BitExpr:
        if isinstance(other, Bit) or isinstance(other, And):
            return other ^ self
        return Xor.new(*(self.children + other.children))
    def __rxor__(self, other: BitExpr) -> BitExpr:
        return self ^ other
    def __or__(self, other: BitExpr) -> BitExpr:
        if isinstance(other, Bit) or isinstance(other, And):
            return other | self
        # x | y == x ^ y ^ (x & y) over GF(2).
        return Xor.new(self, other, self & other)
    def __ror__(self, other: BitExpr) -> BitExpr:
        return self | other
from __future__ import print_function
import sys
import time
from test_example_node import ExampleNodeTester
from ariac_example import ariac_example
import rospy
import rostest
class GripperTester(ExampleNodeTester):
    """Exercises the vacuum gripper: pick up a product, carry it, drop it."""

    def test(self):
        self.comp_class = ariac_example.MyCompetitionClass()
        ariac_example.connect_callbacks(self.comp_class)
        time.sleep(1.0)
        # Pre-defined initial pose because sometimes the arm starts "droopy"
        self._send_arm_to_initial_pose()
        # Pre-defined pose that puts the gripper in contact with a product.
        self._send_arm_to_product()
        # Enable the gripper so that it picks up the product.
        self._test_enable_gripper()
        # Move the product over the shipping box using a pre-defined sequence of poses.
        self._send_arm_to_shipping_box()
        self.assertTrue(
            self.comp_class.current_gripper_state.enabled, 'Gripper no longer enabled')
        self.assertTrue(
            self.comp_class.current_gripper_state.attached, 'Product no longer attached')
        # Disable the gripper so that it drops the product.
        self._test_disable_gripper()
        time.sleep(1.0)

    def _test_enable_gripper(self):
        """Enable the gripper and verify it reports enabled + attached."""
        success = self._enable_gripper()
        self.assertTrue(success, 'Gripper not successfully controlled')
        time.sleep(1.0)
        self.assertTrue(
            self.comp_class.current_gripper_state.enabled, 'Gripper not successfully enabled')
        self.assertTrue(
            self.comp_class.current_gripper_state.attached, 'Product not successfully attached')

    def _enable_gripper(self):
        success = ariac_example.control_gripper(True)
        time.sleep(0.5)  # give the gripper state topic time to update
        return success

    def _test_disable_gripper(self):
        """Disable the gripper and verify it reports disabled + detached."""
        success = self._disable_gripper()
        self.assertTrue(success, 'Gripper not successfully controlled')
        time.sleep(1.0)
        self.assertFalse(
            self.comp_class.current_gripper_state.enabled, 'Gripper not successfully disabled')
        self.assertFalse(
            self.comp_class.current_gripper_state.attached, 'Product not successfully detached')

    def _disable_gripper(self):
        success = ariac_example.control_gripper(False)
        time.sleep(0.5)  # give the gripper state topic time to update
        return success

    def _send_arm_to_product(self):
        """Step through joint-space waypoints until the gripper touches a product."""
        trajectory = [
            [-1.272, -1.102, 0.050, 1.112, -1.329, 1.360, 0.902, -0.663],
            [0.444, -1.885, -1.726, 1.945, -0.941, 1.754, -2.380, -0.018],
            [0.025, -1.484, -2.085, 0.046, -1.041, 1.317, -2.134, 0.259],
            [0.100, -1.751, -2.046, 0.010, -1.11, 1.312, -2.088, 0.190],
        ]
        for positions in trajectory:
            self.comp_class.send_arm_to_state(positions)
            time.sleep(1.5)

    def _send_arm_to_shipping_box(self):
        """Carry the attached product over the shipping box."""
        trajectory = [
            [0.216, -1.672, -2.10, 0.584, -1.140, 1.574, -2.380, 0.150],
            [0.678, -2.060, -2.031, 1.876, -1.107, 1.914, -3.020, 0.294],
            [1.601, -1.893, -2.465, 0.800, -0.893, 1.919, -2.572, 0.887],
            [2.795, -2.009, -2.316, 0.556, -0.746, 1.745, -1.215, 0.206],
        ]
        for positions in trajectory:
            self.comp_class.send_arm_to_state(positions)
            time.sleep(1.0)
if __name__ == '__main__':
    rospy.init_node('test_gripper', anonymous=True)
    # Wait until /clock is being published; this can take an unpredictable
    # amount of time when we're downloading models.
    while rospy.Time.now().to_sec() == 0.0:
        print('Waiting for Gazebo to start...')
        time.sleep(1.0)
    # Take an extra nap, to allow plugins to be loaded
    time.sleep(12.0)
    print('OK, starting test.')
    rostest.run('test_ariac', 'test_gripper', GripperTester, sys.argv)
from __future__ import print_function
import sys
import time
from test_example_node import ExampleNodeTester
from ariac_example import ariac_example
import rospy
import rostest
class GripperTester(ExampleNodeTester):
    """rostest case exercising the ARIAC vacuum gripper end to end:
    pick a product, carry it to the shipping box, then release it."""

    def test(self):
        """Full pick-carry-drop scenario; asserts gripper state at each step."""
        self.comp_class = ariac_example.MyCompetitionClass()
        ariac_example.connect_callbacks(self.comp_class)
        time.sleep(1.0)
        # Pre-defined initial pose because sometimes the arm starts "droopy"
        self._send_arm_to_initial_pose()
        # Pre-defined pose that puts the gripper in contact with a product.
        self._send_arm_to_product()
        # Enable the gripper so that it picks up the product.
        self._test_enable_gripper()
        # Move the product over the shipping box using a pre-defined sequence of poses.
        self._send_arm_to_shipping_box()
        self.assertTrue(
            self.comp_class.current_gripper_state.enabled, 'Gripper no longer enabled')
        self.assertTrue(
            self.comp_class.current_gripper_state.attached, 'Product no longer attached')
        # Disable the gripper so that it drops the product.
        self._test_disable_gripper()
        time.sleep(1.0)

    def _test_enable_gripper(self):
        """Enable the gripper and assert it reports enabled and attached."""
        success = self._enable_gripper()
        self.assertTrue(success, 'Gripper not successfully controlled')
        # Allow the gripper state topic to reflect the change.
        time.sleep(1.0)
        self.assertTrue(
            self.comp_class.current_gripper_state.enabled, 'Gripper not successfully enabled')
        self.assertTrue(
            self.comp_class.current_gripper_state.attached, 'Product not successfully attached')

    def _enable_gripper(self):
        """Call the gripper-control service with enable=True; return success."""
        success = ariac_example.control_gripper(True)
        time.sleep(0.5)
        return success

    def _test_disable_gripper(self):
        """Disable the gripper and assert the product is released."""
        success = self._disable_gripper()
        self.assertTrue(success, 'Gripper not successfully controlled')
        time.sleep(1.0)
        self.assertFalse(
            self.comp_class.current_gripper_state.enabled, 'Gripper not successfully disabled')
        self.assertFalse(
            self.comp_class.current_gripper_state.attached, 'Product not successfully dettached')

    def _disable_gripper(self):
        """Call the gripper-control service with enable=False; return success."""
        success = ariac_example.control_gripper(False)
        time.sleep(0.5)
        return success

    def _send_arm_to_product(self):
        """Step through joint-space waypoints ending with the gripper on a product."""
        trajectory = [
            [-1.272, -1.102, 0.050, 1.112, -1.329, 1.360, 0.902, -0.663],
            [0.444, -1.885, -1.726, 1.945, -0.941, 1.754, -2.380, -0.018],
            [0.025, -1.484, -2.085, 0.046, -1.041, 1.317, -2.134, 0.259],
            [0.100, -1.751, -2.046, 0.010, -1.11, 1.312, -2.088, 0.190],
        ]
        for positions in trajectory:
            self.comp_class.send_arm_to_state(positions)
            # Let the controller reach each waypoint before sending the next.
            time.sleep(1.5)

    def _send_arm_to_shipping_box(self):
        """Step through joint-space waypoints carrying the product to the box."""
        trajectory = [
            [0.216, -1.672, -2.10, 0.584, -1.140, 1.574, -2.380, 0.150],
            [0.678, -2.060, -2.031, 1.876, -1.107, 1.914, -3.020, 0.294],
            [1.601, -1.893, -2.465, 0.800, -0.893, 1.919, -2.572, 0.887],
            [2.795, -2.009, -2.316, 0.556, -0.746, 1.745, -1.215, 0.206],
        ]
        for positions in trajectory:
            self.comp_class.send_arm_to_state(positions)
            # Let the controller reach each waypoint before sending the next.
            time.sleep(1.0)
if __name__ == '__main__':
    rospy.init_node('test_gripper', anonymous=True)
    # Wait until /clock is being published; this can take an unpredictable
    # amount of time when we're downloading models.
    while rospy.Time.now().to_sec() == 0.0:
        print('Waiting for Gazebo to start...')
        time.sleep(1.0)
    # Take an extra nap, to allow plugins to be loaded
    time.sleep(12.0)
    print('OK, starting test.')
    # Hand the GripperTester suite to rostest for execution.
    rostest.run('test_ariac', 'test_gripper', GripperTester, sys.argv) | 0.552298 | 0.441854
# 美拍视频下载
import sys
import os
import re
import urlparse
import urllib
import urllib2
import shutil
from BeautifulSoup import BeautifulSoup
path_ts = 'E:\\Share\\ts'
url_prefix = 'http://media-pili.1iptv.com'
url_web = 'http://www.meipai.com/media/575186182'
def find_text(reg, text):
    """Return every non-overlapping match of pattern ``reg`` in ``text``."""
    return re.findall(reg, text)
def get_m3u8(url):
    """Fetch the video page at *url* and return the first single-quoted
    string on a script line mentioning 'm3u8' (implicitly None if absent).

    Python 2 only (urllib2, BeautifulSoup 3).
    """
    # Matches any single-quoted string; applied only to lines containing m3u8.
    reg = "[\'](.*?)[\']"
    user_agent = "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"
    headers = {'User-Agent': user_agent}
    request = urllib2.Request(url, headers = headers)
    response = urllib2.urlopen(request)
    # print response.read()
    soup = BeautifulSoup(response.read())
    for content in soup.findAll("script"):
        for line in content.text.split('\n'):
            if 'm3u8' in line:
                result = find_text(reg, line)
                return result[0]
def file_handler(filename):
    """Parse an m3u8 playlist file into segment URLs and local file names.

    Non-comment, non-blank lines are treated as server-relative segment
    paths; each is joined with the module-level ``url_prefix`` to form a
    download URL, and its basename is kept as the local .ts file name.

    Returns a dict: {'url': [full urls...], 'ts': [basenames...]}.
    """
    list_ts = list()
    list_url = list()
    with open(filename, 'r') as f:
        for line in f:
            entry = line.strip()
            # Skip blank lines and m3u8 directives (lines starting with '#').
            # The original only tested line[0] != '#', so blank lines
            # produced a bogus prefix-only URL and an empty ts name.
            if not entry or entry.startswith('#'):
                continue
            list_url.append(url_prefix + entry)
            list_ts.append(entry.split('/')[-1])
    return {'url': list_url, 'ts': list_ts}
def download(url, path):
    """Download *url* into directory *path* and return the basename used.

    Existing files are not re-downloaded, so interrupted runs can resume.
    Python 2 only (urlparse/urllib modules).
    """
    filename = re.split('/', urlparse.urlparse(url).path)[-1]
    filepath = os.path.join(path, filename)
    # Skip files already present so the script can be resumed.
    if not os.path.isfile(filepath):
        urllib.urlretrieve(url, filepath)
    return filename
def clear_dir(path):
    """Best-effort removal of every file and subdirectory inside *path*.

    Errors are printed and otherwise ignored.  Python 2 only (the
    ``except Exception, e`` syntax is a SyntaxError on Python 3).
    """
    for filename in os.listdir(path):
        file_path = os.path.join(path, filename)
        try:
            if os.path.isfile(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception, e:
            print e
def main():
    """Resolve the m3u8 playlist for the page URL given on the command
    line, download every .ts segment, concatenate them into one file,
    then clear the working directory."""
    url_m3u8 = get_m3u8(sys.argv[1])
    file_m3u8 = download(url_m3u8, path_ts)
    # print file_m3u8
    data = file_handler(os.path.join(path_ts, file_m3u8))
    list_url = data['url']
    list_ts = data['ts']
    for url in list_url:
        download(url, path_ts)
    # NOTE(review): the merged file is written to the current working
    # directory, not path_ts — presumably intentional since clear_dir()
    # wipes path_ts right afterwards; confirm.
    file_merged = file_m3u8.split('.')[0] + '.ts'
    # print file_merged
    with open(file_merged, 'wb') as merged:
        for ts in list_ts:
            with open(os.path.join(path_ts, ts), 'rb') as mergefile:
                shutil.copyfileobj(mergefile, merged)
    clear_dir(path_ts)
if __name__ == '__main__':
    main() | meipai.py |
# 美拍视频下载
import sys
import os
import re
import urlparse
import urllib
import urllib2
import shutil
from BeautifulSoup import BeautifulSoup
path_ts = 'E:\\Share\\ts'
url_prefix = 'http://media-pili.1iptv.com'
url_web = 'http://www.meipai.com/media/575186182'
def find_text(reg, text):
    """Return all non-overlapping matches of pattern ``reg`` in ``text``."""
    matches = re.findall(reg, text)
    return matches
def get_m3u8(url):
    """Fetch the video page at *url* and return the first single-quoted
    string on a script line mentioning 'm3u8' (implicitly None if absent).

    Python 2 only (urllib2, BeautifulSoup 3).
    """
    # Matches any single-quoted string; applied only to lines containing m3u8.
    reg = "[\'](.*?)[\']"
    user_agent = "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"
    headers = {'User-Agent': user_agent}
    request = urllib2.Request(url, headers = headers)
    response = urllib2.urlopen(request)
    # print response.read()
    soup = BeautifulSoup(response.read())
    for content in soup.findAll("script"):
        for line in content.text.split('\n'):
            if 'm3u8' in line:
                result = find_text(reg, line)
                return result[0]
def file_handler(filename):
    """Parse an m3u8 playlist file into segment URLs and local file names.

    Non-comment, non-blank lines are treated as server-relative segment
    paths; each is joined with the module-level ``url_prefix`` to form a
    download URL, and its basename is kept as the local .ts file name.

    Returns a dict: {'url': [full urls...], 'ts': [basenames...]}.
    """
    list_ts = list()
    list_url = list()
    with open(filename, 'r') as f:
        for line in f:
            entry = line.strip()
            # Skip blank lines and m3u8 directives (lines starting with '#').
            # The original only tested line[0] != '#', so blank lines
            # produced a bogus prefix-only URL and an empty ts name.
            if not entry or entry.startswith('#'):
                continue
            list_url.append(url_prefix + entry)
            list_ts.append(entry.split('/')[-1])
    return {'url': list_url, 'ts': list_ts}
def download(url, path):
    """Download *url* into directory *path* and return the basename used.

    Existing files are not re-downloaded, so interrupted runs can resume.
    Python 2 only (urlparse/urllib modules).
    """
    filename = re.split('/', urlparse.urlparse(url).path)[-1]
    filepath = os.path.join(path, filename)
    # Skip files already present so the script can be resumed.
    if not os.path.isfile(filepath):
        urllib.urlretrieve(url, filepath)
    return filename
def clear_dir(path):
    """Best-effort removal of every file and subdirectory inside *path*.

    Errors are printed and otherwise ignored.  Python 2 only (the
    ``except Exception, e`` syntax is a SyntaxError on Python 3).
    """
    for filename in os.listdir(path):
        file_path = os.path.join(path, filename)
        try:
            if os.path.isfile(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception, e:
            print e
def main():
    """Resolve the m3u8 playlist for the page URL given on the command
    line, download every .ts segment, concatenate them into one file,
    then clear the working directory."""
    url_m3u8 = get_m3u8(sys.argv[1])
    file_m3u8 = download(url_m3u8, path_ts)
    # print file_m3u8
    data = file_handler(os.path.join(path_ts, file_m3u8))
    list_url = data['url']
    list_ts = data['ts']
    for url in list_url:
        download(url, path_ts)
    # NOTE(review): the merged file is written to the current working
    # directory, not path_ts — presumably intentional since clear_dir()
    # wipes path_ts right afterwards; confirm.
    file_merged = file_m3u8.split('.')[0] + '.ts'
    # print file_merged
    with open(file_merged, 'wb') as merged:
        for ts in list_ts:
            with open(os.path.join(path_ts, ts), 'rb') as mergefile:
                shutil.copyfileobj(mergefile, merged)
    clear_dir(path_ts)
if __name__ == '__main__':
    main() | 0.081926 | 0.056888
from django.shortcuts import render
from django.core.paginator import Paginator
from backend.serializers import KindAnimeSerializer, KindSerializer, AnimeSerializer, EpisodeSerializer, UserSerializer, PersonalKindSerializer
from backend.forms import AuthForm, UserCreateForm, MainForm
from django.views.generic import CreateView
from django.urls import reverse_lazy
from django.contrib.auth import views as auth_views
from backend.models import Kind, Anime, Episode, PersonalKind, Watching, KindAnime
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.db.utils import IntegrityError
def kind_inity():
    """Return the names of all Kind rows, for nav-bar rendering."""
    return [
        KindSerializer(kind).data["kind_name"]
        for kind in Kind.objects.all()
    ]
def index(request):
    """Home page: paginated anime list, optionally filtered by ?search=."""
    form = MainForm()
    search = request.GET.get('search')
    if search is not None:
        query_set = Anime.objects.filter(name__icontains=search).order_by('name', 'season')
        # Show every hit on a single page; guard against per_page == 0
        # when the search matches nothing.
        paginator = Paginator(query_set, max(len(query_set), 1))
    else:
        query_set = Anime.objects.order_by('name', 'season')
        paginator = Paginator(query_set, 18)
    if query_set.exists():
        page_obj = paginator.get_page(request.GET.get('page'))
    else:
        page_obj = {}
    return render(request, 'index.html', {
        'form': form,
        'is_blank': bool(not page_obj),
        # Pass the evaluated list for consistency with the other views,
        # which all call kind_inity() rather than passing the callable.
        'kind_list': kind_inity(),
        'page_obj': page_obj,
    })
def anime_ep(request, anime, stagione, ep):
    """Episode player page: bump the view counter, record the watch for
    logged-in users, and suggest titles matching their preferred kinds."""
    querysetAnime = Anime.objects.filter(name=anime, season=stagione)
    identify = None
    # Resolve the anime_id for this (name, season) pair.
    for q in querysetAnime:
        if AnimeSerializer(q).data["name"] == anime and AnimeSerializer(q).data["season"] == stagione:
            identify = AnimeSerializer(q).data['anime_id']
    querysetEpisode = Episode.objects.filter(e_anime=identify, name=str(ep))
    serialized_Episode = None
    try:
        # Increment the view counter; IndexError here means no such episode.
        Episode.objects.filter(e_anime=identify, name=str(ep)).update(seen=(EpisodeSerializer(querysetEpisode[0])).data['seen']+1)
        serialized_Episode = EpisodeSerializer(querysetEpisode[0])
    except IndexError:
        return render(request, '404_not_found.html')
    visual = serialized_Episode.data['seen']
    also_like = set()
    if request.user.is_authenticated:
        serialized_user_id = UserSerializer(User.objects.filter(username=request.user)[0]).data['id']
        queryset_personal_kind = PersonalKind.objects.filter(p_user=serialized_user_id)
        # Collect up to 13 "also like" (name, season) pairs from anime tagged
        # with the user's personal kinds.
        for personal_kind in queryset_personal_kind:
            curr_pk_id = PersonalKindSerializer(personal_kind).data['personal_kind_id']
            for item in KindAnime.objects.filter(ka_kind=curr_pk_id):
                if len(also_like) > 12:
                    break
                # NOTE(review): relies on KindAnime.__str__ producing
                # "<name words> <season> <kind>" — fragile; confirm format.
                tmp = str(item).split(" ")
                anime_name = " ".join(tmp[:-2])
                season = " ".join(tmp[len(tmp)-2:-1])
                also_like.add((anime_name, season))
        try:
            # Record this view in the watch history; a duplicate violates a
            # unique constraint and is deliberately ignored.
            w_user = User.objects.get(username=request.user)
            w_anime = Anime.objects.get(name=anime,season=stagione)
            w_episode = Episode.objects.get(e_anime=identify, name=str(ep))
            w = Watching(w_user=w_user, w_anime=w_anime, w_episode=w_episode)
            w.save()
        except IntegrityError:
            print("[DEBUG] Secondo me è già inserito, poi vedi tu - Cit il DB /[DEBUG]")
            pass
    return render(request, 'media.html', {'kind_list':kind_inity(), 'query':querysetAnime[0], 'anime': anime,'stagione':stagione,'ep':ep, 'ep_link':serialized_Episode.data["path"], 'visual': visual, 'also_like' : also_like})
def anime_ep_list(request, anime, stagione):
    """Episode index for one (anime, season): links for every episode number."""
    querysetAnime = Anime.objects.filter(name=anime, season=stagione)
    serialized_Anime = AnimeSerializer(querysetAnime[0])
    # ep_list spans start_number_episode..last_episode inclusive.
    return render(request, 'index_episodes.html',{'kind_list':kind_inity(), 'anime':anime, 'season':stagione, 'ep_list':range(serialized_Anime.data["start_number_episode"], serialized_Anime.data["last_episode"]+1)})
def profile(request):
    """User profile: liked kinds plus the remaining selectable kinds."""
    if request.user.is_authenticated:
        user_query = get_user_model().objects.filter(username=request.user)
        personal = PersonalKind.objects.filter(p_user=user_query[0])
        not_like = []
        for k in kind_inity():
            # NOTE(review): substring test against str(queryset) — a kind
            # whose name is contained in another liked kind's name would be
            # filtered out incorrectly; verify.
            if k not in str(personal):
                not_like.append(k)
        return render(request, 'profile.html', {'kind_list': kind_inity(), 'personal_list':personal, 'not_personal_list':not_like})
    return render(request, 'forbidden.html')
def admin_control(request):
    """Staff dashboard listing all anime, kinds and episodes."""
    if not (request.user.is_authenticated and request.user.is_staff):
        return render(request, 'forbidden.html')
    update_anime = Anime.objects.order_by('name','season')
    update_kind = Kind.objects.all()
    update_episode = Episode.objects.all()
    return render(request, 'admin_1.html', {'update_anime':update_anime, 'update_kind':update_kind, 'update_episode':update_episode})
def staff_create(request):
    """Staff-only page for creating records."""
    if not (request.user.is_authenticated and request.user.is_staff):
        return render(request, 'forbidden.html')
    return render(request, 'admin_create.html')

def staff_update(request):
    """Staff-only page for updating records."""
    if not (request.user.is_authenticated and request.user.is_staff):
        return render(request, 'forbidden.html')
    return render(request, 'admin_update.html')

def staff_delete(request):
    """Staff-only page for deleting records."""
    if not (request.user.is_authenticated and request.user.is_staff):
        return render(request, 'forbidden.html')
    return render(request, 'admin_delete.html')
class CustomLogin(auth_views.LoginView):
    """Login view with a 'remember me' checkbox controlling session expiry."""
    form_class = AuthForm
    template_name = 'registration/login.html'

    def form_valid(self, form):
        remember = form.cleaned_data['remember_me']
        # NOTE(review): this extends the session to one week (604800 s) when
        # 'remember me' is NOT ticked — the condition looks inverted; the
        # usual pattern is set_expiry(0) (expire on browser close) for the
        # unticked case. Confirm intent before changing.
        if not remember:
            self.request.session.set_expiry(604800)
            self.request.session.modified = True
        return super(CustomLogin, self).form_valid(form)
class UserCreateView(CreateView):
    """Sign-up page; redirects to the index on success."""
    form_class = UserCreateForm
    template_name = 'registration/signup.html'
    success_url = reverse_lazy('index')
def last_watching(request):
    """Resume page: replay the user's most recently watched episode."""
    if request.user.is_authenticated:
        w = Watching.objects.filter(w_user=User.objects.get(username=request.user)).order_by('-watching_id')
        arr = []
        for x in w:
            # str(x) is assumed to be "<anime name> <season> <episode>";
            # reverse the string, split off the last two space-separated
            # fields, reverse each part back, then reorder so each arr
            # entry is [name, season, episode].  TODO confirm __str__ format.
            x = str(x)[::-1]
            x = x.split(" ", 2)
            x = [i[::-1] for i in x]
            x[0], x[2] = x[2], x[0]
            arr.append(x)
        try:
            anime = arr[0][0]
            stagione = arr[0][1]
            ep = arr[0][2]
            querysetAnime = Anime.objects.filter(name=anime, season=stagione)
            identify = None
            for q in querysetAnime:
                if AnimeSerializer(q).data["name"] == anime and int(AnimeSerializer(q).data["season"]) == int(stagione):
                    identify = AnimeSerializer(q).data['anime_id']
            querysetEpisode = Episode.objects.filter(e_anime=identify, name=str(ep))
            serialized_Episode = EpisodeSerializer(querysetEpisode[0])
            visual = serialized_Episode.data['seen']
            return render(request, 'history.html', {'kind_list':kind_inity(), 'query':querysetAnime[0], 'anime': anime,'stagione':stagione,'ep':ep, 'ep_link':serialized_Episode.data["path"], 'visual': visual})
        except IndexError:
            # No watch history (arr empty) or the episode row is missing.
            return render(request, '404_not_found.html')
    return render(request, 'forbidden.html')
def kind_search(request, kind):
    """List every anime tagged with genre ``kind``."""
    identify = None
    querysetKind = Kind.objects.filter(kind_name=kind)
    for q in querysetKind:
        if KindSerializer(q).data["kind_name"] == kind:
            identify = KindSerializer(q).data["kind_id"]
    querysetKindAnime = KindAnime.objects.filter(ka_kind=identify)
    tmp = []
    for q in querysetKindAnime:
        # Strip the trailing " <kind>" from str(q), then split off the season.
        tmp.append(str(q)[:-len(kind)-1].rsplit(" ", 1))
    return render(request, 'kind.html', {'anime_list':tmp, 'kind_list':kind_inity(), 'kind':kind}) | skibidi/website/views.py | from django.shortcuts import render
from django.core.paginator import Paginator
from backend.serializers import KindAnimeSerializer, KindSerializer, AnimeSerializer, EpisodeSerializer, UserSerializer, PersonalKindSerializer
from backend.forms import AuthForm, UserCreateForm, MainForm
from django.views.generic import CreateView
from django.urls import reverse_lazy
from django.contrib.auth import views as auth_views
from backend.models import Kind, Anime, Episode, PersonalKind, Watching, KindAnime
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.db.utils import IntegrityError
def kind_inity():
    """Return the names of all Kind rows, for nav-bar rendering."""
    return [
        KindSerializer(kind).data["kind_name"]
        for kind in Kind.objects.all()
    ]
def index(request):
    """Home page: paginated anime list, optionally filtered by ?search=."""
    form = MainForm()
    search = request.GET.get('search')
    if search is not None:
        query_set = Anime.objects.filter(name__icontains=search).order_by('name', 'season')
        # Show every hit on a single page; guard against per_page == 0
        # when the search matches nothing.
        paginator = Paginator(query_set, max(len(query_set), 1))
    else:
        query_set = Anime.objects.order_by('name', 'season')
        paginator = Paginator(query_set, 18)
    if query_set.exists():
        page_obj = paginator.get_page(request.GET.get('page'))
    else:
        page_obj = {}
    return render(request, 'index.html', {
        'form': form,
        'is_blank': bool(not page_obj),
        # Pass the evaluated list for consistency with the other views,
        # which all call kind_inity() rather than passing the callable.
        'kind_list': kind_inity(),
        'page_obj': page_obj,
    })
def anime_ep(request, anime, stagione, ep):
    """Episode player page: bump the view counter, record the watch for
    logged-in users, and suggest titles matching their preferred kinds."""
    querysetAnime = Anime.objects.filter(name=anime, season=stagione)
    identify = None
    # Resolve the anime_id for this (name, season) pair.
    for q in querysetAnime:
        if AnimeSerializer(q).data["name"] == anime and AnimeSerializer(q).data["season"] == stagione:
            identify = AnimeSerializer(q).data['anime_id']
    querysetEpisode = Episode.objects.filter(e_anime=identify, name=str(ep))
    serialized_Episode = None
    try:
        # Increment the view counter; IndexError here means no such episode.
        Episode.objects.filter(e_anime=identify, name=str(ep)).update(seen=(EpisodeSerializer(querysetEpisode[0])).data['seen']+1)
        serialized_Episode = EpisodeSerializer(querysetEpisode[0])
    except IndexError:
        return render(request, '404_not_found.html')
    visual = serialized_Episode.data['seen']
    also_like = set()
    if request.user.is_authenticated:
        serialized_user_id = UserSerializer(User.objects.filter(username=request.user)[0]).data['id']
        queryset_personal_kind = PersonalKind.objects.filter(p_user=serialized_user_id)
        # Collect up to 13 "also like" (name, season) pairs from anime tagged
        # with the user's personal kinds.
        for personal_kind in queryset_personal_kind:
            curr_pk_id = PersonalKindSerializer(personal_kind).data['personal_kind_id']
            for item in KindAnime.objects.filter(ka_kind=curr_pk_id):
                if len(also_like) > 12:
                    break
                # NOTE(review): relies on KindAnime.__str__ producing
                # "<name words> <season> <kind>" — fragile; confirm format.
                tmp = str(item).split(" ")
                anime_name = " ".join(tmp[:-2])
                season = " ".join(tmp[len(tmp)-2:-1])
                also_like.add((anime_name, season))
        try:
            # Record this view in the watch history; a duplicate violates a
            # unique constraint and is deliberately ignored.
            w_user = User.objects.get(username=request.user)
            w_anime = Anime.objects.get(name=anime,season=stagione)
            w_episode = Episode.objects.get(e_anime=identify, name=str(ep))
            w = Watching(w_user=w_user, w_anime=w_anime, w_episode=w_episode)
            w.save()
        except IntegrityError:
            print("[DEBUG] Secondo me è già inserito, poi vedi tu - Cit il DB /[DEBUG]")
            pass
    return render(request, 'media.html', {'kind_list':kind_inity(), 'query':querysetAnime[0], 'anime': anime,'stagione':stagione,'ep':ep, 'ep_link':serialized_Episode.data["path"], 'visual': visual, 'also_like' : also_like})
def anime_ep_list(request, anime, stagione):
    """Episode index for one (anime, season): links for every episode number."""
    querysetAnime = Anime.objects.filter(name=anime, season=stagione)
    serialized_Anime = AnimeSerializer(querysetAnime[0])
    # ep_list spans start_number_episode..last_episode inclusive.
    return render(request, 'index_episodes.html',{'kind_list':kind_inity(), 'anime':anime, 'season':stagione, 'ep_list':range(serialized_Anime.data["start_number_episode"], serialized_Anime.data["last_episode"]+1)})
def profile(request):
    """User profile: liked kinds plus the remaining selectable kinds."""
    if request.user.is_authenticated:
        user_query = get_user_model().objects.filter(username=request.user)
        personal = PersonalKind.objects.filter(p_user=user_query[0])
        not_like = []
        for k in kind_inity():
            # NOTE(review): substring test against str(queryset) — a kind
            # whose name is contained in another liked kind's name would be
            # filtered out incorrectly; verify.
            if k not in str(personal):
                not_like.append(k)
        return render(request, 'profile.html', {'kind_list': kind_inity(), 'personal_list':personal, 'not_personal_list':not_like})
    return render(request, 'forbidden.html')
return render(request, 'forbidden.html')
def admin_control(request):
if request.user.is_authenticated and request.user.is_staff:
update_anime = Anime.objects.order_by('name','season')
update_kind = Kind.objects.all()
update_episode = Episode.objects.all()
return render(request, 'admin_1.html', {'update_anime':update_anime, 'update_kind':update_kind, 'update_episode':update_episode})
return render(request, 'forbidden.html')
def staff_create(request):
    """Staff-only page for creating records."""
    if not (request.user.is_authenticated and request.user.is_staff):
        return render(request, 'forbidden.html')
    return render(request, 'admin_create.html')

def staff_update(request):
    """Staff-only page for updating records."""
    if not (request.user.is_authenticated and request.user.is_staff):
        return render(request, 'forbidden.html')
    return render(request, 'admin_update.html')

def staff_delete(request):
    """Staff-only page for deleting records."""
    if not (request.user.is_authenticated and request.user.is_staff):
        return render(request, 'forbidden.html')
    return render(request, 'admin_delete.html')
class CustomLogin(auth_views.LoginView):
    """Login view with a 'remember me' checkbox controlling session expiry."""
    form_class = AuthForm
    template_name = 'registration/login.html'

    def form_valid(self, form):
        remember = form.cleaned_data['remember_me']
        # NOTE(review): this extends the session to one week (604800 s) when
        # 'remember me' is NOT ticked — the condition looks inverted; the
        # usual pattern is set_expiry(0) (expire on browser close) for the
        # unticked case. Confirm intent before changing.
        if not remember:
            self.request.session.set_expiry(604800)
            self.request.session.modified = True
        return super(CustomLogin, self).form_valid(form)
class UserCreateView(CreateView):
    """Sign-up page; redirects to the index on success."""
    form_class = UserCreateForm
    template_name = 'registration/signup.html'
    success_url = reverse_lazy('index')
def last_watching(request):
    """Resume page: replay the user's most recently watched episode."""
    if request.user.is_authenticated:
        w = Watching.objects.filter(w_user=User.objects.get(username=request.user)).order_by('-watching_id')
        arr = []
        for x in w:
            # str(x) is assumed to be "<anime name> <season> <episode>";
            # reverse the string, split off the last two space-separated
            # fields, reverse each part back, then reorder so each arr
            # entry is [name, season, episode].  TODO confirm __str__ format.
            x = str(x)[::-1]
            x = x.split(" ", 2)
            x = [i[::-1] for i in x]
            x[0], x[2] = x[2], x[0]
            arr.append(x)
        try:
            anime = arr[0][0]
            stagione = arr[0][1]
            ep = arr[0][2]
            querysetAnime = Anime.objects.filter(name=anime, season=stagione)
            identify = None
            for q in querysetAnime:
                if AnimeSerializer(q).data["name"] == anime and int(AnimeSerializer(q).data["season"]) == int(stagione):
                    identify = AnimeSerializer(q).data['anime_id']
            querysetEpisode = Episode.objects.filter(e_anime=identify, name=str(ep))
            serialized_Episode = EpisodeSerializer(querysetEpisode[0])
            visual = serialized_Episode.data['seen']
            return render(request, 'history.html', {'kind_list':kind_inity(), 'query':querysetAnime[0], 'anime': anime,'stagione':stagione,'ep':ep, 'ep_link':serialized_Episode.data["path"], 'visual': visual})
        except IndexError:
            # No watch history (arr empty) or the episode row is missing.
            return render(request, '404_not_found.html')
    return render(request, 'forbidden.html')
def kind_search(request, kind):
    """List every anime tagged with genre ``kind``."""
    identify = None
    querysetKind = Kind.objects.filter(kind_name=kind)
    for q in querysetKind:
        if KindSerializer(q).data["kind_name"] == kind:
            identify = KindSerializer(q).data["kind_id"]
    querysetKindAnime = KindAnime.objects.filter(ka_kind=identify)
    tmp = []
    for q in querysetKindAnime:
        # Strip the trailing " <kind>" from str(q), then split off the season.
        tmp.append(str(q)[:-len(kind)-1].rsplit(" ", 1))
    return render(request, 'kind.html', {'anime_list':tmp, 'kind_list':kind_inity(), 'kind':kind}) | 0.333178 | 0.10904
from setuptools import distutils
from inspect import getmembers, isfunction
import functools
import glob
import os
import pkgutil
import sys
import types
__all__ = [
'get_python_library',
'get_python_methods',
'wraps',
]
if sys.version_info[0:2] >= (3, 4):  # Python v3.4+?
    wraps = functools.wraps  # built-in has __wrapped__ attribute
else:
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        """functools.wraps drop-in that also sets ``__wrapped__`` (pre-3.4)."""
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped  # set attribute missing in earlier versions
            return f
        return wrapper
# unify functions
if sys.version_info[0] == 2:
    # On Python 2, rebind the lazy itertools variants over the eager builtins.
    from itertools import izip as zip, imap as map, ifilter as filter
    filter = filter
    map = map
    zip = zip
if sys.version_info[0] == 3:
    # On Python 3, reduce moved to functools.
    from functools import reduce
    reduce = reduce
range = xrange if sys.version_info[0] == 2 else range
# unify itertools and functools
if sys.version_info[0] == 2:
    from itertools import ifilterfalse as filterfalse
    from itertools import izip_longest as zip_longest
    # NOTE(review): functools32 is a third-party backport, and importing its
    # `wraps` here rebinds the `wraps` shim defined above — confirm which
    # binding is intended to win on Python 2.
    from functools32 import partial, wraps
else:
    from itertools import filterfalse
    from itertools import zip_longest
    from functools import partial, wraps
def get_python_library():
    """Return a sorted list of standard-library top-level module names.

    Combines (a) importable top-level source modules on sys.path minus
    anything declared by site-packages ``top_level.txt`` files, (b) the
    built-in modules, and (c) top-level package directories of the
    python install.
    """
    # Get list of the loaded source modules on sys.path.
    modules = {
        module
        for _, module, package in pkgutil.iter_modules()
        if package is False
    }
    # Glob all the 'top_level.txt' files installed under site-packages.
    site_packages = glob.iglob(os.path.join(os.path.dirname(os.__file__)
                                            + '/site-packages', '*-info', 'top_level.txt'))
    # Remove every importable name those files declare.  Each file may list
    # several names (one per line); the original removed only the whole
    # stripped file content as a single name and leaked the file handles.
    for txt in site_packages:
        with open(txt) as handle:
            modules -= {name.strip() for name in handle if name.strip()}
    # Get the system packages.
    system_modules = set(sys.builtin_module_names)
    # Get the just the top-level packages from the python install.
    python_root = distutils.sysconfig.get_python_lib(standard_lib=True)
    # next() reads only the root directory listing instead of materializing
    # the entire recursive walk as the original list(...)[0] did.
    _, top_level_libs, _ = next(os.walk(python_root))
    return sorted(top_level_libs + list(modules | system_modules))
def get_python_methods(module):
    """Return (name, function) pairs for the plain functions of *module*."""
    assert isinstance(module, types.ModuleType)
    return getmembers(module, isfunction) | gemini/utils/import_util.py | from setuptools import distutils
from inspect import getmembers, isfunction
import functools
import glob
import os
import pkgutil
import sys
import types
__all__ = [
'get_python_library',
'get_python_methods',
'wraps',
]
if sys.version_info[0:2] >= (3, 4):  # Python v3.4+?
    wraps = functools.wraps  # built-in has __wrapped__ attribute
else:
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        """functools.wraps drop-in that also sets ``__wrapped__`` (pre-3.4)."""
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped  # set attribute missing in earlier versions
            return f
        return wrapper
# unify functions
if sys.version_info[0] == 2:
    # On Python 2, rebind the lazy itertools variants over the eager builtins.
    from itertools import izip as zip, imap as map, ifilter as filter
    filter = filter
    map = map
    zip = zip
if sys.version_info[0] == 3:
    # On Python 3, reduce moved to functools.
    from functools import reduce
    reduce = reduce
range = xrange if sys.version_info[0] == 2 else range
# unify itertools and functools
if sys.version_info[0] == 2:
    from itertools import ifilterfalse as filterfalse
    from itertools import izip_longest as zip_longest
    # NOTE(review): functools32 is a third-party backport, and importing its
    # `wraps` here rebinds the `wraps` shim defined above — confirm which
    # binding is intended to win on Python 2.
    from functools32 import partial, wraps
else:
    from itertools import filterfalse
    from itertools import zip_longest
    from functools import partial, wraps
def get_python_library():
    """Return a sorted list of standard-library top-level module names.

    Combines (a) importable top-level source modules on sys.path minus
    anything declared by site-packages ``top_level.txt`` files, (b) the
    built-in modules, and (c) top-level package directories of the
    python install.
    """
    # Get list of the loaded source modules on sys.path.
    modules = {
        module
        for _, module, package in pkgutil.iter_modules()
        if package is False
    }
    # Glob all the 'top_level.txt' files installed under site-packages.
    site_packages = glob.iglob(os.path.join(os.path.dirname(os.__file__)
                                            + '/site-packages', '*-info', 'top_level.txt'))
    # Remove every importable name those files declare.  Each file may list
    # several names (one per line); the original removed only the whole
    # stripped file content as a single name and leaked the file handles.
    for txt in site_packages:
        with open(txt) as handle:
            modules -= {name.strip() for name in handle if name.strip()}
    # Get the system packages.
    system_modules = set(sys.builtin_module_names)
    # Get the just the top-level packages from the python install.
    python_root = distutils.sysconfig.get_python_lib(standard_lib=True)
    # next() reads only the root directory listing instead of materializing
    # the entire recursive walk as the original list(...)[0] did.
    _, top_level_libs, _ = next(os.walk(python_root))
    return sorted(top_level_libs + list(modules | system_modules))
def get_python_methods(module):
    """Return (name, function) pairs for the plain functions of *module*."""
    assert isinstance(module, types.ModuleType)
    return getmembers(module, isfunction) | 0.301979 | 0.092688
import asyncio
from glob import glob
from typing import Optional
import aiohttp
import pincer
from pincer import Client
from pincer.objects import Embed
from mcoding_bot.config import Config
from mcoding_bot.database import Database
from mcoding_bot.cache import Cache
class Bot(Client):
    """Pincer client wired to the bot's config, database and cache."""

    def __init__(self, config: Config):
        # Embed accent colour used by self.embed().
        self.theme = 0x0B7CD3
        self.load_cogs()
        self.config = config
        self.session: Optional[aiohttp.ClientSession] = None
        super().__init__(self.config.token, intents=pincer.Intents.all())
        self.database = Database()
        self.cache = Cache(self)
        # Declared for type checkers only; assigned in run()/_run().
        self.loop: asyncio.AbstractEventLoop
        self.stop_future: asyncio.Event

    def run(self):
        """Blocking entry point: run the bot, then clean up resources."""
        loop = asyncio.get_event_loop()
        self.loop = loop
        loop.run_until_complete(self._run())
        loop.run_until_complete(self._cleanup())

    async def _run(self):
        """Connect the database, start the single shard, wait for close()."""
        await self.database.connect(
            host="localhost",
            database=self.config.db_name,
            user=self.config.db_user,
            # NOTE(review): '<PASSWORD>' is a redaction artifact and is not
            # valid Python — restore the real attribute (e.g.
            # self.config.db_password) here.
            password=self.<PASSWORD>,
        )
        # NOTE(review): the explicit loop= argument to asyncio.Event is
        # deprecated (removed in Python 3.10) — confirm target version.
        self.stop_future = asyncio.Event(loop=self.loop)
        await self.start_shard(0, 1)
        await self.stop_future.wait()

    async def _cleanup(self):
        """Close the HTTP session (if open) and the database connection."""
        if self.session and not self.session.closed:
            await self.session.close()
        await self.database.cleanup()

    async def get_session(self):
        """Lazily create and return the shared aiohttp session."""
        if self.session is None:
            self.session = aiohttp.ClientSession()
        return self.session

    def load_cogs(self):
        """Load all cogs from the `cogs` directory."""
        for cog in glob("mcoding_bot/cogs/*.py"):
            if "__init__" in cog:
                continue
            # Convert the file path to a dotted module path (strip '.py').
            self.load_cog(cog.replace("/", ".").replace("\\", ".")[:-3])
            print("Loaded cogs from", cog)

    @Client.event
    async def on_ready(self):
        """Print the startup banner once the gateway is ready."""
        print(
            " _____ _ _ _____ _",
            " _____| |___ _| |_|___ ___ | __ |___| |_",
            "| | --| . | . | | | . | | __ -| . | _|",
            "|_|_|_|_____|___|___|_|_|_|_ | |_____|___|_|",
            " |___|" "",
            sep="\n",
        )

    def embed(self, **kwargs) -> Embed:
        """Build an Embed in the theme colour with the standard footer."""
        # assumes the pincer Client exposes the bot user as `self.bot`
        # with a `.username` attribute — TODO confirm against pincer docs.
        return Embed(**kwargs, color=self.theme).set_footer(
            text=f"{self.bot.username} - /help for more information",
        )

    def close(self):
        """Signal _run() to stop waiting so run() falls through to cleanup."""
        self.stop_future.set() | mcoding_bot/bot.py | import asyncio
from glob import glob
from typing import Optional
import aiohttp
import pincer
from pincer import Client
from pincer.objects import Embed
from mcoding_bot.config import Config
from mcoding_bot.database import Database
from mcoding_bot.cache import Cache
class Bot(Client):
    """Pincer client wired to the bot's config, database and cache."""

    def __init__(self, config: Config):
        # Embed accent colour used by self.embed().
        self.theme = 0x0B7CD3
        self.load_cogs()
        self.config = config
        self.session: Optional[aiohttp.ClientSession] = None
        super().__init__(self.config.token, intents=pincer.Intents.all())
        self.database = Database()
        self.cache = Cache(self)
        # Declared for type checkers only; assigned in run()/_run().
        self.loop: asyncio.AbstractEventLoop
        self.stop_future: asyncio.Event

    def run(self):
        """Blocking entry point: run the bot, then clean up resources."""
        loop = asyncio.get_event_loop()
        self.loop = loop
        loop.run_until_complete(self._run())
        loop.run_until_complete(self._cleanup())

    async def _run(self):
        """Connect the database, start the single shard, wait for close()."""
        await self.database.connect(
            host="localhost",
            database=self.config.db_name,
            user=self.config.db_user,
            # NOTE(review): '<PASSWORD>' is a redaction artifact and is not
            # valid Python — restore the real attribute (e.g.
            # self.config.db_password) here.
            password=self.<PASSWORD>,
        )
        # NOTE(review): the explicit loop= argument to asyncio.Event is
        # deprecated (removed in Python 3.10) — confirm target version.
        self.stop_future = asyncio.Event(loop=self.loop)
        await self.start_shard(0, 1)
        await self.stop_future.wait()

    async def _cleanup(self):
        """Close the HTTP session (if open) and the database connection."""
        if self.session and not self.session.closed:
            await self.session.close()
        await self.database.cleanup()

    async def get_session(self):
        """Lazily create and return the shared aiohttp session."""
        if self.session is None:
            self.session = aiohttp.ClientSession()
        return self.session

    def load_cogs(self):
        """Load all cogs from the `cogs` directory."""
        for cog in glob("mcoding_bot/cogs/*.py"):
            if "__init__" in cog:
                continue
            # Convert the file path to a dotted module path (strip '.py').
            self.load_cog(cog.replace("/", ".").replace("\\", ".")[:-3])
            print("Loaded cogs from", cog)

    @Client.event
    async def on_ready(self):
        """Print the startup banner once the gateway is ready."""
        print(
            " _____ _ _ _____ _",
            " _____| |___ _| |_|___ ___ | __ |___| |_",
            "| | --| . | . | | | . | | __ -| . | _|",
            "|_|_|_|_____|___|___|_|_|_|_ | |_____|___|_|",
            " |___|" "",
            sep="\n",
        )

    def embed(self, **kwargs) -> Embed:
        """Build an Embed in the theme colour with the standard footer."""
        # assumes the pincer Client exposes the bot user as `self.bot`
        # with a `.username` attribute — TODO confirm against pincer docs.
        return Embed(**kwargs, color=self.theme).set_footer(
            text=f"{self.bot.username} - /help for more information",
        )

    def close(self):
        """Signal _run() to stop waiting so run() falls through to cleanup."""
        self.stop_future.set() | 0.73029 | 0.112893
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import os
import shutil
import json
from uuid import UUID
import mock
from requests.exceptions import ConnectTimeout, ConnectionError as ConnectError
import pytest as pt
from source.app import FinnOwnership, Scraper
from source.util import TrackingError
class TestFinnOwnership:
"""
Test cases for the FinnOwnership scraper
"""
@classmethod
def setup(cls):
"""
Executed before all tests
"""
cls.finn_ownership = FinnOwnership("144857770")
def test_finn_ownership_is_instance_of_scraper(self):
"""
Test that FinnOwnership object is instance and subclass of Scraper
"""
for parent in [FinnOwnership, Scraper]:
assert isinstance(self.finn_ownership, parent)
assert issubclass(self.finn_ownership.__class__, parent)
@staticmethod
@pt.mark.parametrize("invalid_finn_ownership_code_types",
[144857770, 144857770.0, True, [], (), {}])
def test_invalid_finn_ownership_code_raises_not_found_error(invalid_finn_ownership_code_types):
"""
Test that FinnOwnership raises TypeError for invalid finn_ownership_code types
"""
with pt.raises(TypeError):
FinnOwnership(invalid_finn_ownership_code_types)
@pt.mark.parametrize("invalid_finn_ownership_code", ["1448577", "2448577701", "24485777a"])
def test_validate_finn_ownership_code_method(self, invalid_finn_ownership_code):
"""
Test that invalid finn_ownership_code raises TrackingError
"""
with pt.raises(TrackingError):
FinnOwnership(invalid_finn_ownership_code)
with pt.raises(TrackingError):
self.finn_ownership.validate_finn_code(invalid_finn_ownership_code)
def test_finn_ownership_has_uuid4_compatible_id(self):
"""
Test FinnOwnership scraper has uuid4 compatible ids
"""
assert UUID(str(self.finn_ownership.id_))
def test_ownership_response_method(self):
"""
Test the FinnOwnership response method
"""
assert self.finn_ownership.ownership_response().status_code == 200
@staticmethod
@mock.patch("requests.get", mock.MagicMock(side_effect=ConnectTimeout))
def test_response_throws_tracking_error_for_time_out():
"""
Test that response method throws TrackingError if requests.get throws ConnectTimeout
"""
with pt.raises(TrackingError):
finn_ownership = FinnOwnership("144857770")
finn_ownership.ownership_response()
@staticmethod
@mock.patch("requests.get", mock.MagicMock(side_effect=ConnectError))
def test_response_throws_tracking_error_no_connection():
"""
Test that response method throws TrackingError if requests.get throws ConnectError
"""
with pt.raises(TrackingError):
finn_ownership = FinnOwnership("144857770")
finn_ownership.ownership_response()
@staticmethod
@mock.patch("source.app.scrapers.finn_ownership.FinnOwnership.ownership_response",
            mock.MagicMock(return_value=None))
def test_housing_ownership_information_throws_not_found_error_if_none_response():
    """A None ownership_response must not blow up with AttributeError."""
    scraper = FinnOwnership("144857770")
    scraper.housing_ownership_information()
@mock.patch("source.app.scrapers.finn_ownership.FinnOwnership.ownership_response",
            mock.MagicMock(side_effect=ValueError("this is a test")))
def test_housing_ownership_information_throws_exception(self):
    """
    Test that housing_ad_information captures and raises exception
    """
    # Any unexpected error from the response layer is re-raised as TrackingError.
    with pt.raises(TrackingError):
        self.finn_ownership.housing_ownership_information()
@mock.patch("source.app.scrapers.finn_ownership.FinnOwnership.housing_ownership_information",
            mock.MagicMock(return_value=""))
def test_to_json(self):
    """
    Test that staticmethod to_json() produces json file with correct content
    """
    current_dir = os.path.dirname(__file__)
    file_dir = os.path.join(current_dir, "report", "json")
    self.finn_ownership.to_json(file_dir=file_dir)
    # The last entry of listdir() is taken to be the file just written.
    # NOTE(review): listdir order is not guaranteed -- confirm naming scheme sorts.
    with open(os.path.join(file_dir, os.listdir(file_dir)[-1])) as json_file:
        data = json.load(json_file)
        assert data == ""
shutil.rmtree(os.path.join(current_dir, "report"), ignore_errors=True) | tests/app/scrapers/test_finn_ownership.py | __author__ = '<NAME>'
__email__ = '<EMAIL>'
import os
import shutil
import json
from uuid import UUID
import mock
from requests.exceptions import ConnectTimeout, ConnectionError as ConnectError
import pytest as pt
from source.app import FinnOwnership, Scraper
from source.util import TrackingError
class TestFinnOwnership:
    """
    Test cases for the FinnOwnership scraper
    """

    @classmethod
    def setup(cls):
        """
        Executed before all tests
        """
        # One shared scraper instance, built from a known-valid finn code.
        cls.finn_ownership = FinnOwnership("144857770")

    def test_finn_ownership_is_instance_of_scraper(self):
        """
        Test that FinnOwnership object is instance and subclass of Scraper
        """
        for parent in [FinnOwnership, Scraper]:
            assert isinstance(self.finn_ownership, parent)
            assert issubclass(self.finn_ownership.__class__, parent)

    @staticmethod
    @pt.mark.parametrize("invalid_finn_ownership_code_types",
                         [144857770, 144857770.0, True, [], (), {}])
    def test_invalid_finn_ownership_code_raises_not_found_error(invalid_finn_ownership_code_types):
        """
        Test that FinnOwnership raises TypeError for invalid finn_ownership_code types
        """
        with pt.raises(TypeError):
            FinnOwnership(invalid_finn_ownership_code_types)

    @pt.mark.parametrize("invalid_finn_ownership_code", ["1448577", "2448577701", "24485777a"])
    def test_validate_finn_ownership_code_method(self, invalid_finn_ownership_code):
        """
        Test that invalid finn_ownership_code raises TrackingError
        """
        # Rejected both at construction time and via the explicit validator.
        with pt.raises(TrackingError):
            FinnOwnership(invalid_finn_ownership_code)
        with pt.raises(TrackingError):
            self.finn_ownership.validate_finn_code(invalid_finn_ownership_code)

    def test_finn_ownership_has_uuid4_compatible_id(self):
        """
        Test FinnOwnership scraper has uuid4 compatible ids
        """
        # UUID() raises ValueError for malformed id strings, failing the test.
        assert UUID(str(self.finn_ownership.id_))

    def test_ownership_response_method(self):
        """
        Test the FinnOwnership response method
        """
        # NOTE(review): live HTTP request -- requires network access.
        assert self.finn_ownership.ownership_response().status_code == 200

    @staticmethod
    @mock.patch("requests.get", mock.MagicMock(side_effect=ConnectTimeout))
    def test_response_throws_tracking_error_for_time_out():
        """
        Test that response method throws TrackingError if requests.get throws ConnectTimeout
        """
        with pt.raises(TrackingError):
            finn_ownership = FinnOwnership("144857770")
            finn_ownership.ownership_response()

    @staticmethod
    @mock.patch("requests.get", mock.MagicMock(side_effect=ConnectError))
    def test_response_throws_tracking_error_no_connection():
        """
        Test that response method throws TrackingError if requests.get throws ConnectError
        """
        with pt.raises(TrackingError):
            finn_ownership = FinnOwnership("144857770")
            finn_ownership.ownership_response()

    @staticmethod
    @mock.patch("source.app.scrapers.finn_ownership.FinnOwnership.ownership_response",
                mock.MagicMock(return_value=None))
    def test_housing_ownership_information_throws_not_found_error_if_none_response():
        """
        Test that housing_ownership_information method does not throws AttributeError
        if ownership_response is None
        """
        finn_ownership = FinnOwnership("144857770")
        finn_ownership.housing_ownership_information()

    @mock.patch("source.app.scrapers.finn_ownership.FinnOwnership.ownership_response",
                mock.MagicMock(side_effect=ValueError("this is a test")))
    def test_housing_ownership_information_throws_exception(self):
        """
        Test that housing_ad_information captures and raises exception
        """
        # Any unexpected error from the response layer is re-raised as TrackingError.
        with pt.raises(TrackingError):
            self.finn_ownership.housing_ownership_information()

    @mock.patch("source.app.scrapers.finn_ownership.FinnOwnership.housing_ownership_information",
                mock.MagicMock(return_value=""))
    def test_to_json(self):
        """
        Test that staticmethod to_json() produces json file with correct content
        """
        current_dir = os.path.dirname(__file__)
        file_dir = os.path.join(current_dir, "report", "json")
        self.finn_ownership.to_json(file_dir=file_dir)
        # The last entry of listdir() is taken to be the file just written.
        with open(os.path.join(file_dir, os.listdir(file_dir)[-1])) as json_file:
            data = json.load(json_file)
            assert data == ""
shutil.rmtree(os.path.join(current_dir, "report"), ignore_errors=True) | 0.604749 | 0.258853 |
import logging
import os
import shutil
from typing import BinaryIO, Iterator, Optional, List
import requests
from . import crypto, api, utils
from .params import KeeperParams
from .proto import record_pb2
from .vault import KeeperRecord, PasswordRecord, TypedRecord, FileRecord, AttachmentFile
def prepare_attachment_download(params, record_uid, attachment_name=None):
    # type: (KeeperParams, str, Optional[str]) -> Iterator['AttachmentDownloadRequest']
    """Yield an AttachmentDownloadRequest for each matching attachment.

    Handles typed records (version 3/4, attachments referenced through
    'fileRef' fields and resolved via the REST endpoint) and legacy records
    (version 2, resolved through the JSON 'request_download' command).
    When attachment_name is given, only attachments whose UID matches it
    exactly, or whose title/file name matches case-insensitively, are yielded.
    """
    record = KeeperRecord.load(params, record_uid)
    if not record:
        logging.warning('Record UID \"%s\" not found.', record_uid)
        # Bug fix: bail out here -- the original fell through and raised
        # AttributeError on record.version when the record was missing.
        return
    if record.version in {3, 4}:
        rq = record_pb2.FilesGetRequest()
        rq.for_thumbnails = False
        if isinstance(record, FileRecord):
            rq.record_uids.append(utils.base64_url_decode(record.record_uid))
        elif isinstance(record, TypedRecord):
            typed_field = record.get_typed_field('fileRef')
            if typed_field and isinstance(typed_field.value, list):
                for file_uid in typed_field.value:
                    file_record = KeeperRecord.load(params, file_uid)
                    if isinstance(file_record, FileRecord):
                        if attachment_name:
                            if attachment_name != file_uid and file_record.title.lower() != attachment_name.lower() and \
                                    file_record.name.lower() != attachment_name.lower():
                                continue
                        rq.record_uids.append(utils.base64_url_decode(file_uid))
        if len(rq.record_uids) > 0:
            rs = api.communicate_rest(params, rq, 'vault/files_download', rs_type=record_pb2.FilesGetResponse)
            for file_status in rs.files:
                file_uid = utils.base64_url_encode(file_status.record_uid)
                if file_status.status == record_pb2.FG_SUCCESS:
                    file_record = KeeperRecord.load(params, file_uid)
                    if isinstance(file_record, FileRecord):
                        adr = AttachmentDownloadRequest()
                        adr.url = file_status.url
                        adr.success_status_code = file_status.success_status_code
                        adr.encryption_key = file_record.record_key
                        adr.title = file_record.title if file_record.title else file_record.name
                        adr.is_gcm_encrypted = file_status.fileKeyType == record_pb2.ENCRYPTED_BY_DATA_KEY_GCM
                        yield adr
                else:
                    logging.warning('Error requesting download URL for file \"%s\"', file_uid)
    elif record.version == 2:
        attachments = []  # type: List[AttachmentFile]
        if isinstance(record, PasswordRecord):
            for atta in (record.attachments or []):
                if attachment_name:
                    if attachment_name != atta.id and attachment_name.lower() != atta.title.lower() and \
                            attachment_name.lower() != atta.name.lower():
                        continue
                attachments.append(atta)
        if len(attachments) > 0:
            rq = {
                'command': 'request_download',
                'file_ids': [x.id for x in attachments],
            }
            api.resolve_record_access_path(params, record_uid, path=rq)
            rs = api.communicate(params, rq)
            if rs['result'] == 'success':
                for attachment, dl in zip(attachments, rs['downloads']):
                    if 'url' in dl:
                        adr = AttachmentDownloadRequest()
                        adr.title = attachment.title if attachment.title else attachment.name
                        adr.url = dl['url']
                        adr.encryption_key = utils.base64_url_decode(attachment.key)
                        adr.is_gcm_encrypted = False
                        yield adr
class AttachmentDownloadRequest:
    """Everything needed to fetch and decrypt one attachment.

    Instances are produced by prepare_attachment_download(); encryption_key
    and is_gcm_encrypted select how the downloaded stream is decrypted.
    """

    def __init__(self):
        self.url = ''                   # pre-signed download URL
        self.encryption_key = b''       # raw key handed to the stream crypter
        self.title = ''                 # display name (record title or file name)
        self.is_gcm_encrypted = False   # GCM vs legacy decryption mode
        self.success_status_code = 200  # HTTP status expected from the URL

    def download_to_file(self, params, file_name):  # type: (KeeperParams, str) -> None
        # Download and decrypt the attachment into a file on disk.
        logging.info('Downloading \'%s\'', os.path.abspath(file_name))
        with open(file_name, 'wb') as file_stream:
            self.download_to_stream(params, file_stream)

    def download_to_stream(self, params, output_stream):  # type: (KeeperParams, BinaryIO) -> int
        # Stream the attachment through the decrypter into output_stream.
        # A mismatched HTTP status is only logged; the body is still processed.
        with requests.get(self.url, proxies=params.rest_context.proxies, stream=True) as rq_http:
            if self.success_status_code != rq_http.status_code:
                logging.warning('HTTP status code: %d', rq_http.status_code)
            crypter = crypto.StreamCrypter()
            crypter.is_gcm = self.is_gcm_encrypted
            crypter.key = self.encryption_key
            # Decrypt lazily while copying in 10 KiB chunks.
            with crypter.set_stream(rq_http.raw, for_encrypt=False) as attachment:
                shutil.copyfileobj(attachment, output_stream, 10240)
            output_stream.flush()
return crypter.bytes_read | keepercommander/attachment.py |
import logging
import os
import shutil
from typing import BinaryIO, Iterator, Optional, List
import requests
from . import crypto, api, utils
from .params import KeeperParams
from .proto import record_pb2
from .vault import KeeperRecord, PasswordRecord, TypedRecord, FileRecord, AttachmentFile
def prepare_attachment_download(params, record_uid, attachment_name=None):
    # type: (KeeperParams, str, Optional[str]) -> Iterator['AttachmentDownloadRequest']
    """Yield an AttachmentDownloadRequest for each matching attachment.

    Handles typed records (version 3/4, attachments referenced through
    'fileRef' fields and resolved via the REST endpoint) and legacy records
    (version 2, resolved through the JSON 'request_download' command).
    When attachment_name is given, only attachments whose UID matches it
    exactly, or whose title/file name matches case-insensitively, are yielded.
    """
    record = KeeperRecord.load(params, record_uid)
    if not record:
        logging.warning('Record UID \"%s\" not found.', record_uid)
        # Bug fix: bail out here -- the original fell through and raised
        # AttributeError on record.version when the record was missing.
        return
    if record.version in {3, 4}:
        rq = record_pb2.FilesGetRequest()
        rq.for_thumbnails = False
        if isinstance(record, FileRecord):
            rq.record_uids.append(utils.base64_url_decode(record.record_uid))
        elif isinstance(record, TypedRecord):
            typed_field = record.get_typed_field('fileRef')
            if typed_field and isinstance(typed_field.value, list):
                for file_uid in typed_field.value:
                    file_record = KeeperRecord.load(params, file_uid)
                    if isinstance(file_record, FileRecord):
                        if attachment_name:
                            if attachment_name != file_uid and file_record.title.lower() != attachment_name.lower() and \
                                    file_record.name.lower() != attachment_name.lower():
                                continue
                        rq.record_uids.append(utils.base64_url_decode(file_uid))
        if len(rq.record_uids) > 0:
            rs = api.communicate_rest(params, rq, 'vault/files_download', rs_type=record_pb2.FilesGetResponse)
            for file_status in rs.files:
                file_uid = utils.base64_url_encode(file_status.record_uid)
                if file_status.status == record_pb2.FG_SUCCESS:
                    file_record = KeeperRecord.load(params, file_uid)
                    if isinstance(file_record, FileRecord):
                        adr = AttachmentDownloadRequest()
                        adr.url = file_status.url
                        adr.success_status_code = file_status.success_status_code
                        adr.encryption_key = file_record.record_key
                        adr.title = file_record.title if file_record.title else file_record.name
                        adr.is_gcm_encrypted = file_status.fileKeyType == record_pb2.ENCRYPTED_BY_DATA_KEY_GCM
                        yield adr
                else:
                    logging.warning('Error requesting download URL for file \"%s\"', file_uid)
    elif record.version == 2:
        attachments = []  # type: List[AttachmentFile]
        if isinstance(record, PasswordRecord):
            for atta in (record.attachments or []):
                if attachment_name:
                    if attachment_name != atta.id and attachment_name.lower() != atta.title.lower() and \
                            attachment_name.lower() != atta.name.lower():
                        continue
                attachments.append(atta)
        if len(attachments) > 0:
            rq = {
                'command': 'request_download',
                'file_ids': [x.id for x in attachments],
            }
            api.resolve_record_access_path(params, record_uid, path=rq)
            rs = api.communicate(params, rq)
            if rs['result'] == 'success':
                for attachment, dl in zip(attachments, rs['downloads']):
                    if 'url' in dl:
                        adr = AttachmentDownloadRequest()
                        adr.title = attachment.title if attachment.title else attachment.name
                        adr.url = dl['url']
                        adr.encryption_key = utils.base64_url_decode(attachment.key)
                        adr.is_gcm_encrypted = False
                        yield adr
class AttachmentDownloadRequest:
    """Everything needed to fetch and decrypt one attachment.

    Instances are produced by prepare_attachment_download(); encryption_key
    and is_gcm_encrypted select how the downloaded stream is decrypted.
    """

    def __init__(self):
        self.url = ''                   # pre-signed download URL
        self.encryption_key = b''       # raw key handed to the stream crypter
        self.title = ''                 # display name (record title or file name)
        self.is_gcm_encrypted = False   # GCM vs legacy decryption mode
        self.success_status_code = 200  # HTTP status expected from the URL

    def download_to_file(self, params, file_name):  # type: (KeeperParams, str) -> None
        # Download and decrypt the attachment into a file on disk.
        logging.info('Downloading \'%s\'', os.path.abspath(file_name))
        with open(file_name, 'wb') as file_stream:
            self.download_to_stream(params, file_stream)

    def download_to_stream(self, params, output_stream):  # type: (KeeperParams, BinaryIO) -> int
        # Stream the attachment through the decrypter into output_stream.
        # A mismatched HTTP status is only logged; the body is still processed.
        with requests.get(self.url, proxies=params.rest_context.proxies, stream=True) as rq_http:
            if self.success_status_code != rq_http.status_code:
                logging.warning('HTTP status code: %d', rq_http.status_code)
            crypter = crypto.StreamCrypter()
            crypter.is_gcm = self.is_gcm_encrypted
            crypter.key = self.encryption_key
            # Decrypt lazily while copying in 10 KiB chunks.
            with crypter.set_stream(rq_http.raw, for_encrypt=False) as attachment:
                shutil.copyfileobj(attachment, output_stream, 10240)
            output_stream.flush()
return crypter.bytes_read | 0.561575 | 0.13589 |
from django.shortcuts import render, HttpResponse
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from rest_framework import status
from django.contrib.auth.models import User
from .serializers import UserSerializer
from rest_framework.decorators import api_view, renderer_classes,authentication_classes, permission_classes
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly, AllowAny
# user logic code
@csrf_exempt
@api_view(['GET', 'PUT', 'DELETE'])
@authentication_classes([TokenAuthentication])
@permission_classes([AllowAny])  # NOTE(review): AllowAny on PUT/DELETE of users looks unsafe -- confirm intent
def user(request, id):
    """Retrieve, update or delete a single user by primary key.

    Returns 404 when no user with the given id exists, 400 on invalid
    PUT payloads and 204 after a successful DELETE.
    """
    try:
        # Renamed from the misleading 'product'; removed leftover debug print.
        user_obj = User.objects.get(id=id)
    except User.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = UserSerializer(user_obj)
        return Response(serializer.data)
    elif request.method == 'PUT':
        serializer = UserSerializer(user_obj, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        user_obj.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
@csrf_exempt
@api_view(['POST', 'GET'])
@authentication_classes([TokenAuthentication])
@permission_classes([AllowAny])
def users(request):
    """List all non-superuser accounts, or register a new one via POST."""
    if request.method == 'POST':
        serializer = UserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    # GET: every regular (non-admin) account.
    queryset = User.objects.filter(is_superuser=False)
    return Response(UserSerializer(queryset, many=True).data)
# owner logic code
@csrf_exempt
@api_view(['POST', 'GET'])
@authentication_classes([TokenAuthentication])
@permission_classes([AllowAny])
def owners(request):
if request.method == 'GET':
snippets = User.objects.filter(is_superuser=True)
serializer = UserSerializer(snippets, many=True)
return Response(serializer.data)
@csrf_exempt
@api_view(['GET', 'PUT', 'DELETE'])
@authentication_classes([TokenAuthentication])
@permission_classes([AllowAny])  # NOTE(review): AllowAny on PUT/DELETE of owners looks unsafe -- confirm intent
def owner(request, id):
    # Retrieve, update or delete a single owner by primary key.
    # NOTE(review): despite the name, this does not filter on is_superuser --
    # any User id is accepted; confirm whether that is intended.
    try:
        product = User.objects.get(id=id)
        print(product)  # NOTE(review): leftover debug print
    except User.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = UserSerializer(product)
        return Response(serializer.data)
    elif request.method == 'PUT':
        serializer = UserSerializer(product, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        product.delete()
return Response(status=status.HTTP_204_NO_CONTENT) | users/views.py | from django.shortcuts import render, HttpResponse
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from rest_framework import status
from django.contrib.auth.models import User
from .serializers import UserSerializer
from rest_framework.decorators import api_view, renderer_classes,authentication_classes, permission_classes
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly, AllowAny
# user logic code
@csrf_exempt
@api_view(['GET', 'PUT', 'DELETE'])
@authentication_classes([TokenAuthentication])
@permission_classes([AllowAny])  # NOTE(review): AllowAny on PUT/DELETE of users looks unsafe -- confirm intent
def user(request, id):
    """Retrieve, update or delete a single user by primary key.

    Returns 404 when no user with the given id exists, 400 on invalid
    PUT payloads and 204 after a successful DELETE.
    """
    try:
        # Renamed from the misleading 'product'; removed leftover debug print.
        user_obj = User.objects.get(id=id)
    except User.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = UserSerializer(user_obj)
        return Response(serializer.data)
    elif request.method == 'PUT':
        serializer = UserSerializer(user_obj, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        user_obj.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
@csrf_exempt
@api_view(['POST', 'GET'])
@authentication_classes([TokenAuthentication])
@permission_classes([AllowAny])
def users(request):
    """List all non-superuser accounts, or register a new one via POST."""
    if request.method == 'POST':
        serializer = UserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    # GET: every regular (non-admin) account.
    queryset = User.objects.filter(is_superuser=False)
    return Response(UserSerializer(queryset, many=True).data)
# owner logic code
@csrf_exempt
@api_view(['POST', 'GET'])
@authentication_classes([TokenAuthentication])
@permission_classes([AllowAny])
def owners(request):
if request.method == 'GET':
snippets = User.objects.filter(is_superuser=True)
serializer = UserSerializer(snippets, many=True)
return Response(serializer.data)
@csrf_exempt
@api_view(['GET', 'PUT', 'DELETE'])
@authentication_classes([TokenAuthentication])
@permission_classes([AllowAny])  # NOTE(review): AllowAny on PUT/DELETE of owners looks unsafe -- confirm intent
def owner(request, id):
    # Retrieve, update or delete a single owner by primary key.
    # NOTE(review): despite the name, this does not filter on is_superuser --
    # any User id is accepted; confirm whether that is intended.
    try:
        product = User.objects.get(id=id)
        print(product)  # NOTE(review): leftover debug print
    except User.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = UserSerializer(product)
        return Response(serializer.data)
    elif request.method == 'PUT':
        serializer = UserSerializer(product, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        product.delete()
return Response(status=status.HTTP_204_NO_CONTENT) | 0.454956 | 0.070977 |
from PyQt5.QtWidgets import QSystemTrayIcon, QAction, QMenu
from PyQt5.QtGui import QIcon
from timer import PomoTimer
from pypresence import Presence
class System_tray():
def __init__(self, tray, app, times, subject, pref_win):
self.times = times
self.main_time = times[0]
self.app = app
self.tray = tray
self.subject = subject
self.pref_win = pref_win
self.label = QAction(str(self.main_time)+":00")
try:
self.RPC = Presence("729011176477818890") # Initialize the Presence client
self.RPC.connect() # Start the handshake loop
except:
print("You don't have a discord app open")
def setupUi(self):
# Create Menu
self.menu = QMenu()
self.tray.setContextMenu(self.menu)
self.tray.setIcon(QIcon("material/images/tomato.png"))
self.tray.setVisible(True)
# Create and add Menu Actions
self.preferences_btt = QAction("Preferences")
self.preferences_btt.triggered.connect(self.preferences)
self.quit = QAction("Quit")
self.quit.triggered.connect(self.app.quit)
self.start_btt = QAction("Start")
self.start_btt.triggered.connect(self.start)
self.pause_btt = QAction("Pause")
self.pause_btt.triggered.connect(self.pause)
self.pause_btt.setVisible(False)
self.reset_btt = QAction("Reset")
self.reset_btt.triggered.connect(self.reset)
self.reset_btt.setVisible(False)
self.menu.addAction(self.label)
self.menu.addSeparator()
self.menu.addActions([self.start_btt, self.pause_btt, self.reset_btt, self.preferences_btt, self.quit])
self.menu.addMenu
def start(self):
print("Start")
self.timer_main.startTimer()
self.start_btt.setVisible(False)
self.pause_btt.setVisible(True)
self.reset_btt.setVisible(True)
def pause(self):
print("Pause")
self.timer_main.pauseTimer()
self.start_btt.setVisible(True)
self.pause_btt.setVisible(False)
def preferences(self):
print("Preferences")
self.pref_win.show()
def reset(self):
self.timer_main.resetTimer()
self.pause_btt.setVisible(False)
self.start_btt.setVisible(True)
self.reset_btt.setVisible(False)
def createTimer(self):
# Creates a timer
self.timer_main = PomoTimer(self.times, self.label, self.tray, self.RPC, self.subject) | tray.py | from PyQt5.QtWidgets import QSystemTrayIcon, QAction, QMenu
from PyQt5.QtGui import QIcon
from timer import PomoTimer
from pypresence import Presence
class System_tray():
def __init__(self, tray, app, times, subject, pref_win):
self.times = times
self.main_time = times[0]
self.app = app
self.tray = tray
self.subject = subject
self.pref_win = pref_win
self.label = QAction(str(self.main_time)+":00")
try:
self.RPC = Presence("729011176477818890") # Initialize the Presence client
self.RPC.connect() # Start the handshake loop
except:
print("You don't have a discord app open")
def setupUi(self):
# Create Menu
self.menu = QMenu()
self.tray.setContextMenu(self.menu)
self.tray.setIcon(QIcon("material/images/tomato.png"))
self.tray.setVisible(True)
# Create and add Menu Actions
self.preferences_btt = QAction("Preferences")
self.preferences_btt.triggered.connect(self.preferences)
self.quit = QAction("Quit")
self.quit.triggered.connect(self.app.quit)
self.start_btt = QAction("Start")
self.start_btt.triggered.connect(self.start)
self.pause_btt = QAction("Pause")
self.pause_btt.triggered.connect(self.pause)
self.pause_btt.setVisible(False)
self.reset_btt = QAction("Reset")
self.reset_btt.triggered.connect(self.reset)
self.reset_btt.setVisible(False)
self.menu.addAction(self.label)
self.menu.addSeparator()
self.menu.addActions([self.start_btt, self.pause_btt, self.reset_btt, self.preferences_btt, self.quit])
self.menu.addMenu
def start(self):
print("Start")
self.timer_main.startTimer()
self.start_btt.setVisible(False)
self.pause_btt.setVisible(True)
self.reset_btt.setVisible(True)
def pause(self):
print("Pause")
self.timer_main.pauseTimer()
self.start_btt.setVisible(True)
self.pause_btt.setVisible(False)
def preferences(self):
print("Preferences")
self.pref_win.show()
def reset(self):
self.timer_main.resetTimer()
self.pause_btt.setVisible(False)
self.start_btt.setVisible(True)
self.reset_btt.setVisible(False)
def createTimer(self):
# Creates a timer
self.timer_main = PomoTimer(self.times, self.label, self.tray, self.RPC, self.subject) | 0.350533 | 0.069668 |
import os
import sys
sys.path.append('../../arl-python')
import numpy as np
import time
import argparse
from arl.image.cleaners import *
from utils import *
def msmfsclean_simplify(dirty, psf, window, gain, thresh, niter, scales, fracthresh, findpeak='CASA'):
    """Simplified multi-scale multi-frequency (MSMFS) CLEAN minor cycle.

    :param dirty: moment dirty-image cube of shape [nmoments, ny, nx]
    :param psf: moment PSF cube; its first axis must be 2 * nmoments
    :param window: clean window image, or None for an unwindowed clean
    :param gain: minor-cycle loop gain, must lie in (0, 2)
    :param thresh: absolute stopping threshold
    :param niter: maximum number of minor-cycle iterations (> 0)
    :param scales: non-empty list of scale sizes in pixels
    :param fracthresh: stopping threshold as a fraction of the peak residual
    :param findpeak: peak-finding variant passed to find_global_optimum
    :return: (model, residual, pscalestack, initial scale-moment residual,
              ssmmpsf, hsmmpsf, ihsmmpsf, normalised dirty, psf); the
              intermediates are returned so tests can compare against them.
    """
    assert 0.0 < gain < 2.0
    assert niter > 0
    assert len(scales) > 0
    m_model = np.zeros(dirty.shape)
    nscales = len(scales)
    # NOTE(review): signed max, not max(|psf|) -- assumes the PSF peak is positive.
    pmax = psf.max()
    assert pmax > 0.0
    psfpeak = np.argmax(np.fabs(psf))  # NOTE(review): unused below
    dmax = dirty.max()                 # NOTE(review): unused below
    dpeak = np.argmax(dirty)           # NOTE(review): unused below
    # Normalise both cubes by the PSF peak.
    lpsf = psf / pmax
    ldirty = dirty / pmax
    nmoments, ny, nx = dirty.shape
    assert psf.shape[0] == 2 * nmoments
    # Create the "scale basis functions" in Algorithm 1
    scaleshape = [nscales, ldirty.shape[1], ldirty.shape[2]]
    scalestack = create_scalestack(scaleshape, scales, norm=True)
    pscaleshape = [nscales, lpsf.shape[1], lpsf.shape[2]]
    pscalestack = create_scalestack(pscaleshape, scales, norm=True)
    # Calculate scale convolutions of moment residuals
    smresidual = calculate_scale_moment_residual(ldirty, scalestack)
    smresidual0 = smresidual.copy()  # keep the pre-clean residual for the caller
    # Calculate scale scale moment moment psf, Hessian, and inverse of Hessian
    # scale scale moment moment psf is needed for update of scale-moment residuals
    # Hessian is needed in calculation of optimum for any iteration
    # Inverse Hessian is needed to calculate principal solution in moment-space
    ssmmpsf = calculate_scale_scale_moment_moment_psf(lpsf, pscalestack)
    hsmmpsf, ihsmmpsf = calculate_scale_inverse_moment_moment_hessian(ssmmpsf)
    # The window is scale dependent - we form it by smoothing and thresholding
    # the input window. This prevents components being placed too close to the
    # edge of the Image.
    if window is None:
        windowstack = None
    else:
        windowstack = np.zeros_like(scalestack)
        windowstack[convolve_scalestack(scalestack, window) > 0.9] = 1.0
    absolutethresh = max(thresh, fracthresh * np.fabs(smresidual[0, 0, :, :]).max())
    # Start iterations
    scale_counts = np.zeros(nscales, dtype='int')  # components found per scale
    scale_flux = np.zeros(nscales)                 # flux cleaned per scale
    # Use original algorithm
    start = time.time()
    for i in range(niter):
        # Find the optimum scale and location.
        mscale, mx, my, mval = find_global_optimum(hsmmpsf, ihsmmpsf, smresidual, windowstack, findpeak)
        scale_counts[mscale] += 1
        scale_flux[mscale] += mval[0]
        # Are we ready to stop yet?
        peak = np.max(np.fabs(mval))
        if peak < absolutethresh:
            break
        # Calculate indices needed for lhs and rhs of updates to model and residual
        lhs, rhs = overlapIndices(ldirty[0, ...], psf[0, ...], mx, my)
        m_model = update_moment_model(m_model, pscalestack, lhs, rhs, gain, mscale, mval)
        smresidual = update_scale_moment_residual(smresidual, ssmmpsf, lhs, rhs, gain, mscale, mval)
    # Rescale the zeroth-scale residual back to the input flux scale.
    residual = pmax * smresidual[0, :, :, :]
    stop = time.time()
    print('Original Time: {:.2f}s'.format(stop - start))
    return m_model, residual, pscalestack, smresidual0, \
        ssmmpsf, hsmmpsf, ihsmmpsf, ldirty, psf
def test_cleaners(data_dir, niter, gain, thresh, fracthresh, nscales, nmoments, nx, ny):
    """Run the simplified MSMFS clean on random data and dump every intermediate array."""
    dirty = create_random_data((nmoments, ny, nx), -100, 100, 'float')
    psf = create_random_data((nmoments * 2, ny, nx), -5, 5, 'float')
    m_model, residual, pscalestack, smresidual0, \
        ssmmpsf, hsmmpsf, ihsmmpsf, ldirty, psf \
        = msmfsclean_simplify(dirty, psf, None, gain=gain, thresh=thresh, niter=niter,
                              scales=[0, 3, 10, 30], fracthresh=fracthresh, findpeak='ARL')
    # One store_data call per intermediate, in the same order as before.
    outputs = [
        ('m_model.dat', m_model),
        ('residual.dat', residual),
        ('pscalestack.dat', pscalestack),
        ('smresidual.dat', smresidual0),
        ('ssmmpsf.dat', ssmmpsf),
        ('hsmmpsf.dat', hsmmpsf),
        ('ihsmmpsf.dat', ihsmmpsf),
        ('ldirty.dat', ldirty),
        ('psf.dat', psf),
    ]
    for file_name, array in outputs:
        store_data(os.path.join(data_dir, file_name), array)
if __name__ == '__main__':
    # Fix the RNG seed so the generated test data is reproducible.
    np.random.seed(0)
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='./data')
    parser.add_argument('--niter', type=int, default=0)
    parser.add_argument('--gain', type=float, default=0.0)
    parser.add_argument('--thresh', type=float, default=0.0)
    parser.add_argument('--fracthresh', type=float, default=0.0)
    parser.add_argument('--nscales', type=int, default=0)
    parser.add_argument('--nmoments', type=int, default=0)
    parser.add_argument('--nx', type=int, default=0)
    parser.add_argument('--ny', type=int, default=0)
    args = parser.parse_args()
    # NOTE(review): the zero defaults violate msmfsclean_simplify's asserts
    # (niter > 0, 0 < gain < 2); callers must pass real values explicitly.
    test_cleaners(args.data_dir, args.niter, args.gain, args.thresh, args.fracthresh, \
args.nscales, args.nmoments, args.nx, args.ny) | c-kernel/test/test_cleaners.py | import os
import sys
sys.path.append('../../arl-python')
import numpy as np
import time
import argparse
from arl.image.cleaners import *
from utils import *
def msmfsclean_simplify(dirty, psf, window, gain, thresh, niter, scales, fracthresh, findpeak='CASA'):
    """Simplified multi-scale multi-frequency (MSMFS) CLEAN minor cycle.

    :param dirty: moment dirty-image cube of shape [nmoments, ny, nx]
    :param psf: moment PSF cube; its first axis must be 2 * nmoments
    :param window: clean window image, or None for an unwindowed clean
    :param gain: minor-cycle loop gain, must lie in (0, 2)
    :param thresh: absolute stopping threshold
    :param niter: maximum number of minor-cycle iterations (> 0)
    :param scales: non-empty list of scale sizes in pixels
    :param fracthresh: stopping threshold as a fraction of the peak residual
    :param findpeak: peak-finding variant passed to find_global_optimum
    :return: (model, residual, pscalestack, initial scale-moment residual,
              ssmmpsf, hsmmpsf, ihsmmpsf, normalised dirty, psf); the
              intermediates are returned so tests can compare against them.
    """
    assert 0.0 < gain < 2.0
    assert niter > 0
    assert len(scales) > 0
    m_model = np.zeros(dirty.shape)
    nscales = len(scales)
    # NOTE(review): signed max, not max(|psf|) -- assumes the PSF peak is positive.
    pmax = psf.max()
    assert pmax > 0.0
    psfpeak = np.argmax(np.fabs(psf))  # NOTE(review): unused below
    dmax = dirty.max()                 # NOTE(review): unused below
    dpeak = np.argmax(dirty)           # NOTE(review): unused below
    # Normalise both cubes by the PSF peak.
    lpsf = psf / pmax
    ldirty = dirty / pmax
    nmoments, ny, nx = dirty.shape
    assert psf.shape[0] == 2 * nmoments
    # Create the "scale basis functions" in Algorithm 1
    scaleshape = [nscales, ldirty.shape[1], ldirty.shape[2]]
    scalestack = create_scalestack(scaleshape, scales, norm=True)
    pscaleshape = [nscales, lpsf.shape[1], lpsf.shape[2]]
    pscalestack = create_scalestack(pscaleshape, scales, norm=True)
    # Calculate scale convolutions of moment residuals
    smresidual = calculate_scale_moment_residual(ldirty, scalestack)
    smresidual0 = smresidual.copy()  # keep the pre-clean residual for the caller
    # Calculate scale scale moment moment psf, Hessian, and inverse of Hessian
    # scale scale moment moment psf is needed for update of scale-moment residuals
    # Hessian is needed in calculation of optimum for any iteration
    # Inverse Hessian is needed to calculate principal solution in moment-space
    ssmmpsf = calculate_scale_scale_moment_moment_psf(lpsf, pscalestack)
    hsmmpsf, ihsmmpsf = calculate_scale_inverse_moment_moment_hessian(ssmmpsf)
    # The window is scale dependent - we form it by smoothing and thresholding
    # the input window. This prevents components being placed too close to the
    # edge of the Image.
    if window is None:
        windowstack = None
    else:
        windowstack = np.zeros_like(scalestack)
        windowstack[convolve_scalestack(scalestack, window) > 0.9] = 1.0
    absolutethresh = max(thresh, fracthresh * np.fabs(smresidual[0, 0, :, :]).max())
    # Start iterations
    scale_counts = np.zeros(nscales, dtype='int')  # components found per scale
    scale_flux = np.zeros(nscales)                 # flux cleaned per scale
    # Use original algorithm
    start = time.time()
    for i in range(niter):
        # Find the optimum scale and location.
        mscale, mx, my, mval = find_global_optimum(hsmmpsf, ihsmmpsf, smresidual, windowstack, findpeak)
        scale_counts[mscale] += 1
        scale_flux[mscale] += mval[0]
        # Are we ready to stop yet?
        peak = np.max(np.fabs(mval))
        if peak < absolutethresh:
            break
        # Calculate indices needed for lhs and rhs of updates to model and residual
        lhs, rhs = overlapIndices(ldirty[0, ...], psf[0, ...], mx, my)
        m_model = update_moment_model(m_model, pscalestack, lhs, rhs, gain, mscale, mval)
        smresidual = update_scale_moment_residual(smresidual, ssmmpsf, lhs, rhs, gain, mscale, mval)
    # Rescale the zeroth-scale residual back to the input flux scale.
    residual = pmax * smresidual[0, :, :, :]
    stop = time.time()
    print('Original Time: {:.2f}s'.format(stop - start))
    return m_model, residual, pscalestack, smresidual0, \
        ssmmpsf, hsmmpsf, ihsmmpsf, ldirty, psf
def test_cleaners(data_dir, niter, gain, thresh, fracthresh, nscales, nmoments, nx, ny):
    """Run the simplified MSMFS clean on random data and dump every intermediate array."""
    dirty = create_random_data((nmoments, ny, nx), -100, 100, 'float')
    psf = create_random_data((nmoments * 2, ny, nx), -5, 5, 'float')
    m_model, residual, pscalestack, smresidual0, \
        ssmmpsf, hsmmpsf, ihsmmpsf, ldirty, psf \
        = msmfsclean_simplify(dirty, psf, None, gain=gain, thresh=thresh, niter=niter,
                              scales=[0, 3, 10, 30], fracthresh=fracthresh, findpeak='ARL')
    # One store_data call per intermediate, in the same order as before.
    outputs = [
        ('m_model.dat', m_model),
        ('residual.dat', residual),
        ('pscalestack.dat', pscalestack),
        ('smresidual.dat', smresidual0),
        ('ssmmpsf.dat', ssmmpsf),
        ('hsmmpsf.dat', hsmmpsf),
        ('ihsmmpsf.dat', ihsmmpsf),
        ('ldirty.dat', ldirty),
        ('psf.dat', psf),
    ]
    for file_name, array in outputs:
        store_data(os.path.join(data_dir, file_name), array)
if __name__ == '__main__':
    # Fix the RNG seed so the generated test data is reproducible.
    np.random.seed(0)
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='./data')
    parser.add_argument('--niter', type=int, default=0)
    parser.add_argument('--gain', type=float, default=0.0)
    parser.add_argument('--thresh', type=float, default=0.0)
    parser.add_argument('--fracthresh', type=float, default=0.0)
    parser.add_argument('--nscales', type=int, default=0)
    parser.add_argument('--nmoments', type=int, default=0)
    parser.add_argument('--nx', type=int, default=0)
    parser.add_argument('--ny', type=int, default=0)
    args = parser.parse_args()
    # NOTE(review): the zero defaults violate msmfsclean_simplify's asserts
    # (niter > 0, 0 < gain < 2); callers must pass real values explicitly.
    test_cleaners(args.data_dir, args.niter, args.gain, args.thresh, args.fracthresh, \
args.nscales, args.nmoments, args.nx, args.ny) | 0.502441 | 0.536313 |
import dataclasses
import enum
import sphinx.application
import sphinx.environment
from typing import *
T = TypeVar('T')
class Converter:
"""
Converters are used to parse and validate directive and global options.
They are used as a more powerful substitute for helper functions declared
in `rst.directives`.
"""
def from_str(self, value: str):
"""
Parses string and returns a value.
Invoked when parsing directive arguments.
"""
raise NotImplementedError
def from_any(self, value: Any):
"""
Validate (and probably convert) object of any type.
Intended to validate values loaded from conf.py,
but currently not in use.
"""
raise NotImplementedError
def __call__(self, value: str):
"""
Calls `from_str()`.
With this method present, converters can be used in ``option_spec``.
"""
return self.from_str(value)
def __str__(self):
"""
String representation used as a value description in rst autodoc.
"""
return '...'
class StrConverter(Converter):
"""
Generic converter for stings.
"""
def __init__(self, min_len=0, max_len=None, regex=None):
"""
:param min_len: if given, checks that string is at least this long.
:param max_len: if given, checks that string is at most this long.
:param regex: if given, string will be matched against this regular
expression via `re.match`.
"""
self.min_len = min_len
self.max_len = max_len
self.regex = regex
def from_str(self, value: str):
value = value.strip()
return self.from_any(value)
def from_any(self, value: Any):
if not isinstance(value, str):
raise ValueError(f'expected string, got {type(value)}')
if self.min_len is not None and len(value) < self.min_len:
raise ValueError(f'should be at least {self.min_len} symbols long')
if self.max_len is not None and len(value) > self.max_len:
raise ValueError(f'should be at most {self.min_len} symbols long')
if self.regex is not None:
import re
if re.match(self.regex, value) is None:
raise ValueError(f'should match regex "{self.regex}"')
return value
def __str__(self):
return '<str>'
class IntConverter(Converter):
"""
Generic converter for ints.
"""
def __init__(self, min_val=None, max_val=None):
"""
:param min_val: if given, checks that int is no less than this value.
:param max_val: if given, checks that int is no greater than this value.
"""
self.min_val = min_val
self.max_val = max_val
def from_str(self, value: str):
try:
value = int(value)
except ValueError:
raise ValueError('should be an integer')
return self.from_any(value)
def from_any(self, value: Any):
if not isinstance(value, int):
raise ValueError(f'expected int, got {type(value)}')
if self.min_val is not None and value < self.min_val:
if self.min_val == 1:
raise ValueError(f'should be positive')
if self.min_val == 0:
raise ValueError(f'should not be negative')
raise ValueError(f'should be no less than {self.min_val}')
if self.max_val is not None and value > self.max_val:
if self.max_val == -1:
raise ValueError(f'should be negative')
if self.max_val == 0:
raise ValueError(f'should not be positive')
raise ValueError(f'should be no greater than {self.min_val}')
return value
def __str__(self):
return '<int>'
class FloatConverter(Converter):
"""
Generic converter for floats.
"""
def __init__(self, min_val=None, max_val=None):
"""
:param min_val: if given, checks that int is no less than this value.
:param max_val: if given, checks that int is no greater than this value.
"""
self.min_val = min_val
self.max_val = max_val
def from_str(self, value: str):
try:
value = float(value)
except ValueError:
raise ValueError('should be a float')
return self.from_any(value)
def from_any(self, value: Any):
if not isinstance(value, (float, int)):
raise ValueError(f'expected float, got {type(value)}')
value = float(value)
if self.min_val is not None and value < self.min_val:
if self.min_val == 0:
raise ValueError(f'should not be negative')
raise ValueError(f'should be no less than {self.min_val}')
if self.max_val is not None and value > self.max_val:
if self.max_val == 0:
raise ValueError(f'should not be positive')
raise ValueError(f'should be no greater than {self.min_val}')
return value
def __str__(self):
return '<float>'
class ListConverter(Converter):
"""
Parses space- or comma-separated lists, similar to `positive_int_list`.
"""
def __init__(self, u: Converter, min_len=0, max_len=None):
"""
:param u: nested converter which will be used to parse list elements.
:param min_len: if given, checks that list is at least this long.
:param max_len: if given, checks that list is at most this long.
"""
self.u = u
self.min_len = min_len
self.max_len = max_len
def from_str(self, value: str):
if ',' in value:
value = value.split(',')
else:
value = value.split()
self.check_len(value)
result = []
for i, v in enumerate(value):
result.append(self.u.from_str(v))
return result
def from_any(self, value: Any):
if not isinstance(value, (list, tuple)):
raise ValueError(f'expected list, got {type(value)}')
self.check_len(value)
result = []
for i, v in enumerate(value):
result.append(self.u.from_any(v))
return result
def check_len(self, value):
if self.min_len is not None and len(value) < self.min_len:
raise ValueError(f'should be at least {self.min_len} elements long')
if self.max_len is not None and len(value) > self.max_len:
raise ValueError(f'should be at most {self.min_len} elements long')
def __str__(self):
return f'{self.u}[, {self.u}[, ...]]'
class TupleConverter(Converter):
"""
Parses space- or comma-separated tuples.
"""
def __init__(self, *u: Converter):
"""
:param u: nested converters. Each tuple element will be parsed with the
corresponding converter.
"""
self.u = u
def from_str(self, value: str):
if ',' in value:
value = value.split(',')
else:
value = value.split()
self.check_len(value)
result = []
for i, (v, u) in enumerate(zip(value, self.u)):
result.append(u.from_str(v))
return result
def from_any(self, value: Any):
if not isinstance(value, (list, tuple)):
raise ValueError(f'expected tuple, got {type(value)}')
self.check_len(value)
result = []
for i, (v, u) in enumerate(zip(value, self.u)):
result.append(u.from_any(v))
return result
def check_len(self, value):
if len(value) != len(self.u):
raise ValueError(f'should contain exactly {len(self.u)} items')
def __str__(self):
return ', '.join(map(str, self.u))
class EnumConverter(Converter):
"""
Parses enums.
"""
def __init__(self, cls: Type[enum.Enum]):
"""
:param cls: enum class (from the standard python `enum` module).
"""
self.cls = cls
def from_str(self, value: str):
value_orig = value
value = value.strip().upper().replace('-', '_')
try:
return self.cls[value]
except KeyError:
items = ', '.join([repr(x.name) for x in self.cls])
raise ValueError(f'expected one of [{items}], got {value_orig!r} instead')
def from_any(self, value: Any):
if not isinstance(value, self.cls):
raise ValueError(f'expected {self.cls.__name__}, got {type(value)}')
return value
def __str__(self):
return '|'.join(map(lambda x: x.name.lower().replace('_', '-'), self.cls))
class BoolConverter(Converter):
"""
Converts ``'on'``, ``'off'``, ``'true'``, ``'false'`` strings.
"""
def from_str(self, value: str):
value = value.strip().lower()
if value in ['on', 'yes', 'true']:
return True
elif value in ['off', 'no', 'false']:
return False
else:
raise ValueError(f'expected one of [\'on\', \'yes\', \'true\', '
f'\'off\', \'no\', \'false\'], '
f'got {value!r} instead')
def from_any(self, value: Any):
if not isinstance(value, bool):
raise ValueError(f'expected bool, got {type(value)}')
return value
def __str__(self):
return 'True|False'
class FlagConverter(Converter):
"""
Converts empty strings to ``True``.
"""
def from_str(self, value: str):
if value:
raise ValueError('value is not expected')
return True
def from_any(self, value: Any):
if not isinstance(value, bool):
raise ValueError(f'expected bool, got {type(value)}')
return value
def __str__(self):
return ''
def make_converter(tp) -> Converter:
if tp is str:
return StrConverter()
elif tp is bool:
return BoolConverter()
elif tp is int:
return IntConverter()
elif tp is float:
return FloatConverter()
elif tp is list:
return ListConverter(StrConverter())
elif getattr(tp, '__origin__', None) is list:
return ListConverter(make_converter(tp.__args__[0]))
elif getattr(tp, '__origin__', None) is tuple:
if ... in tp.__args__:
raise TypeError('variadic tuples are not supported')
return TupleConverter(*[make_converter(a) for a in tp.__args__])
elif getattr(tp, '__origin__', None) is Union:
if len(tp.__args__) != 2 or type(None) not in tp.__args__:
raise TypeError('unions are not supported (optionals are, though)')
if tp.__args__[0] is type(None):
return make_converter(tp.__args__[1])
else:
return make_converter(tp.__args__[0])
elif isinstance(tp, type) and issubclass(tp, enum.Enum):
return EnumConverter(tp)
else:
raise TypeError(f'unsupported type {tp}')
def make_option_spec(cls):
options = {}
for field in dataclasses.fields(cls): # type: dataclasses.Field
name = field.name.replace('_', '-')
if 'converter' in field.metadata:
converter = field.metadata['converter']
elif field.type is bool:
converter = FlagConverter()
options['no-' + name] = FlagConverter()
else:
converter = make_converter(field.type)
options[name] = converter
return options
def _parse_options(cls, options, prefix=''):
result = {}
if prefix:
prefix += '-'
for field in dataclasses.fields(cls): # type: dataclasses.Field
name = prefix + field.name.replace('_', '-')
if name not in options and field.type is not bool:
continue
if field.type is bool:
if name in options:
result[field.name] = True
elif 'no-' + name in options:
result[field.name] = False
else:
result[field.name] = options[name]
return result
class NamespaceHolder:
def __init__(self, namespace: 'Namespace', prefix: str):
self.namespace = namespace
self.prefix = prefix
class Namespace(Generic[T]):
_cls: Type[T] = None
_prefix: str = None
_loaded: Optional[T] = None
def __init__(self, global_prefix: str, cls: Type[T]):
"""
:param global_prefix: prefix to be used when adding options
to ``conf.py`` and to the build environment. The prefix should be
unique across all namespaces registered in all loaded plugins so
it's best to use plugin name or domain name as a prefix.
:param cls: dataclass that contains the settings.
"""
self._prefix = global_prefix
self._cls = cls
def fields(self) -> Iterator[dataclasses.Field]:
return dataclasses.fields(self._cls)
def no_global_fields(self) -> Iterator[dataclasses.Field]:
fields = self.fields()
return filter(lambda f: not f.metadata.get('no_global', False), fields)
def get_cls(self):
return self._cls
def make_option_spec(self, prefix: str = '') -> Dict[str, Converter]:
"""
Creates ``option_spec`` for use in rst directives.
For each boolean options this function will add a corresponding ``no-``
option.
:param prefix: if given, each option name will be prefixed. This is
useful to add settings that are not directly used by the directive
but instead used to override default settings for nested directives
via `push_settings()`.
:return: dict with option names as keys and converters as values.
"""
option_spec = make_option_spec(self._cls)
if prefix:
prefix += '-'
return {prefix + k: v for k, v in option_spec.items()}
else:
return option_spec
def register_settings(self, app: sphinx.application.Sphinx):
"""
Registers settings so that they can be loaded from ``conf.py``.
:param app: current sphinx application.
"""
prefix = self._prefix
if prefix:
prefix += '_'
for field in self.no_global_fields():
default = field.default
if field.default_factory is not dataclasses.MISSING:
default = self._make_default_factory(field.default_factory)
if default is dataclasses.MISSING:
default = None
rebuild = field.metadata.get('rebuild', False)
app.add_config_value(prefix + field.name, default, rebuild)
@staticmethod
def _make_default_factory(default_factory):
def factory(_):
return default_factory()
return factory
def load_global_settings(self, env: sphinx.environment.BuildEnvironment) -> T:
"""
Loads settings from ``conf.py``.
:param env: current build environment.
"""
prefix = self._prefix
if prefix:
prefix += '_'
if self._loaded is None:
options = {}
for field in self.no_global_fields():
options[field.name] = env.config[prefix + field.name]
self._loaded = self._cls(**options)
return self._loaded
def load_settings(self, env: sphinx.environment.BuildEnvironment) -> T:
"""
Loads settings local to the currently processed directive.
If settings stack is not empty, loads last pushed settings, otherwise
loads global settings.
See `push_settings()` and `pop_settings()`.
:param env: current build environment.
"""
stack = self._get_stack(env)
if not stack:
return self.load_global_settings(env)
else:
return stack[-1]
def push_settings(self, env: sphinx.environment.BuildEnvironment, s: T):
"""
Pushes settings to the local stack.
All calls to `load_settings()` will return settings passed to this
function unless new portion of settings is pushed or this settings
are popped from the stack.
This function is intended to be called from `before_content()`
to redefine default settings for nested directives.
See `load_settings()` and `pop_settings()`.
:param env: current build environment.
:param s: new settings.
"""
stack = self._get_stack(env)
stack.append(s)
def pop_settings(self, env: sphinx.environment.BuildEnvironment):
"""
Pops settings from the local stack.
This function is intended to be called from `after_content` to undo
all changes made by calling `push_settings()` from `before_content()`.
See `load_settings()` and `push_settings()`.
:param env: current build environment.
"""
stack = self._get_stack(env)
stack.pop()
def load_from_options(self, options: dict,
env: sphinx.environment.BuildEnvironment,
prefix: str = '') -> T:
"""
Load settings from parsed options and merge them with local settings.
Ignores every option that's not used by this namespace. One can add
options from multiple namespaces as long as all options have unique
names.
Honors ``no-`` options added by `make_option_spec()`.
:param options: parsed directive options.
:param env: current build environment.
:param prefix: prefix that was used in `make_option_spec()`.
:return: parsed settings.
"""
options = _parse_options(self._cls, options, prefix)
local_options = self.load_settings(env)
return dataclasses.replace(local_options, **options)
def _get_stack(self, env: sphinx.environment.BuildEnvironment):
namespaces = env.temp_data.setdefault('configurator_namespaces', {})
return namespaces.setdefault(self._prefix, [])
def for_directive(self, prefix='') -> T:
return NamespaceHolder(self, prefix)
class ManagedDirectiveType(type):
def __new__(mcs, name, bases, members):
option_spec = {}
namespace_attrs: Dict[str, NamespaceHolder] = {}
for base in bases:
new_namespace_attrs: Set[NamespaceHolder] = getattr(base, '_namespace_attrs_', {}) or {}
for new_ns in new_namespace_attrs:
if new_ns.prefix in namespace_attrs:
ns = namespace_attrs[new_ns.prefix]
raise TypeError(
f'cannot combine namespace '
f'{new_ns.namespace.get_cls()} and '
f'{ns.namespace.get_cls()}'
)
namespace_attrs[new_ns.prefix] = new_ns
option_spec.update(getattr(base, 'option_spec', {}) or {})
option_spec.update(members.get('option_spec', {}))
for name, member in list(members.items()):
if isinstance(member, NamespaceHolder):
new_ns = member.namespace
if member.prefix in namespace_attrs:
ns = namespace_attrs[member.prefix].namespace
if not issubclass(new_ns.__class__, ns.__class__):
raise TypeError(
f'cannot override namespace {ns} with '
f'namespace {new_ns}: the later must be a subclass '
f'of the former'
)
namespace_attrs[member.prefix] = member
members[name] = mcs._make_settings_getter(
'_configurator_cache_' + name,
member.namespace,
member.prefix
)
option_spec.update(new_ns.make_option_spec(member.prefix))
members['option_spec'] = option_spec
members['_namespace_attrs_'] = set(namespace_attrs.values())
return super(ManagedDirectiveType, mcs).__new__(mcs, name, bases, members)
@staticmethod
def _make_settings_getter(name, namespace, prefix):
@property
def settings_getter(self):
if not hasattr(self, name):
settings = namespace.load_from_options(
self.options,
self.state.document.settings.env,
prefix
)
setattr(self, name, settings)
return getattr(self, name)
return settings_getter
class ManagedDirective(metaclass=ManagedDirectiveType):
def push_settings(self, namespace: Namespace[T], value: T):
namespace.push_settings(self.state.document.settings.env, value)
def pop_settings(self, namespace: Namespace):
namespace.pop_settings(self.state.document.settings.env) | sphinx_a4doc/contrib/configurator.py | import dataclasses
import enum
import sphinx.application
import sphinx.environment
from typing import *
T = TypeVar('T')
class Converter:
"""
Converters are used to parse and validate directive and global options.
They are used as a more powerful substitute for helper functions declared
in `rst.directives`.
"""
def from_str(self, value: str):
"""
Parses string and returns a value.
Invoked when parsing directive arguments.
"""
raise NotImplementedError
def from_any(self, value: Any):
"""
Validate (and probably convert) object of any type.
Intended to validate values loaded from conf.py,
but currently not in use.
"""
raise NotImplementedError
def __call__(self, value: str):
"""
Calls `from_str()`.
With this method present, converters can be used in ``option_spec``.
"""
return self.from_str(value)
def __str__(self):
"""
String representation used as a value description in rst autodoc.
"""
return '...'
class StrConverter(Converter):
"""
Generic converter for stings.
"""
def __init__(self, min_len=0, max_len=None, regex=None):
"""
:param min_len: if given, checks that string is at least this long.
:param max_len: if given, checks that string is at most this long.
:param regex: if given, string will be matched against this regular
expression via `re.match`.
"""
self.min_len = min_len
self.max_len = max_len
self.regex = regex
def from_str(self, value: str):
value = value.strip()
return self.from_any(value)
def from_any(self, value: Any):
if not isinstance(value, str):
raise ValueError(f'expected string, got {type(value)}')
if self.min_len is not None and len(value) < self.min_len:
raise ValueError(f'should be at least {self.min_len} symbols long')
if self.max_len is not None and len(value) > self.max_len:
raise ValueError(f'should be at most {self.min_len} symbols long')
if self.regex is not None:
import re
if re.match(self.regex, value) is None:
raise ValueError(f'should match regex "{self.regex}"')
return value
def __str__(self):
return '<str>'
class IntConverter(Converter):
"""
Generic converter for ints.
"""
def __init__(self, min_val=None, max_val=None):
"""
:param min_val: if given, checks that int is no less than this value.
:param max_val: if given, checks that int is no greater than this value.
"""
self.min_val = min_val
self.max_val = max_val
def from_str(self, value: str):
try:
value = int(value)
except ValueError:
raise ValueError('should be an integer')
return self.from_any(value)
def from_any(self, value: Any):
if not isinstance(value, int):
raise ValueError(f'expected int, got {type(value)}')
if self.min_val is not None and value < self.min_val:
if self.min_val == 1:
raise ValueError(f'should be positive')
if self.min_val == 0:
raise ValueError(f'should not be negative')
raise ValueError(f'should be no less than {self.min_val}')
if self.max_val is not None and value > self.max_val:
if self.max_val == -1:
raise ValueError(f'should be negative')
if self.max_val == 0:
raise ValueError(f'should not be positive')
raise ValueError(f'should be no greater than {self.min_val}')
return value
def __str__(self):
return '<int>'
class FloatConverter(Converter):
"""
Generic converter for floats.
"""
def __init__(self, min_val=None, max_val=None):
"""
:param min_val: if given, checks that int is no less than this value.
:param max_val: if given, checks that int is no greater than this value.
"""
self.min_val = min_val
self.max_val = max_val
def from_str(self, value: str):
try:
value = float(value)
except ValueError:
raise ValueError('should be a float')
return self.from_any(value)
def from_any(self, value: Any):
if not isinstance(value, (float, int)):
raise ValueError(f'expected float, got {type(value)}')
value = float(value)
if self.min_val is not None and value < self.min_val:
if self.min_val == 0:
raise ValueError(f'should not be negative')
raise ValueError(f'should be no less than {self.min_val}')
if self.max_val is not None and value > self.max_val:
if self.max_val == 0:
raise ValueError(f'should not be positive')
raise ValueError(f'should be no greater than {self.min_val}')
return value
def __str__(self):
return '<float>'
class ListConverter(Converter):
"""
Parses space- or comma-separated lists, similar to `positive_int_list`.
"""
def __init__(self, u: Converter, min_len=0, max_len=None):
"""
:param u: nested converter which will be used to parse list elements.
:param min_len: if given, checks that list is at least this long.
:param max_len: if given, checks that list is at most this long.
"""
self.u = u
self.min_len = min_len
self.max_len = max_len
def from_str(self, value: str):
if ',' in value:
value = value.split(',')
else:
value = value.split()
self.check_len(value)
result = []
for i, v in enumerate(value):
result.append(self.u.from_str(v))
return result
def from_any(self, value: Any):
if not isinstance(value, (list, tuple)):
raise ValueError(f'expected list, got {type(value)}')
self.check_len(value)
result = []
for i, v in enumerate(value):
result.append(self.u.from_any(v))
return result
def check_len(self, value):
if self.min_len is not None and len(value) < self.min_len:
raise ValueError(f'should be at least {self.min_len} elements long')
if self.max_len is not None and len(value) > self.max_len:
raise ValueError(f'should be at most {self.min_len} elements long')
def __str__(self):
return f'{self.u}[, {self.u}[, ...]]'
class TupleConverter(Converter):
"""
Parses space- or comma-separated tuples.
"""
def __init__(self, *u: Converter):
"""
:param u: nested converters. Each tuple element will be parsed with the
corresponding converter.
"""
self.u = u
def from_str(self, value: str):
if ',' in value:
value = value.split(',')
else:
value = value.split()
self.check_len(value)
result = []
for i, (v, u) in enumerate(zip(value, self.u)):
result.append(u.from_str(v))
return result
def from_any(self, value: Any):
if not isinstance(value, (list, tuple)):
raise ValueError(f'expected tuple, got {type(value)}')
self.check_len(value)
result = []
for i, (v, u) in enumerate(zip(value, self.u)):
result.append(u.from_any(v))
return result
def check_len(self, value):
if len(value) != len(self.u):
raise ValueError(f'should contain exactly {len(self.u)} items')
def __str__(self):
return ', '.join(map(str, self.u))
class EnumConverter(Converter):
"""
Parses enums.
"""
def __init__(self, cls: Type[enum.Enum]):
"""
:param cls: enum class (from the standard python `enum` module).
"""
self.cls = cls
def from_str(self, value: str):
value_orig = value
value = value.strip().upper().replace('-', '_')
try:
return self.cls[value]
except KeyError:
items = ', '.join([repr(x.name) for x in self.cls])
raise ValueError(f'expected one of [{items}], got {value_orig!r} instead')
def from_any(self, value: Any):
if not isinstance(value, self.cls):
raise ValueError(f'expected {self.cls.__name__}, got {type(value)}')
return value
def __str__(self):
return '|'.join(map(lambda x: x.name.lower().replace('_', '-'), self.cls))
class BoolConverter(Converter):
"""
Converts ``'on'``, ``'off'``, ``'true'``, ``'false'`` strings.
"""
def from_str(self, value: str):
value = value.strip().lower()
if value in ['on', 'yes', 'true']:
return True
elif value in ['off', 'no', 'false']:
return False
else:
raise ValueError(f'expected one of [\'on\', \'yes\', \'true\', '
f'\'off\', \'no\', \'false\'], '
f'got {value!r} instead')
def from_any(self, value: Any):
if not isinstance(value, bool):
raise ValueError(f'expected bool, got {type(value)}')
return value
def __str__(self):
return 'True|False'
class FlagConverter(Converter):
"""
Converts empty strings to ``True``.
"""
def from_str(self, value: str):
if value:
raise ValueError('value is not expected')
return True
def from_any(self, value: Any):
if not isinstance(value, bool):
raise ValueError(f'expected bool, got {type(value)}')
return value
def __str__(self):
return ''
def make_converter(tp) -> Converter:
if tp is str:
return StrConverter()
elif tp is bool:
return BoolConverter()
elif tp is int:
return IntConverter()
elif tp is float:
return FloatConverter()
elif tp is list:
return ListConverter(StrConverter())
elif getattr(tp, '__origin__', None) is list:
return ListConverter(make_converter(tp.__args__[0]))
elif getattr(tp, '__origin__', None) is tuple:
if ... in tp.__args__:
raise TypeError('variadic tuples are not supported')
return TupleConverter(*[make_converter(a) for a in tp.__args__])
elif getattr(tp, '__origin__', None) is Union:
if len(tp.__args__) != 2 or type(None) not in tp.__args__:
raise TypeError('unions are not supported (optionals are, though)')
if tp.__args__[0] is type(None):
return make_converter(tp.__args__[1])
else:
return make_converter(tp.__args__[0])
elif isinstance(tp, type) and issubclass(tp, enum.Enum):
return EnumConverter(tp)
else:
raise TypeError(f'unsupported type {tp}')
def make_option_spec(cls):
options = {}
for field in dataclasses.fields(cls): # type: dataclasses.Field
name = field.name.replace('_', '-')
if 'converter' in field.metadata:
converter = field.metadata['converter']
elif field.type is bool:
converter = FlagConverter()
options['no-' + name] = FlagConverter()
else:
converter = make_converter(field.type)
options[name] = converter
return options
def _parse_options(cls, options, prefix=''):
result = {}
if prefix:
prefix += '-'
for field in dataclasses.fields(cls): # type: dataclasses.Field
name = prefix + field.name.replace('_', '-')
if name not in options and field.type is not bool:
continue
if field.type is bool:
if name in options:
result[field.name] = True
elif 'no-' + name in options:
result[field.name] = False
else:
result[field.name] = options[name]
return result
class NamespaceHolder:
def __init__(self, namespace: 'Namespace', prefix: str):
self.namespace = namespace
self.prefix = prefix
class Namespace(Generic[T]):
_cls: Type[T] = None
_prefix: str = None
_loaded: Optional[T] = None
def __init__(self, global_prefix: str, cls: Type[T]):
"""
:param global_prefix: prefix to be used when adding options
to ``conf.py`` and to the build environment. The prefix should be
unique across all namespaces registered in all loaded plugins so
it's best to use plugin name or domain name as a prefix.
:param cls: dataclass that contains the settings.
"""
self._prefix = global_prefix
self._cls = cls
def fields(self) -> Iterator[dataclasses.Field]:
return dataclasses.fields(self._cls)
def no_global_fields(self) -> Iterator[dataclasses.Field]:
fields = self.fields()
return filter(lambda f: not f.metadata.get('no_global', False), fields)
def get_cls(self):
return self._cls
def make_option_spec(self, prefix: str = '') -> Dict[str, Converter]:
"""
Creates ``option_spec`` for use in rst directives.
For each boolean options this function will add a corresponding ``no-``
option.
:param prefix: if given, each option name will be prefixed. This is
useful to add settings that are not directly used by the directive
but instead used to override default settings for nested directives
via `push_settings()`.
:return: dict with option names as keys and converters as values.
"""
option_spec = make_option_spec(self._cls)
if prefix:
prefix += '-'
return {prefix + k: v for k, v in option_spec.items()}
else:
return option_spec
def register_settings(self, app: sphinx.application.Sphinx):
"""
Registers settings so that they can be loaded from ``conf.py``.
:param app: current sphinx application.
"""
prefix = self._prefix
if prefix:
prefix += '_'
for field in self.no_global_fields():
default = field.default
if field.default_factory is not dataclasses.MISSING:
default = self._make_default_factory(field.default_factory)
if default is dataclasses.MISSING:
default = None
rebuild = field.metadata.get('rebuild', False)
app.add_config_value(prefix + field.name, default, rebuild)
@staticmethod
def _make_default_factory(default_factory):
def factory(_):
return default_factory()
return factory
def load_global_settings(self, env: sphinx.environment.BuildEnvironment) -> T:
"""
Loads settings from ``conf.py``.
:param env: current build environment.
"""
prefix = self._prefix
if prefix:
prefix += '_'
if self._loaded is None:
options = {}
for field in self.no_global_fields():
options[field.name] = env.config[prefix + field.name]
self._loaded = self._cls(**options)
return self._loaded
def load_settings(self, env: sphinx.environment.BuildEnvironment) -> T:
"""
Loads settings local to the currently processed directive.
If settings stack is not empty, loads last pushed settings, otherwise
loads global settings.
See `push_settings()` and `pop_settings()`.
:param env: current build environment.
"""
stack = self._get_stack(env)
if not stack:
return self.load_global_settings(env)
else:
return stack[-1]
def push_settings(self, env: sphinx.environment.BuildEnvironment, s: T):
"""
Pushes settings to the local stack.
All calls to `load_settings()` will return settings passed to this
function unless new portion of settings is pushed or this settings
are popped from the stack.
This function is intended to be called from `before_content()`
to redefine default settings for nested directives.
See `load_settings()` and `pop_settings()`.
:param env: current build environment.
:param s: new settings.
"""
stack = self._get_stack(env)
stack.append(s)
def pop_settings(self, env: sphinx.environment.BuildEnvironment):
"""
Pops settings from the local stack.
This function is intended to be called from `after_content` to undo
all changes made by calling `push_settings()` from `before_content()`.
See `load_settings()` and `push_settings()`.
:param env: current build environment.
"""
stack = self._get_stack(env)
stack.pop()
def load_from_options(self, options: dict,
env: sphinx.environment.BuildEnvironment,
prefix: str = '') -> T:
"""
Load settings from parsed options and merge them with local settings.
Ignores every option that's not used by this namespace. One can add
options from multiple namespaces as long as all options have unique
names.
Honors ``no-`` options added by `make_option_spec()`.
:param options: parsed directive options.
:param env: current build environment.
:param prefix: prefix that was used in `make_option_spec()`.
:return: parsed settings.
"""
options = _parse_options(self._cls, options, prefix)
local_options = self.load_settings(env)
return dataclasses.replace(local_options, **options)
def _get_stack(self, env: sphinx.environment.BuildEnvironment):
namespaces = env.temp_data.setdefault('configurator_namespaces', {})
return namespaces.setdefault(self._prefix, [])
def for_directive(self, prefix='') -> T:
return NamespaceHolder(self, prefix)
class ManagedDirectiveType(type):
def __new__(mcs, name, bases, members):
option_spec = {}
namespace_attrs: Dict[str, NamespaceHolder] = {}
for base in bases:
new_namespace_attrs: Set[NamespaceHolder] = getattr(base, '_namespace_attrs_', {}) or {}
for new_ns in new_namespace_attrs:
if new_ns.prefix in namespace_attrs:
ns = namespace_attrs[new_ns.prefix]
raise TypeError(
f'cannot combine namespace '
f'{new_ns.namespace.get_cls()} and '
f'{ns.namespace.get_cls()}'
)
namespace_attrs[new_ns.prefix] = new_ns
option_spec.update(getattr(base, 'option_spec', {}) or {})
option_spec.update(members.get('option_spec', {}))
for name, member in list(members.items()):
if isinstance(member, NamespaceHolder):
new_ns = member.namespace
if member.prefix in namespace_attrs:
ns = namespace_attrs[member.prefix].namespace
if not issubclass(new_ns.__class__, ns.__class__):
raise TypeError(
f'cannot override namespace {ns} with '
f'namespace {new_ns}: the later must be a subclass '
f'of the former'
)
namespace_attrs[member.prefix] = member
members[name] = mcs._make_settings_getter(
'_configurator_cache_' + name,
member.namespace,
member.prefix
)
option_spec.update(new_ns.make_option_spec(member.prefix))
members['option_spec'] = option_spec
members['_namespace_attrs_'] = set(namespace_attrs.values())
return super(ManagedDirectiveType, mcs).__new__(mcs, name, bases, members)
@staticmethod
def _make_settings_getter(name, namespace, prefix):
@property
def settings_getter(self):
if not hasattr(self, name):
settings = namespace.load_from_options(
self.options,
self.state.document.settings.env,
prefix
)
setattr(self, name, settings)
return getattr(self, name)
return settings_getter
class ManagedDirective(metaclass=ManagedDirectiveType):
def push_settings(self, namespace: Namespace[T], value: T):
namespace.push_settings(self.state.document.settings.env, value)
def pop_settings(self, namespace: Namespace):
namespace.pop_settings(self.state.document.settings.env) | 0.861494 | 0.412708 |
import asyncio
import unittest.mock
import asynctest
import logging
import os
from functools import wraps
import shortuuid
from yarl import URL
from aio_pika import Connection, connect, Channel, Queue, Exchange
log = logging.getLogger(__name__)
for logger_name in ('pika.channel', 'pika.callback', 'pika.connection'):
logging.getLogger(logger_name).setLevel(logging.INFO)
logging.basicConfig(level=logging.DEBUG)
AMQP_URL = URL(os.getenv("AMQP_URL", "amqp://guest:guest@localhost"))
if not AMQP_URL.path:
AMQP_URL.path = '/'
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
class AsyncTestCase(asynctest.TestCase):
use_default_loop = False
forbid_get_event_loop = True
TEST_TIMEOUT = int(os.getenv('ASYNCIO_TEST_TIMEOUT', '30'))
def _run_test_method(self, method):
result = method()
if asyncio.iscoroutine(result):
self.loop.run_until_complete(
asyncio.wait_for(result, timeout=self.TEST_TIMEOUT)
)
@property
def _all_tasks(self):
return getattr(asyncio, 'all_tasks', asyncio.Task.all_tasks)
async def doCleanups(self):
outcome = self._outcome or unittest.mock._Outcome()
while self._cleanups:
function, args, kwargs = self._cleanups.pop()
with outcome.testPartExecutor(self):
if asyncio.iscoroutinefunction(function):
await self.loop.create_task(function(*args, **kwargs))
elif asyncio.iscoroutine(function):
await function
else:
function(*args, **kwargs)
return outcome.success
def get_random_name(self, *args):
prefix = ['test']
for item in args:
prefix.append(item)
prefix.append(shortuuid.uuid())
return ".".join(prefix)
class BaseTestCase(AsyncTestCase):
async def create_connection(self, cleanup=True) -> Connection:
client = await connect(str(AMQP_URL), loop=self.loop)
if cleanup:
self.addCleanup(client.close)
return client
async def create_channel(self, connection=None,
cleanup=True, **kwargs) -> Channel:
if connection is None:
connection = await self.create_connection()
channel = await connection.channel(**kwargs)
if cleanup:
self.addCleanup(channel.close)
return channel
async def declare_queue(self, *args, **kwargs) -> Queue:
if 'channel' not in kwargs:
channel = await self.create_channel()
else:
channel = kwargs.pop('channel')
queue = await channel.declare_queue(*args, **kwargs)
self.addCleanup(queue.delete)
return queue
async def declare_exchange(self, *args, **kwargs) -> Exchange:
if 'channel' not in kwargs:
channel = await self.create_channel()
else:
channel = kwargs.pop('channel')
exchange = await channel.declare_exchange(*args, **kwargs)
self.addCleanup(exchange.delete)
return exchange
def timeout(timeout_sec=5):
def decorator(func):
@wraps(func)
async def wrap(self, *args, **kwargs):
loop = self.loop
task = loop.create_task(func(self, *args, **kwargs))
def on_timeout():
if task.done():
return
task.cancel()
self.loop.call_later(timeout_sec, on_timeout)
return await task
return wrap
return decorator | tests/__init__.py | import asyncio
import unittest.mock
import asynctest
import logging
import os
from functools import wraps
import shortuuid
from yarl import URL
from aio_pika import Connection, connect, Channel, Queue, Exchange
log = logging.getLogger(__name__)
for logger_name in ('pika.channel', 'pika.callback', 'pika.connection'):
logging.getLogger(logger_name).setLevel(logging.INFO)
logging.basicConfig(level=logging.DEBUG)
AMQP_URL = URL(os.getenv("AMQP_URL", "amqp://guest:guest@localhost"))
if not AMQP_URL.path:
AMQP_URL.path = '/'
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
class AsyncTestCase(asynctest.TestCase):
use_default_loop = False
forbid_get_event_loop = True
TEST_TIMEOUT = int(os.getenv('ASYNCIO_TEST_TIMEOUT', '30'))
def _run_test_method(self, method):
result = method()
if asyncio.iscoroutine(result):
self.loop.run_until_complete(
asyncio.wait_for(result, timeout=self.TEST_TIMEOUT)
)
@property
def _all_tasks(self):
return getattr(asyncio, 'all_tasks', asyncio.Task.all_tasks)
async def doCleanups(self):
outcome = self._outcome or unittest.mock._Outcome()
while self._cleanups:
function, args, kwargs = self._cleanups.pop()
with outcome.testPartExecutor(self):
if asyncio.iscoroutinefunction(function):
await self.loop.create_task(function(*args, **kwargs))
elif asyncio.iscoroutine(function):
await function
else:
function(*args, **kwargs)
return outcome.success
def get_random_name(self, *args):
prefix = ['test']
for item in args:
prefix.append(item)
prefix.append(shortuuid.uuid())
return ".".join(prefix)
class BaseTestCase(AsyncTestCase):
async def create_connection(self, cleanup=True) -> Connection:
client = await connect(str(AMQP_URL), loop=self.loop)
if cleanup:
self.addCleanup(client.close)
return client
async def create_channel(self, connection=None,
cleanup=True, **kwargs) -> Channel:
if connection is None:
connection = await self.create_connection()
channel = await connection.channel(**kwargs)
if cleanup:
self.addCleanup(channel.close)
return channel
async def declare_queue(self, *args, **kwargs) -> Queue:
if 'channel' not in kwargs:
channel = await self.create_channel()
else:
channel = kwargs.pop('channel')
queue = await channel.declare_queue(*args, **kwargs)
self.addCleanup(queue.delete)
return queue
async def declare_exchange(self, *args, **kwargs) -> Exchange:
if 'channel' not in kwargs:
channel = await self.create_channel()
else:
channel = kwargs.pop('channel')
exchange = await channel.declare_exchange(*args, **kwargs)
self.addCleanup(exchange.delete)
return exchange
def timeout(timeout_sec=5):
def decorator(func):
@wraps(func)
async def wrap(self, *args, **kwargs):
loop = self.loop
task = loop.create_task(func(self, *args, **kwargs))
def on_timeout():
if task.done():
return
task.cancel()
self.loop.call_later(timeout_sec, on_timeout)
return await task
return wrap
return decorator | 0.486575 | 0.129706 |
import numpy as np
import cv2
import os
window_title = "The Input Image"
input_image = "input.jpg"
output_image = os.path.basename(__file__)[:-len(".py")] + ".jpg"
HORIZONTAL = 0
VERTICAL = 1
def read_image(file_name=input_image):
img = cv2.imread(file_name)
return img
def display_image(img, window_title=window_title):
cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)
cv2.imshow(window_title, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
return
def grayscale(img):
grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# =6, BGR and not RGB because of how cv2 returns images
return grayscale
def save_to_disk(img, filename=output_image):
cv2.imwrite(filename, img)
def get_dimensions_hw(img):
return img.shape[0:2]
def get_middle_pixels_hw(img, new_height, new_width):
input_img_h, input_img_w = get_dimensions_hw(img)
if new_height > input_img_h:
raise ValueError(
"Requested new height (" + str(new_height) + ") is greater than image height (" + str(input_img_h) + ").")
if new_width > input_img_w:
raise ValueError(
"Requested new width (" + str(new_width) + ") is greater than image width (" + str(input_img_w) + ").")
middle_h = round(input_img_h / 2)
half_new_height = round(new_height / 2)
middle_w = round(input_img_w / 2)
half_new_width = round(new_width / 2)
middle_pixels = img[middle_h - half_new_height:middle_h + half_new_height,
middle_w - half_new_width:middle_w + half_new_width]
return middle_pixels
def set_periodic_pixel(img, frequency, direction, new_pixel):
h, w = get_dimensions_hw(img)
img = np.array(img, copy=True)
if direction == HORIZONTAL:
for i in range(0, h):
for j in range(0, w, frequency):
img[i][j] = new_pixel
elif direction == VERTICAL:
for i in range(0, h, frequency):
for j in range(0, w):
img[i][j] = new_pixel
return img
def flip(img, direction):
h, w = get_dimensions_hw(img)
flipped = np.array(img, copy=True)
if direction == HORIZONTAL:
for i in range(h):
for j in range(w):
flipped[i][j] = img[i][w - j - 1]
elif direction == VERTICAL:
for i in range(h):
for j in range(w):
flipped[i][j] = img[h - i - 1][j]
return flipped
def show_side_by_side(img1, img2):
h1, w1 = get_dimensions_hw(img1)
h2, w2 = get_dimensions_hw(img2)
side_by_side = np.zeros([max(h1, h2), w1 + w2, 3], np.uint8)
for i in range(h1):
for j in range(w1):
side_by_side[i][j] = img1[i][j]
for i in range(h2):
for j in range(w2):
side_by_side[i][j + w1] = img2[i][j]
return side_by_side
if __name__ == "__main__":
img = read_image()
flipped = flip(img, VERTICAL)
img = show_side_by_side(img, flipped)
save_to_disk(img)
display_image(img) | LatestLibrary.py | import numpy as np
import cv2
import os
window_title = "The Input Image"
input_image = "input.jpg"
output_image = os.path.basename(__file__)[:-len(".py")] + ".jpg"
HORIZONTAL = 0
VERTICAL = 1
def read_image(file_name=input_image):
img = cv2.imread(file_name)
return img
def display_image(img, window_title=window_title):
cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)
cv2.imshow(window_title, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
return
def grayscale(img):
grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# =6, BGR and not RGB because of how cv2 returns images
return grayscale
def save_to_disk(img, filename=output_image):
cv2.imwrite(filename, img)
def get_dimensions_hw(img):
return img.shape[0:2]
def get_middle_pixels_hw(img, new_height, new_width):
input_img_h, input_img_w = get_dimensions_hw(img)
if new_height > input_img_h:
raise ValueError(
"Requested new height (" + str(new_height) + ") is greater than image height (" + str(input_img_h) + ").")
if new_width > input_img_w:
raise ValueError(
"Requested new width (" + str(new_width) + ") is greater than image width (" + str(input_img_w) + ").")
middle_h = round(input_img_h / 2)
half_new_height = round(new_height / 2)
middle_w = round(input_img_w / 2)
half_new_width = round(new_width / 2)
middle_pixels = img[middle_h - half_new_height:middle_h + half_new_height,
middle_w - half_new_width:middle_w + half_new_width]
return middle_pixels
def set_periodic_pixel(img, frequency, direction, new_pixel):
h, w = get_dimensions_hw(img)
img = np.array(img, copy=True)
if direction == HORIZONTAL:
for i in range(0, h):
for j in range(0, w, frequency):
img[i][j] = new_pixel
elif direction == VERTICAL:
for i in range(0, h, frequency):
for j in range(0, w):
img[i][j] = new_pixel
return img
def flip(img, direction):
h, w = get_dimensions_hw(img)
flipped = np.array(img, copy=True)
if direction == HORIZONTAL:
for i in range(h):
for j in range(w):
flipped[i][j] = img[i][w - j - 1]
elif direction == VERTICAL:
for i in range(h):
for j in range(w):
flipped[i][j] = img[h - i - 1][j]
return flipped
def show_side_by_side(img1, img2):
h1, w1 = get_dimensions_hw(img1)
h2, w2 = get_dimensions_hw(img2)
side_by_side = np.zeros([max(h1, h2), w1 + w2, 3], np.uint8)
for i in range(h1):
for j in range(w1):
side_by_side[i][j] = img1[i][j]
for i in range(h2):
for j in range(w2):
side_by_side[i][j + w1] = img2[i][j]
return side_by_side
if __name__ == "__main__":
img = read_image()
flipped = flip(img, VERTICAL)
img = show_side_by_side(img, flipped)
save_to_disk(img)
display_image(img) | 0.480722 | 0.235043 |
import sys
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.version_info < (2, 4):
sys.exit("requires python 2.4 and up")
here = os.path.dirname(__file__)
exec(open(os.path.join(here, 'rpyc', 'version.py')).read())
setup(name = "rpyc",
version = version_string, #@UndefinedVariable
description = "Remote Python Call (RPyC), a transparent and symmetric RPC library",
author = "<NAME>",
author_email = "<EMAIL>",
license = "MIT",
url = "http://rpyc.sourceforge.net",
download_url = "http://sourceforge.net/projects/rpyc/files/main/%s" % (version_string,), #@UndefinedVariable
packages = [
'rpyc',
'rpyc.core',
'rpyc.lib',
'rpyc.utils',
'rpyc.scripts'
],
scripts = [
os.path.join("rpyc", "scripts", "rpyc_classic.py"),
os.path.join("rpyc", "scripts", "rpyc_registry.py"),
],
entry_points = dict(
console_scripts = [
"rpyc_classic = rpyc.scripts.rpyc_classic:main",
"rpyc_registry = rpyc.scretips.rpyc_registry:main",
],
),
platforms = ["POSIX", "Windows"],
use_2to3 = False,
zip_safe = False,
long_description = open(os.path.join(here, "README.rst"), "r").read(),
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.4",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Object Brokering",
"Topic :: Software Development :: Testing",
"Topic :: System :: Clustering",
"Topic :: System :: Distributed Computing",
"Topic :: System :: Monitoring",
"Topic :: System :: Networking",
"Topic :: System :: Systems Administration",
],
) | tests/rpyc-3.2.3/setup.py | import sys
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.version_info < (2, 4):
sys.exit("requires python 2.4 and up")
here = os.path.dirname(__file__)
exec(open(os.path.join(here, 'rpyc', 'version.py')).read())
setup(name = "rpyc",
version = version_string, #@UndefinedVariable
description = "Remote Python Call (RPyC), a transparent and symmetric RPC library",
author = "<NAME>",
author_email = "<EMAIL>",
license = "MIT",
url = "http://rpyc.sourceforge.net",
download_url = "http://sourceforge.net/projects/rpyc/files/main/%s" % (version_string,), #@UndefinedVariable
packages = [
'rpyc',
'rpyc.core',
'rpyc.lib',
'rpyc.utils',
'rpyc.scripts'
],
scripts = [
os.path.join("rpyc", "scripts", "rpyc_classic.py"),
os.path.join("rpyc", "scripts", "rpyc_registry.py"),
],
entry_points = dict(
console_scripts = [
"rpyc_classic = rpyc.scripts.rpyc_classic:main",
"rpyc_registry = rpyc.scretips.rpyc_registry:main",
],
),
platforms = ["POSIX", "Windows"],
use_2to3 = False,
zip_safe = False,
long_description = open(os.path.join(here, "README.rst"), "r").read(),
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.4",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Object Brokering",
"Topic :: Software Development :: Testing",
"Topic :: System :: Clustering",
"Topic :: System :: Distributed Computing",
"Topic :: System :: Monitoring",
"Topic :: System :: Networking",
"Topic :: System :: Systems Administration",
],
) | 0.284278 | 0.213562 |
from random import randrange
from math import ceil
# Solde du jouueur
solde = 1000
print("\nVotre solde est de:", solde, "$.")
# Jouer tant que le solde du compte nous le permet
game_over = False
while game_over == False:
# Pour le numéro choisi
validation = False
while validation == False:
validation = True
numero_mise = input("\nDonnez le numéro sur lequel vous voulez miser (entre 0 et 49): ")
try:
numero_mise = int(numero_mise)
assert numero_mise >= 0 and numero_mise <= 49
except ValueError:
validation = False
print("Vous n'avez pas saisi un nombre.")
except AssertionError:
validation = False
print("Le nombre saisi n'est pas compris entre 0 et 49.")
# Pour la mise
validation = False
while validation == False:
validation = True
print("\nDonnez la somme que vous voulez miser sur le numéro", numero_mise, ": ")
somme_mise = input()
try:
somme_mise = int(somme_mise)
assert somme_mise <= solde
except ValueError:
validation = False
print("Vous n'avez pas saisi un nombre.")
except AssertionError:
validation = False
print("Vous ne pouvez pas miser une somme supérieure à votre solde.")
# Solde restant
solde -= somme_mise
print("Le solde restant :", solde)
# Génération d'un nombre aléatoire(entre 0 et 49)
input("\nMise et numéro enregistrés avec succès. Tapez ENTREE pour continuer...")
roulette = randrange(50)
print("\nLe numéro tiré par la roulette est :", roulette)
# Mise à jour du solde du joueur
if roulette == numero_mise:
solde += somme_mise * 3
print("Bravo, vous avez tiré le bon numéro")
elif roulette % 2 == 0 and numero_mise % 2 == 0 or (roulette % 2 != 0 and numero_mise % 2 != 0):
solde += ceil(somme_mise / 2)
print("Ohh, vous n'avez pas choisi le bon numéro. Mais vous reprenez quand même la moitié de votre mise(le numéro misé et le numéro gagnant ont la même couleur).")
else:
print("Dommage, vous n'avez rien remporté sur ce tour.")
# Affichage du nouveau solde
print("Votre nouveau solde est de :", solde, "$.")
# Vérifier si le jeu continue ou non
if solde <= 0:
game_over = True
print("\nDésolé, votre solde n'est plus suffisant pour continuer. Merci d'avoir joué.")
print("Solde final :", solde, "$.")
else:
game_over = False | tp-tous-au-zcasino/ZCasino.py |
from random import randrange
from math import ceil
# Solde du jouueur
solde = 1000
print("\nVotre solde est de:", solde, "$.")
# Jouer tant que le solde du compte nous le permet
game_over = False
while game_over == False:
# Pour le numéro choisi
validation = False
while validation == False:
validation = True
numero_mise = input("\nDonnez le numéro sur lequel vous voulez miser (entre 0 et 49): ")
try:
numero_mise = int(numero_mise)
assert numero_mise >= 0 and numero_mise <= 49
except ValueError:
validation = False
print("Vous n'avez pas saisi un nombre.")
except AssertionError:
validation = False
print("Le nombre saisi n'est pas compris entre 0 et 49.")
# Pour la mise
validation = False
while validation == False:
validation = True
print("\nDonnez la somme que vous voulez miser sur le numéro", numero_mise, ": ")
somme_mise = input()
try:
somme_mise = int(somme_mise)
assert somme_mise <= solde
except ValueError:
validation = False
print("Vous n'avez pas saisi un nombre.")
except AssertionError:
validation = False
print("Vous ne pouvez pas miser une somme supérieure à votre solde.")
# Solde restant
solde -= somme_mise
print("Le solde restant :", solde)
# Génération d'un nombre aléatoire(entre 0 et 49)
input("\nMise et numéro enregistrés avec succès. Tapez ENTREE pour continuer...")
roulette = randrange(50)
print("\nLe numéro tiré par la roulette est :", roulette)
# Mise à jour du solde du joueur
if roulette == numero_mise:
solde += somme_mise * 3
print("Bravo, vous avez tiré le bon numéro")
elif roulette % 2 == 0 and numero_mise % 2 == 0 or (roulette % 2 != 0 and numero_mise % 2 != 0):
solde += ceil(somme_mise / 2)
print("Ohh, vous n'avez pas choisi le bon numéro. Mais vous reprenez quand même la moitié de votre mise(le numéro misé et le numéro gagnant ont la même couleur).")
else:
print("Dommage, vous n'avez rien remporté sur ce tour.")
# Affichage du nouveau solde
print("Votre nouveau solde est de :", solde, "$.")
# Vérifier si le jeu continue ou non
if solde <= 0:
game_over = True
print("\nDésolé, votre solde n'est plus suffisant pour continuer. Merci d'avoir joué.")
print("Solde final :", solde, "$.")
else:
game_over = False | 0.210929 | 0.50293 |
import pytest
import fiona
import fiona.drvsupport
import fiona.meta
from fiona.drvsupport import supported_drivers
from fiona.errors import FionaValueError
from .conftest import requires_gdal2, requires_gdal23, requires_gdal31
from six import string_types
@requires_gdal31
@pytest.mark.parametrize("driver", supported_drivers)
def test_print_driver_options(driver):
""" Test fiona.meta.print_driver_options(driver) """
# do not fail
fiona.meta.print_driver_options(driver)
@requires_gdal2
def test_metadata_wrong_driver():
""" Test that FionaValueError is raised for non existing driver"""
with pytest.raises(FionaValueError):
fiona.meta.print_driver_options("Not existing driver")
@requires_gdal2
@pytest.mark.parametrize("driver", supported_drivers)
def test_extension(driver):
""" Test fiona.meta.extension(driver) """
# do not fail
extension = fiona.meta.extension(driver)
assert extension is None or isinstance(extension, string_types)
@requires_gdal2
@pytest.mark.parametrize("driver", supported_drivers)
def test_extensions(driver):
""" Test fiona.meta.extensions(driver) """
# do not fail
extensions = fiona.meta.extensions(driver)
assert extensions is None or isinstance(extensions, list)
@requires_gdal2
@pytest.mark.parametrize("driver", supported_drivers)
def test_supports_vsi(driver):
""" Test fiona.meta.supports_vsi(driver) """
# do not fail
assert fiona.meta.supports_vsi(driver) in (True, False)
@requires_gdal2
@pytest.mark.parametrize("driver", supported_drivers)
def test_supported_field_types(driver):
""" Test fiona.meta.supported_field_types(driver) """
# do not fail
field_types = fiona.meta.supported_field_types(driver)
assert field_types is None or isinstance(field_types, list)
@requires_gdal23
@pytest.mark.parametrize("driver", supported_drivers)
def test_supported_sub_field_types(driver):
""" Test fiona.meta.supported_sub_field_types(driver) """
# do not fail
sub_field_types = fiona.meta.supported_sub_field_types(driver)
assert sub_field_types is None or isinstance(sub_field_types, list) | tests/test_meta.py | import pytest
import fiona
import fiona.drvsupport
import fiona.meta
from fiona.drvsupport import supported_drivers
from fiona.errors import FionaValueError
from .conftest import requires_gdal2, requires_gdal23, requires_gdal31
from six import string_types
@requires_gdal31
@pytest.mark.parametrize("driver", supported_drivers)
def test_print_driver_options(driver):
""" Test fiona.meta.print_driver_options(driver) """
# do not fail
fiona.meta.print_driver_options(driver)
@requires_gdal2
def test_metadata_wrong_driver():
""" Test that FionaValueError is raised for non existing driver"""
with pytest.raises(FionaValueError):
fiona.meta.print_driver_options("Not existing driver")
@requires_gdal2
@pytest.mark.parametrize("driver", supported_drivers)
def test_extension(driver):
""" Test fiona.meta.extension(driver) """
# do not fail
extension = fiona.meta.extension(driver)
assert extension is None or isinstance(extension, string_types)
@requires_gdal2
@pytest.mark.parametrize("driver", supported_drivers)
def test_extensions(driver):
""" Test fiona.meta.extensions(driver) """
# do not fail
extensions = fiona.meta.extensions(driver)
assert extensions is None or isinstance(extensions, list)
@requires_gdal2
@pytest.mark.parametrize("driver", supported_drivers)
def test_supports_vsi(driver):
""" Test fiona.meta.supports_vsi(driver) """
# do not fail
assert fiona.meta.supports_vsi(driver) in (True, False)
@requires_gdal2
@pytest.mark.parametrize("driver", supported_drivers)
def test_supported_field_types(driver):
""" Test fiona.meta.supported_field_types(driver) """
# do not fail
field_types = fiona.meta.supported_field_types(driver)
assert field_types is None or isinstance(field_types, list)
@requires_gdal23
@pytest.mark.parametrize("driver", supported_drivers)
def test_supported_sub_field_types(driver):
""" Test fiona.meta.supported_sub_field_types(driver) """
# do not fail
sub_field_types = fiona.meta.supported_sub_field_types(driver)
assert sub_field_types is None or isinstance(sub_field_types, list) | 0.415373 | 0.236054 |
import numpy as np
import os
from yt.data_objects.level_sets.api import \
add_validator
from yt.funcs import mylog
from yt.utilities.lib.misc_utilities import \
gravitational_binding_energy
from yt.utilities.physical_constants import \
gravitational_constant_cgs as G
def _future_bound(
clump,
use_thermal_energy=True,
truncate=True,
include_cooling=True):
"""
True if clump has negative total energy. This considers gas kinetic
energy, thermal energy, and radiative losses over a free-fall time
against gravitational potential energy of the gas and collisionless
particle system.
"""
num_threads = int(os.environ.get('OMP_NUM_THREADS', 1))
if clump["gas", "cell_mass"].size <= 1:
mylog.info("Clump has only one cell.")
return False
bulk_velocity = clump.quantities.bulk_velocity(
use_particles=False)
kinetic = 0.5 * (clump["gas", "cell_mass"] *
((bulk_velocity[0] - clump["gas", "velocity_x"])**2 +
(bulk_velocity[1] - clump["gas", "velocity_y"])**2 +
(bulk_velocity[2] - clump["gas", "velocity_z"])**2)).sum()
mylog.info("Kinetic energy: %e erg." %
kinetic.in_units("erg"))
if use_thermal_energy:
cooling_loss = clump.data.ds.quan(0.0, "erg")
thermal = (clump["gas", "cell_mass"] *
clump["gas", "thermal_energy"]).sum()
mylog.info("Thermal energy: %e erg." %
thermal.in_units("erg"))
if include_cooling:
# divide by sqrt(2) since t_ff = t_dyn / sqrt(2)
cooling_loss = \
(clump["gas", "cell_mass"] *
clump["gas", "dynamical_time"] *
clump["gas", "thermal_energy"] /
clump["gas", "cooling_time"]).sum() / np.sqrt(2)
mylog.info("Cooling loss: %e erg." %
cooling_loss.in_units("erg"))
thermal -= np.abs(cooling_loss)
kinetic += thermal
kinetic = max(kinetic, clump.data.ds.quan(0.0, "erg"))
mylog.info("Available energy: %e erg." %
kinetic.in_units("erg"))
m = np.concatenate([clump["gas", "cell_mass"].in_cgs(),
clump["all", "particle_mass"].in_cgs()])
px = np.concatenate([clump["index", "x"].in_cgs(),
clump["all", "particle_position_x"].in_cgs()])
py = np.concatenate([clump["index", "y"].in_cgs(),
clump["all", "particle_position_y"].in_cgs()])
pz = np.concatenate([clump["index", "z"].in_cgs(),
clump["all", "particle_position_z"].in_cgs()])
potential = clump.data.ds.quan(
G * gravitational_binding_energy(
m, px, py, pz,
truncate, (kinetic / G).in_cgs(),
num_threads=num_threads),
kinetic.in_cgs().units)
mylog.info("Potential energy: %e erg." %
potential.to('erg'))
return potential >= kinetic
add_validator("future_bound", _future_bound) | yt_p2p/clumps/validators.py | import numpy as np
import os
from yt.data_objects.level_sets.api import \
add_validator
from yt.funcs import mylog
from yt.utilities.lib.misc_utilities import \
gravitational_binding_energy
from yt.utilities.physical_constants import \
gravitational_constant_cgs as G
def _future_bound(
clump,
use_thermal_energy=True,
truncate=True,
include_cooling=True):
"""
True if clump has negative total energy. This considers gas kinetic
energy, thermal energy, and radiative losses over a free-fall time
against gravitational potential energy of the gas and collisionless
particle system.
"""
num_threads = int(os.environ.get('OMP_NUM_THREADS', 1))
if clump["gas", "cell_mass"].size <= 1:
mylog.info("Clump has only one cell.")
return False
bulk_velocity = clump.quantities.bulk_velocity(
use_particles=False)
kinetic = 0.5 * (clump["gas", "cell_mass"] *
((bulk_velocity[0] - clump["gas", "velocity_x"])**2 +
(bulk_velocity[1] - clump["gas", "velocity_y"])**2 +
(bulk_velocity[2] - clump["gas", "velocity_z"])**2)).sum()
mylog.info("Kinetic energy: %e erg." %
kinetic.in_units("erg"))
if use_thermal_energy:
cooling_loss = clump.data.ds.quan(0.0, "erg")
thermal = (clump["gas", "cell_mass"] *
clump["gas", "thermal_energy"]).sum()
mylog.info("Thermal energy: %e erg." %
thermal.in_units("erg"))
if include_cooling:
# divide by sqrt(2) since t_ff = t_dyn / sqrt(2)
cooling_loss = \
(clump["gas", "cell_mass"] *
clump["gas", "dynamical_time"] *
clump["gas", "thermal_energy"] /
clump["gas", "cooling_time"]).sum() / np.sqrt(2)
mylog.info("Cooling loss: %e erg." %
cooling_loss.in_units("erg"))
thermal -= np.abs(cooling_loss)
kinetic += thermal
kinetic = max(kinetic, clump.data.ds.quan(0.0, "erg"))
mylog.info("Available energy: %e erg." %
kinetic.in_units("erg"))
m = np.concatenate([clump["gas", "cell_mass"].in_cgs(),
clump["all", "particle_mass"].in_cgs()])
px = np.concatenate([clump["index", "x"].in_cgs(),
clump["all", "particle_position_x"].in_cgs()])
py = np.concatenate([clump["index", "y"].in_cgs(),
clump["all", "particle_position_y"].in_cgs()])
pz = np.concatenate([clump["index", "z"].in_cgs(),
clump["all", "particle_position_z"].in_cgs()])
potential = clump.data.ds.quan(
G * gravitational_binding_energy(
m, px, py, pz,
truncate, (kinetic / G).in_cgs(),
num_threads=num_threads),
kinetic.in_cgs().units)
mylog.info("Potential energy: %e erg." %
potential.to('erg'))
return potential >= kinetic
add_validator("future_bound", _future_bound) | 0.696681 | 0.404978 |
import argparse
from itertools import combinations
import numpy as np
import re
import sys
def read_w2v(w2v_path, word2index, n_dims=300, unk_token="unk"):
    """Load pretrained word embeddings for the tokens in ``word2index``.

    Tokens present in ``word2index`` but absent from the embedding file
    are assigned the vector of ``unk_token``.

    :param w2v_path: path to a pretrained embedding file (one token
        followed by ``n_dims`` whitespace-separated floats per line).
    :param word2index: dict mapping tokens to row indices; mutated to
        include ``unk_token`` if it is missing.
    :param n_dims: embedding dimensionality.
    :param unk_token: fallback token for words without an embedding.
    :raises RuntimeError: if the first matching line does not carry
        ``n_dims`` values.
    :return: numpy array of word vectors, shape (len(word2index), n_dims).
    """
    print('Getting Word Vectors...', file=sys.stderr)
    vocab = set()
    # make sure an unk token exists so missing words can fall back to it
    if unk_token not in word2index:
        # NOTE(review): index 0 aliases whichever token already holds
        # index 0 -- confirm that collision is acceptable upstream
        word2index[unk_token] = 0
    word_vectors = np.zeros((len(word2index), n_dims))  # vocab size x dims
    with open(w2v_path, encoding='utf-8') as infile:
        for lc, line in enumerate(infile, start=1):
            line = line.strip()
            if not line:
                continue
            row = line.split()
            token = row[0]
            if token not in word2index and token != unk_token:
                continue
            vocab.add(token)
            # bug fix: this sanity check used to be raised inside a bare
            # ``except`` block that swallowed it; now it propagates
            if lc == 1 and len(row) - 1 != n_dims:
                raise RuntimeError("wrong number of dimensions")
            try:
                vec_data = [float(x) for x in row[1:]]
                word_vectors[word2index[token]] = np.asarray(vec_data)
            except ValueError:
                # malformed float, or a row whose length does not broadcast
                print('Error on line {}'.format(lc), file=sys.stderr)
    # every requested token that never appeared in the embedding file
    tokens_without_embeddings = set(word2index) - vocab
    print('Word Vectors ready!', file=sys.stderr)
    print('{} tokens from text ({:.2f}%) have no embeddings'.format(
        len(tokens_without_embeddings), len(tokens_without_embeddings)*100/len(word2index)), file=sys.stderr)
    print('Tokens without embeddings: {}'.format(tokens_without_embeddings), file=sys.stderr)
    print('Setting those tokens to unk embedding', file=sys.stderr)
    for token in tokens_without_embeddings:
        word_vectors[word2index[token]] = word_vectors[word2index[unk_token]]
    return word_vectors
def get_tokens(files):
    """Build a word-to-index mapping from the vocabulary of the given files.

    :param files: list of file paths to read whitespace-separated tokens from.
    :return: dict mapping each distinct token to an integer index.
    """
    print('Getting tokens ... ...', file=sys.stderr)
    vocab = set()
    for path in files:
        with open(path, 'r') as handle:
            vocab.update(handle.read().strip().split())
    return {token: index for index, token in enumerate(list(vocab))}
def cos_sim(v1, v2):
    """Return the cosine similarity between two 1-D numpy vectors."""
    numerator = np.dot(v1, v2)
    denominator = np.sqrt(np.dot(v1, v1)) * np.sqrt(np.dot(v2, v2))
    return numerator / denominator
def cos_sim_array(vec, vec_array):
    """Cosine similarity between one vector and each row of an array.

    The matrix-vector dot product collapses the row dimension in one
    call; note that ``vec_array`` must therefore be the first argument
    to ``np.dot``.

    :param vec: a 1-D vector.
    :param vec_array: a 2-D array of row vectors.
    :return: 1-D array of cosine similarities, one per row of vec_array.
    """
    dot_products = np.dot(vec_array, vec)
    row_norms = np.sqrt((vec_array ** 2).sum(axis=1))
    vec_norm = np.sqrt((vec ** 2).sum())
    return np.divide(dot_products, row_norms * vec_norm)
def remove_chars(text: str, remove: str = '#') -> str:
    """Return `text` with every character in `remove` deleted.

    The characters are escaped before being placed in the regex character
    class, so metacharacters such as '-', ']' or '\\' are treated literally
    instead of silently corrupting the pattern (e.g. remove='a-c' used to
    strip the whole a..c range).
    """
    return re.sub('[{}]'.format(re.escape(remove)), '', text)
def make_vec_array(word_list: list, word_vectors, word2index: dict,
                   drop_set=frozenset({'#', '<EOL>', '<EOT>', '<\\s>'})):
    """Look up an embedding row for each word, skipping markup tokens.

    Fixes in this revision: the default `drop_set` is a frozenset (a mutable
    set default is a footgun), and the '<\\s>' literal is spelled with an
    explicit backslash ('\\s' is an invalid escape that only happened to
    produce the same string).

    :param word_list: list of token strings
    :param word_vectors: 2-D array of embeddings indexed by word id
    :param word2index: token -> row index; unknown tokens fall back to row 0
    :param drop_set: markup tokens to skip entirely
    :return: numpy array of shape (kept_words, embedding_dim)
    """
    vecs = [np.array(word_vectors[word2index.get(word, 0)])
            for word in word_list if word not in drop_set]
    return np.array(vecs)
def calc_similarity(storyline_path, story_path, word2index, word_vectors):
    """Print averaged cosine-similarity metrics for a storyline/story corpus.

    Two metrics, each averaged over all aligned line pairs:
      * keyword relatedness: mean pairwise cosine similarity between the
        keywords of a storyline.
      * keyword incorporation rate: for each keyword, the maximum cosine
        similarity to any word of the corresponding story, averaged over
        the keywords.

    :param storyline_path: file with one storyline per line (trailing <EOL>)
    :param story_path: file with one story per line, aligned with storylines
    :param word2index: token -> embedding row index
    :param word_vectors: 2-D array of embeddings
    :return: None (metrics are printed to stdout)
    """
    keyword_relatedness = 0
    keyword_incorporation_rate = 0
    storylines, stories = [], []
    with open(storyline_path, 'r') as infile:
        for line in infile:
            # strip '#' markup and drop the trailing <EOL> token
            storylines.append(remove_chars(line).strip().split()[:-1])
    with open(story_path, 'r') as infile:
        for line in infile:
            stories.append(remove_chars(line).strip().split())
    num_storylines = len(storylines)
    assert num_storylines == len(stories), "Mismatch between number of storylines and number of stories"
    # loop through stories and storylines and accumulate similarities
    for i in range(num_storylines):
        storyline_word_array = make_vec_array(storylines[i], word_vectors, word2index)
        story_word_array = make_vec_array(stories[i], word_vectors, word2index)
        num_words_in_storyline = len(storyline_word_array)
        keyword_pairs = list(combinations(range(num_words_in_storyline), 2))
        this_storyline_relatedness = 0
        for kw1, kw2 in keyword_pairs:
            this_storyline_relatedness += cos_sim(storyline_word_array[kw1], storyline_word_array[kw2])
        # bug fix: a storyline with fewer than two keywords used to raise
        # ZeroDivisionError; such lines now contribute 0 relatedness.
        if keyword_pairs:
            keyword_relatedness += this_storyline_relatedness / len(keyword_pairs)
        # incorporation: max cosine similarity of each keyword to any story word
        this_incorporation_rate = 0
        for kw_vec in storyline_word_array:
            this_incorporation_rate += np.nanmax(cos_sim_array(kw_vec, story_word_array))
        if num_words_in_storyline:
            keyword_incorporation_rate += this_incorporation_rate / num_words_in_storyline
    # report the average over the whole set
    keyword_relatedness /= num_storylines
    keyword_incorporation_rate /= num_storylines
    print('Metrics for {} samples'.format(num_storylines))
    print('dynamic relatedness : {:.2f}'.format(keyword_relatedness))
    print('dynamic keyword_incorporation_rate : {:.2f}'.format(keyword_incorporation_rate))
if __name__ == '__main__':
    # Dataset-extraction residue ('| python_src/keywords_incorp.py |') was
    # fused onto the final line; removed to restore valid syntax.
    parser = argparse.ArgumentParser()
    parser.add_argument('storyline_file', type=str,
                        help='location of file with storylines')
    parser.add_argument('story_file', type=str, help='location of story file')
    parser.add_argument('wordvec_file', type=str, help='path to wordvec file')
    args = parser.parse_args()
    # get_tokens accepts a list of arbitrarily many files
    word2idx = get_tokens([args.storyline_file, args.story_file])
    word_vectors = read_w2v(args.wordvec_file, word2idx)
    calc_similarity(args.storyline_file, args.story_file, word2idx, word_vectors)
import argparse
from itertools import combinations
import numpy as np
import re
import sys
def read_w2v(w2v_path, word2index, n_dims=300, unk_token="unk"):
    """Load pretrained embeddings for the tokens in `word2index`.

    Tokens present in the file get their vector; tokens absent from the file
    are assigned the `unk_token` vector afterwards.

    :param w2v_path: path to a whitespace-separated embedding file
                     (token followed by n_dims floats per line)
    :param word2index: dict mapping token -> row index in the result;
                       mutated in place to include `unk_token` if missing
    :param n_dims: expected embedding dimensionality
    :param unk_token: out-of-vocabulary token as spelled in the embedding file
    :return: numpy array of shape (len(word2index), n_dims)
    """
    print('Getting Word Vectors...', file=sys.stderr)
    vocab = set()
    # Make sure the unk token is indexable so OOV words can share its vector.
    # NOTE(review): mapping it to row 0 aliases whatever token already owns
    # index 0 — behavior preserved from the original; confirm row 0 is spare.
    if unk_token not in word2index:
        word2index[unk_token] = 0
    word_vectors = np.zeros((len(word2index), n_dims))  # vocab size x dims
    with open(w2v_path) as file:
        lc = 0
        for line in file:
            lc += 1
            line = line.strip()
            if not line:
                continue
            row = line.split()
            token = row[0]
            if token in word2index or token == unk_token:
                vocab.add(token)
                try:
                    vec_data = [float(x) for x in row[1:]]
                    word_vectors[word2index[token]] = np.asarray(vec_data)
                    # sanity-check dimensionality on the first data line only;
                    # the RuntimeError is caught just below and reported
                    # (preserved from the original control flow)
                    if lc == 1 and len(vec_data) != n_dims:
                        raise RuntimeError("wrong number of dimensions")
                except (ValueError, RuntimeError):
                    # bug fix: was a bare `except:` that also swallowed
                    # KeyboardInterrupt/SystemExit; ValueError covers both
                    # float() parse failures and shape-mismatch assignment
                    print('Error on line {}'.format(lc), file=sys.stderr)
    # tokens from the text that never appeared in the embedding file
    tokens_without_embeddings = set(word2index) - vocab
    print('Word Vectors ready!', file=sys.stderr)
    print('{} tokens from text ({:.2f}%) have no embeddings'.format(
        len(tokens_without_embeddings),
        len(tokens_without_embeddings) * 100 / len(word2index)), file=sys.stderr)
    print('Tokens without embeddings: {}'.format(tokens_without_embeddings), file=sys.stderr)
    print('Setting those tokens to unk embedding', file=sys.stderr)
    for token in tokens_without_embeddings:
        word_vectors[word2index[token]] = word_vectors[word2index[unk_token]]
    return word_vectors
def get_tokens(files):
    """Build a token -> index mapping from the unique whitespace tokens of the given files."""
    print('Getting tokens ... ...', file=sys.stderr)
    unique_tokens = set()
    for filepath in files:
        with open(filepath, 'r') as handle:
            unique_tokens.update(handle.read().strip().split())
    # Assign each token an arbitrary (set-iteration-order) integer index.
    return {token: index for index, token in enumerate(unique_tokens)}
def cos_sim(v1, v2):
    """Cosine similarity between two 1-D numpy vectors."""
    norm_product = np.sqrt(v1.dot(v1)) * np.sqrt(v2.dot(v2))
    return v1.dot(v2) / norm_product
def cos_sim_array(vec, vec_array):
    """
    Cosine similarity between `vec` and every row of `vec_array` in one shot.

    IMPORTANT: vec_array must be the first argument of the dot product so the
    result is one similarity per row.

    :param vec: a 1-D vector
    :param vec_array: a 2-D array of row vectors
    :return: 1-D array of cosine similarities, one per row of vec_array
    """
    row_norms = (vec_array ** 2).sum(axis=1) ** .5
    vec_norm = (vec ** 2).sum() ** .5
    return np.dot(vec_array, vec) / (row_norms * vec_norm)
def remove_chars(text: str, remove: str = '#') -> str:
    """Return `text` with every character in `remove` deleted.

    The characters are escaped before being placed in the regex character
    class, so metacharacters such as '-', ']' or '\\' are treated literally
    instead of silently corrupting the pattern (e.g. remove='a-c' used to
    strip the whole a..c range).
    """
    return re.sub('[{}]'.format(re.escape(remove)), '', text)
def make_vec_array(word_list: list, word_vectors, word2index: dict,
                   drop_set=frozenset({'#', '<EOL>', '<EOT>', '<\\s>'})):
    """Look up an embedding row for each word, skipping markup tokens.

    Fixes in this revision: the default `drop_set` is a frozenset (a mutable
    set default is a footgun), and the '<\\s>' literal is spelled with an
    explicit backslash ('\\s' is an invalid escape that only happened to
    produce the same string).

    :param word_list: list of token strings
    :param word_vectors: 2-D array of embeddings indexed by word id
    :param word2index: token -> row index; unknown tokens fall back to row 0
    :param drop_set: markup tokens to skip entirely
    :return: numpy array of shape (kept_words, embedding_dim)
    """
    vecs = [np.array(word_vectors[word2index.get(word, 0)])
            for word in word_list if word not in drop_set]
    return np.array(vecs)
def calc_similarity(storyline_path, story_path, word2index, word_vectors):
    """Print averaged cosine-similarity metrics for a storyline/story corpus.

    Two metrics, each averaged over all aligned line pairs:
      * keyword relatedness: mean pairwise cosine similarity between the
        keywords of a storyline.
      * keyword incorporation rate: for each keyword, the maximum cosine
        similarity to any word of the corresponding story, averaged over
        the keywords.

    :param storyline_path: file with one storyline per line (trailing <EOL>)
    :param story_path: file with one story per line, aligned with storylines
    :param word2index: token -> embedding row index
    :param word_vectors: 2-D array of embeddings
    :return: None (metrics are printed to stdout)
    """
    keyword_relatedness = 0
    keyword_incorporation_rate = 0
    storylines, stories = [], []
    with open(storyline_path, 'r') as infile:
        for line in infile:
            # strip '#' markup and drop the trailing <EOL> token
            storylines.append(remove_chars(line).strip().split()[:-1])
    with open(story_path, 'r') as infile:
        for line in infile:
            stories.append(remove_chars(line).strip().split())
    num_storylines = len(storylines)
    assert num_storylines == len(stories), "Mismatch between number of storylines and number of stories"
    # loop through stories and storylines and accumulate similarities
    for i in range(num_storylines):
        storyline_word_array = make_vec_array(storylines[i], word_vectors, word2index)
        story_word_array = make_vec_array(stories[i], word_vectors, word2index)
        num_words_in_storyline = len(storyline_word_array)
        keyword_pairs = list(combinations(range(num_words_in_storyline), 2))
        this_storyline_relatedness = 0
        for kw1, kw2 in keyword_pairs:
            this_storyline_relatedness += cos_sim(storyline_word_array[kw1], storyline_word_array[kw2])
        # bug fix: a storyline with fewer than two keywords used to raise
        # ZeroDivisionError; such lines now contribute 0 relatedness.
        if keyword_pairs:
            keyword_relatedness += this_storyline_relatedness / len(keyword_pairs)
        # incorporation: max cosine similarity of each keyword to any story word
        this_incorporation_rate = 0
        for kw_vec in storyline_word_array:
            this_incorporation_rate += np.nanmax(cos_sim_array(kw_vec, story_word_array))
        if num_words_in_storyline:
            keyword_incorporation_rate += this_incorporation_rate / num_words_in_storyline
    # report the average over the whole set
    keyword_relatedness /= num_storylines
    keyword_incorporation_rate /= num_storylines
    print('Metrics for {} samples'.format(num_storylines))
    print('dynamic relatedness : {:.2f}'.format(keyword_relatedness))
    print('dynamic keyword_incorporation_rate : {:.2f}'.format(keyword_incorporation_rate))
if __name__ == '__main__':
    # Dataset-extraction residue ('| 0.5564 | 0.515681') was fused onto the
    # final line; removed to restore valid syntax.
    parser = argparse.ArgumentParser()
    parser.add_argument('storyline_file', type=str,
                        help='location of file with storylines')
    parser.add_argument('story_file', type=str, help='location of story file')
    parser.add_argument('wordvec_file', type=str, help='path to wordvec file')
    args = parser.parse_args()
    # get_tokens accepts a list of arbitrarily many files
    word2idx = get_tokens([args.storyline_file, args.story_file])
    word_vectors = read_w2v(args.wordvec_file, word2idx)
    calc_similarity(args.storyline_file, args.story_file, word2idx, word_vectors)
import pytest
from mongoengine import ValidationError
from kairon.api.models import HttpActionConfigRequest, HttpActionParameters
from kairon.shared.data.data_objects import Slots, SlotMapping
class TestBotModels:
    """Unit tests for HTTP action request models and slot data objects."""

    def test_http_action_params_valid(self):
        """All supported parameter_type values accept well-formed input."""
        assert HttpActionParameters(key="param1", value="param1", parameter_type="slot")
        assert HttpActionParameters(key="param1", value="param1", parameter_type="value")
        # value may be empty or None when parameter_type is "value"
        HttpActionParameters(key="key", value="", parameter_type="value")
        HttpActionParameters(key="key", value=None, parameter_type="value")
        assert HttpActionParameters(key="param1", value="param1", parameter_type="sender_id")
        assert HttpActionParameters(key="param1", value="", parameter_type="sender_id")
        assert HttpActionParameters(key="param1", parameter_type="sender_id")

    def test_http_action_params_invalid(self):
        """Empty keys, missing slot names and unknown types are rejected."""
        with pytest.raises(ValueError, match=r".*key cannot be empty.*"):
            HttpActionParameters(key="", value="param1", parameter_type="slot")
        with pytest.raises(ValueError, match=r".*key cannot be empty.*"):
            HttpActionParameters(key=None, value="param1", parameter_type="slot")
        with pytest.raises(ValueError, match=r".*Provide name of the slot as value.*"):
            HttpActionParameters(key="key", value="", parameter_type="slot")
        with pytest.raises(ValueError, match=r".*Provide name of the slot as value.*"):
            HttpActionParameters(key="key", value=None, parameter_type="slot")
        with pytest.raises(ValueError, match=r".*parameter_type\n value is not a valid enumeration member.*"):
            HttpActionParameters(key="key", value="value", parameter_type="unknown_type")

    def test_http_action_config_request_valid(self):
        """auth_token is optional: empty string and None are both accepted."""
        HttpActionConfigRequest(
            auth_token="",
            action_name="test_action",
            response="response",
            http_url="http://www.google.com",
            request_method="GET",
            http_params_list=[]
        )
        HttpActionConfigRequest(
            auth_token=None,
            action_name="test_action",
            response="response",
            http_url="http://www.google.com",
            request_method="GET",
            http_params_list=[]
        )

    def test_http_action_config_request_invalid(self):
        """Missing or malformed fields raise with descriptive messages."""
        with pytest.raises(ValueError, match=r".*none is not an allowed value.*"):
            HttpActionConfigRequest(auth_token="", action_name=None, response="response",
                                    http_url="http://www.google.com",
                                    request_method="GET", http_params_list=[])
        with pytest.raises(ValueError, match=r".*action_name is required*"):
            HttpActionConfigRequest(auth_token="", action_name="", response="response",
                                    http_url="http://www.google.com",
                                    request_method="GET", http_params_list=[])
        with pytest.raises(ValueError, match=r".*none is not an allowed value.*"):
            HttpActionConfigRequest(auth_token="", action_name="http_action", response=None,
                                    http_url="http://www.google.com",
                                    request_method="GET", http_params_list=[])
        with pytest.raises(ValueError, match=r".*URL is malformed.*"):
            HttpActionConfigRequest(auth_token="", action_name="http_action", response="response", http_url="",
                                    request_method="GET", http_params_list=[])
        with pytest.raises(ValueError, match=r".*none is not an allowed value.*"):
            HttpActionConfigRequest(auth_token="", action_name="http_action", response="response", http_url=None,
                                    request_method="GET", http_params_list=[])
        with pytest.raises(ValueError, match=r".URL is malformed.*"):
            HttpActionConfigRequest(auth_token="", action_name="http_action", response="response",
                                    http_url="www.google.com", request_method="GET", http_params_list=[])
        with pytest.raises(ValueError, match=r".*Invalid HTTP method.*"):
            HttpActionConfigRequest(auth_token="", action_name="http_action", response="response",
                                    http_url="http://www.google.com",
                                    request_method="OPTIONS", http_params_list=[])
        with pytest.raises(ValueError, match=r".*Invalid HTTP method.*"):
            HttpActionConfigRequest(auth_token="", action_name="http_action", response="response",
                                    http_url="http://www.google.com",
                                    request_method="", http_params_list=[])
        with pytest.raises(ValueError, match=r".*none is not an allowed value.*"):
            HttpActionConfigRequest(auth_token="", action_name="http_action", response="response",
                                    http_url="http://www.google.com",
                                    request_method=None, http_params_list=[])

    def test_slot(self):
        """Slot persistence rejects blank names and blank types."""
        with pytest.raises(ValueError, match="Slot name and type cannot be empty or blank spaces"):
            Slots(name='email_id', type=' ', auto_fill=True).save()
        with pytest.raises(ValueError, match="Slot name and type cannot be empty or blank spaces"):
            Slots(name=' ', type='text', auto_fill=True).save()

    def test_validate_slot_mapping(self):
        """Slot mappings reject blank names and unsupported mapping types."""
        with pytest.raises(ValueError, match="Slot name cannot be empty or blank spaces"):
            SlotMapping(slot=' ', mapping=[{"type": "from_value"}]).save()
        with pytest.raises(ValidationError,
                           match="Your form 'form_name' uses an invalid slot mapping of type 'from_value' for slot 'email_id'. Please see https://rasa.com/docs/rasa/forms for more information."):
            SlotMapping(slot='email_id', mapping=[{"type": "from_value"}]).save()
        assert not SlotMapping(
            slot='email_id', mapping=[{"type": "from_intent", "value": '<EMAIL>'}]
        ).validate()


import pytest
from mongoengine import ValidationError
from kairon.api.models import HttpActionConfigRequest, HttpActionParameters
from kairon.shared.data.data_objects import Slots, SlotMapping
class TestBotModels:
    """Unit tests for HTTP action request models and slot data objects."""

    def test_http_action_params_valid(self):
        """All supported parameter_type values accept well-formed input."""
        assert HttpActionParameters(key="param1", value="param1", parameter_type="slot")
        assert HttpActionParameters(key="param1", value="param1", parameter_type="value")
        # value may be empty or None when parameter_type is "value"
        HttpActionParameters(key="key", value="", parameter_type="value")
        HttpActionParameters(key="key", value=None, parameter_type="value")
        assert HttpActionParameters(key="param1", value="param1", parameter_type="sender_id")
        assert HttpActionParameters(key="param1", value="", parameter_type="sender_id")
        assert HttpActionParameters(key="param1", parameter_type="sender_id")

    def test_http_action_params_invalid(self):
        """Empty keys, missing slot names and unknown types are rejected."""
        with pytest.raises(ValueError, match=r".*key cannot be empty.*"):
            HttpActionParameters(key="", value="param1", parameter_type="slot")
        with pytest.raises(ValueError, match=r".*key cannot be empty.*"):
            HttpActionParameters(key=None, value="param1", parameter_type="slot")
        with pytest.raises(ValueError, match=r".*Provide name of the slot as value.*"):
            HttpActionParameters(key="key", value="", parameter_type="slot")
        with pytest.raises(ValueError, match=r".*Provide name of the slot as value.*"):
            HttpActionParameters(key="key", value=None, parameter_type="slot")
        with pytest.raises(ValueError, match=r".*parameter_type\n value is not a valid enumeration member.*"):
            HttpActionParameters(key="key", value="value", parameter_type="unknown_type")

    def test_http_action_config_request_valid(self):
        """auth_token is optional: empty string and None are both accepted."""
        HttpActionConfigRequest(
            auth_token="",
            action_name="test_action",
            response="response",
            http_url="http://www.google.com",
            request_method="GET",
            http_params_list=[]
        )
        HttpActionConfigRequest(
            auth_token=None,
            action_name="test_action",
            response="response",
            http_url="http://www.google.com",
            request_method="GET",
            http_params_list=[]
        )

    def test_http_action_config_request_invalid(self):
        """Missing or malformed fields raise with descriptive messages."""
        with pytest.raises(ValueError, match=r".*none is not an allowed value.*"):
            HttpActionConfigRequest(auth_token="", action_name=None, response="response",
                                    http_url="http://www.google.com",
                                    request_method="GET", http_params_list=[])
        with pytest.raises(ValueError, match=r".*action_name is required*"):
            HttpActionConfigRequest(auth_token="", action_name="", response="response",
                                    http_url="http://www.google.com",
                                    request_method="GET", http_params_list=[])
        with pytest.raises(ValueError, match=r".*none is not an allowed value.*"):
            HttpActionConfigRequest(auth_token="", action_name="http_action", response=None,
                                    http_url="http://www.google.com",
                                    request_method="GET", http_params_list=[])
        with pytest.raises(ValueError, match=r".*URL is malformed.*"):
            HttpActionConfigRequest(auth_token="", action_name="http_action", response="response", http_url="",
                                    request_method="GET", http_params_list=[])
        with pytest.raises(ValueError, match=r".*none is not an allowed value.*"):
            HttpActionConfigRequest(auth_token="", action_name="http_action", response="response", http_url=None,
                                    request_method="GET", http_params_list=[])
        with pytest.raises(ValueError, match=r".URL is malformed.*"):
            HttpActionConfigRequest(auth_token="", action_name="http_action", response="response",
                                    http_url="www.google.com", request_method="GET", http_params_list=[])
        with pytest.raises(ValueError, match=r".*Invalid HTTP method.*"):
            HttpActionConfigRequest(auth_token="", action_name="http_action", response="response",
                                    http_url="http://www.google.com",
                                    request_method="OPTIONS", http_params_list=[])
        with pytest.raises(ValueError, match=r".*Invalid HTTP method.*"):
            HttpActionConfigRequest(auth_token="", action_name="http_action", response="response",
                                    http_url="http://www.google.com",
                                    request_method="", http_params_list=[])
        with pytest.raises(ValueError, match=r".*none is not an allowed value.*"):
            HttpActionConfigRequest(auth_token="", action_name="http_action", response="response",
                                    http_url="http://www.google.com",
                                    request_method=None, http_params_list=[])

    def test_slot(self):
        """Slot persistence rejects blank names and blank types."""
        with pytest.raises(ValueError, match="Slot name and type cannot be empty or blank spaces"):
            Slots(name='email_id', type=' ', auto_fill=True).save()
        with pytest.raises(ValueError, match="Slot name and type cannot be empty or blank spaces"):
            Slots(name=' ', type='text', auto_fill=True).save()

    def test_validate_slot_mapping(self):
        """Slot mappings reject blank names and unsupported mapping types."""
        with pytest.raises(ValueError, match="Slot name cannot be empty or blank spaces"):
            SlotMapping(slot=' ', mapping=[{"type": "from_value"}]).save()
        with pytest.raises(ValidationError,
                           match="Your form 'form_name' uses an invalid slot mapping of type 'from_value' for slot 'email_id'. Please see https://rasa.com/docs/rasa/forms for more information."):
            SlotMapping(slot='email_id', mapping=[{"type": "from_value"}]).save()
        assert not SlotMapping(
            slot='email_id', mapping=[{"type": "from_intent", "value": '<EMAIL>'}]
        ).validate()
from ..CommonInterface import CommonInterfaceNsd
import json
import requests
class Nsd(CommonInterfaceNsd):
    """
    NSD Management Interfaces (SONATA catalogue backend).

    Every method returns a JSON-encoded string of the form
    '{"error": <bool>, "data": <payload or error text>}'.
    """

    def __init__(self, host, port=4002):
        # Gatekeeper host and API port (4002 is the SONATA default).
        self._host = host
        self._port = port
        self._base_path = 'http://{0}:{1}'
        self._user_endpoint = '{0}'

    def _resolve_base_path(self, host, port):
        """Return 'http://host:port', defaulting to the constructor values."""
        if host is None:
            return self._base_path.format(self._host, self._port)
        return self._base_path.format(host, port)

    @staticmethod
    def _not_implemented():
        """Uniform reply for interfaces the target MANO does not expose."""
        return json.dumps({'error': True, 'data': 'Method not implemented in target MANO'})

    def get_ns_descriptors(self, token, offset=None, limit=None, host=None, port=None):
        """ NSD Management Interface - NS Descriptors

        /ns_descriptors:
            GET - Query information about multiple NS descriptor resources.

        :param token: auth token retrieved by the auth call
        :param offset: offset index while returning (default 0)
        :param limit: limit records while returning (default 10)
        :param host: host url (defaults to the constructor host)
        :param port: port where the MANO API can be accessed
        :return: JSON string {"error": bool, "data": str}
        """
        base_path = self._resolve_base_path(host, port)
        if not offset:
            offset = 0
        if not limit:
            limit = 10
        _endpoint = "{0}/catalogues/api/v2/network-services?offset={1}&limit={2}".format(base_path, offset, limit)
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/json", 'Authorization': 'Bearer {}'.format(token)}
        try:
            r = requests.get(_endpoint, params=None, verify=False, stream=True, headers=headers)
        except Exception as e:
            result['data'] = str(e)
            # bug fix: the error path used to return the raw dict while every
            # other path returns a JSON string
            return json.dumps(result)
        if r.status_code == requests.codes.ok:
            result['error'] = False
            result['data'] = r.text
        return json.dumps(result)

    def post_ns_descriptors(self, token, package_path, host=None, port=None):
        """ NSD Management Interface - NS Descriptors

        /ns_descriptors:
            POST - Create a new NS descriptor resource.

        :param token: auth token retrieved by the auth call
        :param package_path: file path of the package to upload
        :param host: host url (defaults to the constructor host)
        :param port: port where the MANO API can be accessed
        :return: JSON string {"error": bool, "data": str}
        """
        base_path = self._resolve_base_path(host, port)
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/x-yaml", "accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token)}
        _endpoint = "{0}/catalogues/api/v2/network-services".format(base_path)
        try:
            # `with` closes the package file even on failure; the original
            # leaked the handle returned by open()
            with open(package_path, 'rb') as package:
                r = requests.post(_endpoint, data=package, verify=False, headers=headers)
        except Exception as e:
            result['data'] = str(e)
            return json.dumps(result)
        if r.status_code == requests.codes.created:
            result['error'] = False
            result['data'] = r.text
        return json.dumps(result)

    def delete_ns_descriptors_nsdinfoid(self, token, nsdinfoid, host=None, port=None):
        """ NSD Management Interface - Individual NS Descriptor

        /ns_descriptors/{nsdInfoId}:
            DELETE - Delete an individual NS descriptor.

        :param token: auth token retrieved by the auth call
        :param nsdinfoid: id of the individual NSD
        :param host: host url (defaults to the constructor host)
        :param port: port where the MANO API can be accessed
        :return: JSON string {"error": bool, "data": str}
        """
        base_path = self._resolve_base_path(host, port)
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/x-yaml", "accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token)}
        _endpoint = "{0}/catalogues/api/v2/network-services/{1}".format(base_path, nsdinfoid)
        try:
            r = requests.delete(_endpoint, params=None, verify=False, headers=headers)
        except Exception as e:
            result['data'] = str(e)
            return json.dumps(result)
        if r.status_code == requests.codes.no_content:
            result['error'] = False
            result['data'] = r.text
        return json.dumps(result)

    def put_ns_descriptors_nsdinfoid(self, token, data_path, nsdinfoid, host=None, port=None):
        """ NSD Management Interface - Individual NS Descriptor

        /ns_descriptors/{nsdInfoId}:
            PUT - Update the content of an individual NSD.

        :param token: auth token retrieved by the auth call
        :param data_path: file path of the updated NSD content
        :param nsdinfoid: id of the individual NSD
        :param host: host url (defaults to the constructor host)
        :param port: port where the MANO API can be accessed
        :return: JSON string {"error": bool, "data": str}
        """
        base_path = self._resolve_base_path(host, port)
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/x-yaml", "accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token)}
        _endpoint = "{0}/catalogues/api/v2/network-services/{1}".format(base_path, nsdinfoid)
        try:
            # bug fix: the original issued requests.delete here (copy/paste
            # from the DELETE method) and never sent `data_path` at all
            with open(data_path, 'rb') as data_file:
                r = requests.put(_endpoint, data=data_file, verify=False, headers=headers)
        except Exception as e:
            result['data'] = str(e)
            return json.dumps(result)
        # accept 200 OK as well as 204 No Content for an update
        if r.status_code in (requests.codes.ok, requests.codes.no_content):
            result['error'] = False
            result['data'] = r.text
        return json.dumps(result)

    def get_ns_descriptors_nsdinfoid(self, token, nsdinfoid, host=None, port=None):
        """ NSD Management Interface - Individual NS Descriptor

        /ns_descriptors/{nsdInfoId}:
            GET - Read information about an individual NS descriptor resource.

        :param token: auth token retrieved by the auth call
        :param nsdinfoid: id of the individual NSD
        :param host: host url (defaults to the constructor host)
        :param port: port where the MANO API can be accessed
        :return: JSON string {"error": bool, "data": str}
        """
        base_path = self._resolve_base_path(host, port)
        _endpoint = "{0}/catalogues/api/v2/network-services/{1}".format(base_path, nsdinfoid)
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/json", 'Authorization': 'Bearer {}'.format(token)}
        try:
            r = requests.get(_endpoint, params=None, verify=False, stream=True, headers=headers)
        except Exception as e:
            result['data'] = str(e)
            return json.dumps(result)
        if r.status_code == requests.codes.ok:
            result['error'] = False
            result['data'] = r.text
        return json.dumps(result)

    # --- Interfaces not offered by the SONATA backend -----------------------

    def patch_ns_descriptors_nsdinfoid(self):
        return self._not_implemented()

    def get_ns_descriptors_nsd_content(self):
        return self._not_implemented()

    def patch_ns_descriptors_nsd_content(self):
        return self._not_implemented()

    def put_ns_descriptors_nsd_content(self):
        return self._not_implemented()

    def get_pnf_descriptors(self):
        return self._not_implemented()

    def post_pnf_descriptors(self):
        return self._not_implemented()

    def get_pnf_descriptors_pnfdinfoid(self, pnfdInfoId):
        return self._not_implemented()

    def patch_pnf_descriptors_pnfdinfoid(self, pnfdInfoId):
        return self._not_implemented()

    def delete_pnf_descriptors_pnfdinfoid(self, pnfdInfoId):
        return self._not_implemented()

    def get_pnf_descriptors_pnfd_content(self, pnfdInfoId):
        return self._not_implemented()

    def put_pnf_descriptors_pnfd_content(self, pnfdInfoId):
        return self._not_implemented()

    def post_subscriptions(self, pnfdInfoId):
        return self._not_implemented()

    def get_subscriptions(self, subscriptionId):
        return self._not_implemented()

    def get_subscriptions_subscriptionid(self, subscriptionid):
        return self._not_implemented()

    def delete_subscriptions_subscriptionid(self, subscriptionid):
        return self._not_implemented()


from ..CommonInterface import CommonInterfaceNsd
import json
import requests
class Nsd(CommonInterfaceNsd):
"""
NSD Management Interfaces
"""
    def __init__(self, host, port=4002):
        # Gatekeeper host and API port; 4002 appears to be the SONATA default.
        self._host = host
        self._port = port
        self._base_path = 'http://{0}:{1}'  # URL template filled with (host, port)
        self._user_endpoint = '{0}'  # NOTE(review): never read in the visible methods
def get_ns_descriptors(self, token, offset=None, limit=None, host=None, port=None):
""" NSD Management Interface - NS Descriptors
/ns_descriptors:
GET - Query information about multiple
NS descriptor resources.
:param token: auth token retrieved by the auth call
:param offset: offset index while returning
:param limit: limit records while returning
:param host: host url
:param port: port where the MANO API can be accessed
Example:
.. code-block:: python
sonata_nsd = SONATAClient.Nsd(HOST_URL)
sonata_auth = SONATAClient.Auth(HOST_URL)
_token = json.loads(sonata_auth.auth(
username=USERNAME,
password=PASSWORD))
_token = json.loads(_token["data"])
response = json.loads(sonata_nsd.get_ns_descriptors(
token=_token["token"]["access_token"]))
response = json.loads(response["data"])
"""
if host is None:
base_path = "http://{0}:{1}".format(self._host, self._port)
else:
base_path = "http://{0}:{1}".format(host, port)
if not offset:
offset = 0
if not limit:
limit = 10
_endpoint = "{0}/catalogues/api/v2/network-services?offset={1}&limit={2}".format(base_path, offset, limit)
result = {'error': True, 'data': ''}
headers = {"Content-Type": "application/json", 'Authorization': 'Bearer {}'.format(token)}
try:
r = requests.get(_endpoint, params=None, verify=False, stream=True, headers=headers)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.ok:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def post_ns_descriptors(self, token, package_path, host=None, port=None):
""" NSD Management Interface - NS Descriptors
/ns_descriptors:
POST - Create a new NS descriptor resource.
:param token: auth token retrieved by the auth call
:param package_path: file path of the package
:param host: host url
:param port: port where the MANO API can be accessed
Example:
.. code-block:: python
sonata_vnfpkgm = SONATAClient.VnfPkgm(HOST_URL)
sonata_nsd = SONATAClient.Nsd(HOST_URL)
sonata_auth = SONATAClient.Auth(HOST_URL)
_token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD))
_token = json.loads(_token["data"])
sonata_vnfpkgm.post_vnf_packages(token=_token["token"]["access_token"],
package_path="tests/samples/vnfd_example.yml")
response = json.loads(sonata_nsd.post_ns_descriptors(
token=_token["token"]["access_token"],
package_path="tests/samples/nsd_example.yml"))
"""
if host is None:
base_path = self._base_path.format(self._host, self._port)
else:
base_path = self._base_path.format(host, port)
result = {'error': True, 'data': ''}
headers = {"Content-Type": "application/x-yaml", "accept": "application/json",
'Authorization': 'Bearer {}'.format(token)}
_endpoint = "{0}/catalogues/api/v2/network-services".format(base_path)
try:
r = requests.post(_endpoint, data=open(package_path, 'rb'), verify=False, headers=headers)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.created:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def delete_ns_descriptors_nsdinfoid(self, token, nsdinfoid, host=None, port=None):
""" NSD Management Interface - Individual NS Descriptor
/ns_descriptors/{nsdInfoId}:
DELETE - Delete the content of NSD
:param token: auth token retrieved by the auth call
:param nsdinfoid: id of the individual NSD
:param host: host url
:param port: port where the MANO API can be accessed
"""
if host is None:
base_path = self._base_path.format(self._host, self._port)
else:
base_path = self._base_path.format(host, port)
result = {'error': True, 'data': ''}
headers = {"Content-Type": "application/x-yaml", "accept": "application/json",
'Authorization': 'Bearer {}'.format(token)}
_endpoint = "{0}/catalogues/api/v2/network-services/{1}".format(base_path, nsdinfoid)
try:
r = requests.delete(_endpoint, params=None, verify=False, headers=headers)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.no_content:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def put_ns_descriptors_nsdinfoid(self, token, data_path, nsdinfoid, host=None, port=None):
""" NSD Management Interface - Individual NS Descriptor
/ns_descriptors/{nsdInfoId}:
PUT - Update the content of NSD
:param token: auth token retrieved by the auth call
:param nsdinfoid: id of the individual NSD
:param host: host url
:param port: port where the MANO API can be accessed
"""
if host is None:
base_path = self._base_path.format(self._host, self._port)
else:
base_path = self._base_path.format(host, port)
result = {'error': True, 'data': ''}
headers = {"Content-Type": "application/x-yaml", "accept": "application/json",
'Authorization': 'Bearer {}'.format(token)}
_endpoint = "{0}/catalogues/api/v2/network-services/{1}".format(base_path, nsdinfoid)
try:
r = requests.delete(_endpoint, params=None, verify=False, headers=headers)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.no_content:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def get_ns_descriptors_nsdinfoid(self, token, nsdinfoid, host=None, port=None):
""" NSD Management Interface - Individual NS Descriptor
/ns_descriptors/{nsdInfoId}:
Read information about an individual NS
descriptor resource.
:param token: auth token retrieved by the auth call
:param nsdinfoid: id of the individual NSD
:param host: host url
:param port: port where the MANO API can be accessed
"""
if host is None:
base_path = "http://{0}:{1}".format(self._host, self._port)
else:
base_path = "http://{0}:{1}".format(host, port)
_endpoint = "{0}/catalogues/api/v2/network-services/{1}".format(base_path, nsdinfoid)
result = {'error': True, 'data': ''}
headers = {"Content-Type": "application/json", 'Authorization': 'Bearer {}'.format(token)}
try:
r = requests.get(_endpoint, params=None, verify=False, stream=True, headers=headers)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.ok:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def patch_ns_descriptors_nsdinfoid(self):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result)
def get_ns_descriptors_nsd_content(self):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result)
def patch_ns_descriptors_nsd_content(self):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result)
def put_ns_descriptors_nsd_content(self):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result)
def get_pnf_descriptors(self):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result)
def post_pnf_descriptors(self):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result)
def get_pnf_descriptors_pnfdinfoid(self, pnfdInfoId):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result)
def patch_pnf_descriptors_pnfdinfoid(self, pnfdInfoId):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result)
def delete_pnf_descriptors_pnfdinfoid(self, pnfdInfoId):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result)
def get_pnf_descriptors_pnfd_content(self, pnfdInfoId):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result)
def put_pnf_descriptors_pnfd_content(self, pnfdInfoId):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result)
def post_subscriptions(self, pnfdInfoId):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result)
def get_subscriptions(self, subscriptionId):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result)
def get_subscriptions_subscriptionid(self, subscriptionid):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result)
def delete_subscriptions_subscriptionid(self, subscriptionid):
result = {'error': True, 'data': 'Method not implemented in target MANO'}
return json.dumps(result) | 0.583797 | 0.088899 |
from pathlib import Path
from unittest.mock import patch
import pytest
from remove_empty_comment.__main__ import main, transform
def test_integration():
with patch("sys.argv", ["remove-empty-comment"]):
with pytest.raises(SystemExit) as e:
main()
assert e.value.code == 0
with patch("sys.argv", ["remove-empty-comment", __file__, "-c", "#, -"]):
with pytest.raises(SystemExit) as e:
main()
assert e.value.code == 0
def test_transform():
fixture_file = str(Path(__file__).parent / "fixture.py")
with open(fixture_file) as f:
content = f.readlines()
new_content = transform(content, meaningless_characters=["#"])
assert new_content == [
"import argparse\n",
"\n",
"# main function\n",
"def main():\n",
' """Main function, can you read?\n',
"\n",
" parameters:\n",
" -----------\n",
' None"""\n',
" a = 1\n",
" b = 2\n",
" c = a + b\n",
"\n",
"\n",
" print(c)\n",
"\n",
"\n",
"# ARGUMENTS\n",
"# =========\n",
"parser = argparse.ArgumentParser()\n",
"\n",
"# Checking-inputs\n",
"# ------------------------------------\n",
"print(parser)\n",
]
new_content = transform(content, meaningless_characters=["#", "-", "=", " "])
assert new_content == [
"import argparse\n",
"\n",
"# main function\n",
"def main():\n",
' """Main function, can you read?\n',
"\n",
" parameters:\n",
" -----------\n",
' None"""\n',
" a = 1\n",
" b = 2\n",
" c = a + b\n",
"\n",
"\n",
" print(c)\n",
"\n",
"\n",
"# ARGUMENTS\n",
"parser = argparse.ArgumentParser()\n",
"\n",
"# Checking-inputs\n",
"print(parser)\n",
] | tests/test_integration.py | from pathlib import Path
from unittest.mock import patch
import pytest
from remove_empty_comment.__main__ import main, transform
def test_integration():
with patch("sys.argv", ["remove-empty-comment"]):
with pytest.raises(SystemExit) as e:
main()
assert e.value.code == 0
with patch("sys.argv", ["remove-empty-comment", __file__, "-c", "#, -"]):
with pytest.raises(SystemExit) as e:
main()
assert e.value.code == 0
def test_transform():
fixture_file = str(Path(__file__).parent / "fixture.py")
with open(fixture_file) as f:
content = f.readlines()
new_content = transform(content, meaningless_characters=["#"])
assert new_content == [
"import argparse\n",
"\n",
"# main function\n",
"def main():\n",
' """Main function, can you read?\n',
"\n",
" parameters:\n",
" -----------\n",
' None"""\n',
" a = 1\n",
" b = 2\n",
" c = a + b\n",
"\n",
"\n",
" print(c)\n",
"\n",
"\n",
"# ARGUMENTS\n",
"# =========\n",
"parser = argparse.ArgumentParser()\n",
"\n",
"# Checking-inputs\n",
"# ------------------------------------\n",
"print(parser)\n",
]
new_content = transform(content, meaningless_characters=["#", "-", "=", " "])
assert new_content == [
"import argparse\n",
"\n",
"# main function\n",
"def main():\n",
' """Main function, can you read?\n',
"\n",
" parameters:\n",
" -----------\n",
' None"""\n',
" a = 1\n",
" b = 2\n",
" c = a + b\n",
"\n",
"\n",
" print(c)\n",
"\n",
"\n",
"# ARGUMENTS\n",
"parser = argparse.ArgumentParser()\n",
"\n",
"# Checking-inputs\n",
"print(parser)\n",
] | 0.501709 | 0.387256 |
import threading
import requests
import argparse
import random
import json
import time
import sys
import re
import os
status = {
'sent': 0,
'errors': 0,
}
class youtube:
vid = None
session = None
def __init__(self, vid):
self.vid = vid
self.session = requests.session()
def getPlayerConfig(self):
r = self.session.get('https://www.youtube.com/watch?v=' + self.vid, headers={
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Mobile/15E148 Safari/604.1',
'Accept': 'image/png,image/svg+xml,image/*;q=0.8,video/*;q=0.8,*/*;q=0.5',
'Accept-Language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
'Referer': 'https://m.youtube.com/watch?v=' + self.vid
}).text
if sys.version_info.major < 3:
r = r.encode('utf-8', 'ignore')
if 'ytplayer.config' in r:
data = re.findall(r'ytplayer\.config = (.*);ytplayer\.web_player_context_config', r)
elif 'ytInitialPlayerConfig' in r:
data = re.findall(r'ytInitialPlayerConfig = (.*);\n', r)
data = json.loads(data[0])['args']['player_response']
player = json.loads(data)
return player
def getWatchtime(self):
config = self.getPlayerConfig()
vanilla = config['playbackTracking']['videostatsWatchtimeUrl']['baseUrl'].replace('\\u0026', '&').replace('%2C', ',')
cl = vanilla.split("cl=")[1].split("&")[0]
ei = vanilla.split("ei=")[1].split("&")[0]
of = vanilla.split("of=")[1].split("&")[0]
vm = vanilla.split('vm=')[1].split('&')[0]
return 'https://s.youtube.com/api/stats/watchtime?ns=yt&el=detailpage&cpn=isWmmj2C9Y2vULKF&docid=' + self.vid + '&ver=2&cmt=7334&ei=' + ei + '&fmt=133&fs=0&rt=1003&of=' + of + '&euri&lact=4418&live=dvr&cl=' + cl + '&state=playing&vm=' + vm + '&volume=100&c=MWEB&cver=2.20200313.03.00&cplayer=UNIPLAYER&cbrand=apple&cbr=Safari%20Mobile&cbrver=12.1.15E148&cmodel=iphone&cos=iPhone&cosver=12_2&cplatform=MOBILE&delay=5&hl=ru&cr=GB&rtn=1303&afmt=140&lio=1556394045.182&idpj=&ldpj=&rti=1003&muted=0&st=7334&et=7634'
def watchLive(self, proxy=None):
global status
try:
watch = self.getWatchtime()
self.session.get(watch, headers={
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Mobile/15E148 Safari/604.1',
'Accept': 'image/png,image/svg+xml,image/*;q=0.8,video/*;q=0.8,*/*;q=0.5',
'Accept-Language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
'Referer': 'https://m.youtube.com/watch?v=' + self.vid
}, proxies=proxy)
status['sent'] += 1
except:
status['errors'] += 1
def viewThread(vid, proxy=None):
proxy = {"https": "https://%s" % proxy} if proxy else None
youtube(vid).watchLive(proxy)
def statusThread():
global status
while True:
_status = '* sent: %s | errors: %s | total: %s' % (status['sent'], status['errors'], status['sent'] + status['errors'])
sys.stdout.write('%s\r' % _status)
def formatProxyList(proxy):
f = open(proxy, 'r')
l = f.read()
f.close()
return [x.rstrip().lstrip() for x in l.splitlines()]
if __name__ == '__main__':
print('* visualize.py - youtube live viewers bot')
print('* created by neon // @TheFamilyTeam')
print('* https://github.com/TheFamilyTeam')
print('')
parser = argparse.ArgumentParser(description='youtube live viewers bot - https://github.com/TheFamilyTeam')
parser.add_argument('--id', '-i', type=str, help='video id', required=True)
parser.add_argument('--proxy', '-p', type=str, help='proxy file')
parser.add_argument('--delay', '-d', type=float, help='bot delay')
args = parser.parse_args()
print('* botting...')
threading.Thread(target=statusThread).start()
if args.proxy:
if not os.path.isfile(args.proxy):
print('* invalid proxy file')
exit(1)
proxies = formatProxyList(args.proxy)
while True:
proxy = random.choice(proxies)
try:
threading.Thread(target=viewThread, args=(args.id, proxy,)).start()
time.sleep(0.15 if not args.delay else args.delay)
except KeyboardInterrupt:
print('\n* bye!')
os._exit(0)
else:
while True:
try:
threading.Thread(target=viewThread, args=(args.id, None,)).start()
time.sleep(0.15 if not args.delay else args.delay)
except KeyboardInterrupt:
print('\n* bye!')
os._exit(0) | visualize.py | import threading
import requests
import argparse
import random
import json
import time
import sys
import re
import os
status = {
'sent': 0,
'errors': 0,
}
class youtube:
vid = None
session = None
def __init__(self, vid):
self.vid = vid
self.session = requests.session()
def getPlayerConfig(self):
r = self.session.get('https://www.youtube.com/watch?v=' + self.vid, headers={
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Mobile/15E148 Safari/604.1',
'Accept': 'image/png,image/svg+xml,image/*;q=0.8,video/*;q=0.8,*/*;q=0.5',
'Accept-Language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
'Referer': 'https://m.youtube.com/watch?v=' + self.vid
}).text
if sys.version_info.major < 3:
r = r.encode('utf-8', 'ignore')
if 'ytplayer.config' in r:
data = re.findall(r'ytplayer\.config = (.*);ytplayer\.web_player_context_config', r)
elif 'ytInitialPlayerConfig' in r:
data = re.findall(r'ytInitialPlayerConfig = (.*);\n', r)
data = json.loads(data[0])['args']['player_response']
player = json.loads(data)
return player
def getWatchtime(self):
config = self.getPlayerConfig()
vanilla = config['playbackTracking']['videostatsWatchtimeUrl']['baseUrl'].replace('\\u0026', '&').replace('%2C', ',')
cl = vanilla.split("cl=")[1].split("&")[0]
ei = vanilla.split("ei=")[1].split("&")[0]
of = vanilla.split("of=")[1].split("&")[0]
vm = vanilla.split('vm=')[1].split('&')[0]
return 'https://s.youtube.com/api/stats/watchtime?ns=yt&el=detailpage&cpn=isWmmj2C9Y2vULKF&docid=' + self.vid + '&ver=2&cmt=7334&ei=' + ei + '&fmt=133&fs=0&rt=1003&of=' + of + '&euri&lact=4418&live=dvr&cl=' + cl + '&state=playing&vm=' + vm + '&volume=100&c=MWEB&cver=2.20200313.03.00&cplayer=UNIPLAYER&cbrand=apple&cbr=Safari%20Mobile&cbrver=12.1.15E148&cmodel=iphone&cos=iPhone&cosver=12_2&cplatform=MOBILE&delay=5&hl=ru&cr=GB&rtn=1303&afmt=140&lio=1556394045.182&idpj=&ldpj=&rti=1003&muted=0&st=7334&et=7634'
def watchLive(self, proxy=None):
global status
try:
watch = self.getWatchtime()
self.session.get(watch, headers={
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Mobile/15E148 Safari/604.1',
'Accept': 'image/png,image/svg+xml,image/*;q=0.8,video/*;q=0.8,*/*;q=0.5',
'Accept-Language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
'Referer': 'https://m.youtube.com/watch?v=' + self.vid
}, proxies=proxy)
status['sent'] += 1
except:
status['errors'] += 1
def viewThread(vid, proxy=None):
proxy = {"https": "https://%s" % proxy} if proxy else None
youtube(vid).watchLive(proxy)
def statusThread():
global status
while True:
_status = '* sent: %s | errors: %s | total: %s' % (status['sent'], status['errors'], status['sent'] + status['errors'])
sys.stdout.write('%s\r' % _status)
def formatProxyList(proxy):
f = open(proxy, 'r')
l = f.read()
f.close()
return [x.rstrip().lstrip() for x in l.splitlines()]
if __name__ == '__main__':
print('* visualize.py - youtube live viewers bot')
print('* created by neon // @TheFamilyTeam')
print('* https://github.com/TheFamilyTeam')
print('')
parser = argparse.ArgumentParser(description='youtube live viewers bot - https://github.com/TheFamilyTeam')
parser.add_argument('--id', '-i', type=str, help='video id', required=True)
parser.add_argument('--proxy', '-p', type=str, help='proxy file')
parser.add_argument('--delay', '-d', type=float, help='bot delay')
args = parser.parse_args()
print('* botting...')
threading.Thread(target=statusThread).start()
if args.proxy:
if not os.path.isfile(args.proxy):
print('* invalid proxy file')
exit(1)
proxies = formatProxyList(args.proxy)
while True:
proxy = random.choice(proxies)
try:
threading.Thread(target=viewThread, args=(args.id, proxy,)).start()
time.sleep(0.15 if not args.delay else args.delay)
except KeyboardInterrupt:
print('\n* bye!')
os._exit(0)
else:
while True:
try:
threading.Thread(target=viewThread, args=(args.id, None,)).start()
time.sleep(0.15 if not args.delay else args.delay)
except KeyboardInterrupt:
print('\n* bye!')
os._exit(0) | 0.07153 | 0.080213 |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
import random
import csv
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
from collections import OrderedDict,defaultdict
if __name__ == '__main__':
# parse args
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
# load dataset and split users
if args.dataset == 'mnist':
trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
dataset_train = datasets.MNIST('data/mnist/', train=True, download=True, transform=trans_mnist)
dataset_test = datasets.MNIST('data/mnist/', train=False, download=True, transform=trans_mnist)
# sample users
if args.iid:
dict_users = mnist_iid(dataset_train, args.num_users)
else:
dict_users = mnist_noniid(dataset_train, args.num_users)
elif args.dataset == 'cifar':
trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_train = datasets.CIFAR10('data/cifar', train=True, download=True, transform=trans_cifar)
dataset_test = datasets.CIFAR10('data/cifar', train=False, download=True, transform=trans_cifar)
if args.iid:
dict_users = cifar_iid(dataset_train, args.num_users)
else:
exit('Error: only consider IID setting in CIFAR10')
else:
exit('Error: unrecognized dataset')
img_size = dataset_train[0][0].shape
# build model
if args.model == 'cnn' and args.dataset == 'cifar':
net_glob = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_glob = CNNMnist(args=args).to(args.device)
net_glob5 = CNNMnist(args=args).to(args.device)
net_glob10 = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
len_in *= x
net_glob = MLP(dim_in=len_in, dim_hidden=64, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
print(net_glob)
net_glob.train()
net_glob5.train()
net_glob10.train()
#STRUCTURE: KEY = ROUND, VAL = [training_loss, {agentId:flattended_updates}]
malicious_structure5 = defaultdict()
malicious_structure10 = defaultdict()
#STRUCTURE: KEY = ROUND, VAL = [training_loss, {agentId: flattended_updates}]
non_malicious_structure = defaultdict()
non_malicious_structure5 = defaultdict()
non_malicious_structure10 = defaultdict()
# copy weights
w_glob = net_glob.state_dict()
w_glob5 = net_glob5.state_dict()
w_glob10 = net_glob10.state_dict()
# training - NO ATTACK
loss_train = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
#VIVEK constant attack experiment - 5 MALICIOUS
loss_train_5 = []
fixed_agent_5 = random.sample(range(32),5)
updates_recorded_mapping_5 = defaultdict(bool)
for i in fixed_agent_5:
updates_recorded_mapping_5[i] = False #KEY = agent no. & VAL = boolean
fixed_agent_storage_mapping_5 = {} #KEY = agent no. & VAL = Fixed Updates
count_array_5 = []
#VIVEK constant attack experiment - 10 MALICIOUS
loss_train_10 = []
fixed_agent_10 = random.sample(range(32),10)
updates_recorded_mapping_10 = defaultdict(bool)
for i in fixed_agent_10:
updates_recorded_mapping_10[i] = False
fixed_agent_storage_mapping_10 = {}
count_array_10 = []
for iter in range(args.epochs):
malicious_structure5[iter] = [0.0,defaultdict()]
malicious_structure10[iter] = [0.0,defaultdict()]
non_malicious_structure[iter] = [0.0,defaultdict()]
non_malicious_structure5[iter] = [0.0,defaultdict()]
non_malicious_structure10[iter] = [0.0,defaultdict()]
#agent_found_count = 0
w_locals, loss_locals = [], [] #w_locals = array of local_weights
w_locals_5, loss_locals_5 = [],[]
w_locals_10, loss_locals_10 = [],[]
m = max(int(args.frac * args.num_users), 1) #m = number of users used in one ROUND/EPOCH, check utils.options for more clarity on this
idxs_users = np.random.choice(range(args.num_users), m, replace=False) #Randomly selecting m users out of 32 users. NEED TO REPLACE THIS WITH OUR SAMPLING MECHANISM
for idx in idxs_users:
local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
local5 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
local10 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))
w5, loss5 = local5.train(net=copy.deepcopy(net_glob5).to(args.device))
w10, loss10 = local10.train(net=copy.deepcopy(net_glob10).to(args.device))
#STRUCTURE: {agentId:{flattened_updates}}
agent_weight_dict = {idx:defaultdict()}
flattened_w = copy.deepcopy(w)
new_val = flattened_w['conv1.weight'].reshape(-1)
flattened_w['conv1.weight'] = new_val
new_val = flattened_w['conv2.weight'].reshape(-1)
flattened_w['conv2.weight'] = new_val
new_val = flattened_w['fc1.weight'].reshape(-1)
flattened_w['fc1.weight'] = new_val
new_val = flattened_w['fc2.weight'].reshape(-1)
flattened_w['fc2.weight'] = new_val
non_malicious_structure[iter][1][idx] = flattened_w
#print(flattened_w['conv1.weight'].shape)
#print(flattened_w['conv1.bias'].shape)
#print(flattened_w['conv2.weight'].shape)
#print(flattened_w['conv2.bias'].shape)
#print(flattened_w['fc1.weight'].shape)
#print(flattened_w['fc1.bias'].shape)
#print(flattened_w['fc2.weight'].shape)
#print(flattened_w['fc2.bias'].shape)
print("***BLAH BLAH BLAH***")
if idx in fixed_agent_5:
if updates_recorded_mapping_5[idx]:
w5 = copy.deepcopy(fixed_agent_storage_mapping_5[idx])
elif not updates_recorded_mapping_5[idx]:
fixed_agent_storage_mapping_5[idx] = copy.deepcopy(w5)
updates_recorded_mapping_5[idx] = True
flattened_w5 = copy.deepcopy(w5)
new_val = flattened_w5['conv1.weight'].reshape(-1)
flattened_w5['conv1.weight'] = new_val
new_val = flattened_w5['conv2.weight'].reshape(-1)
flattened_w5['conv2.weight'] = new_val
new_val = flattened_w5['fc1.weight'].reshape(-1)
flattened_w5['fc1.weight']= new_val
new_val = flattened_w5['fc2.weight'].reshape(-1)
flattened_w5['fc2.weight']= new_val
#ADD DATA TO MALICIOUS STRUCTURE
malicious_structure5[iter][1][idx] = flattened_w5
if idx not in fixed_agent_5:
flattened_w5 = copy.deepcopy(w5)
new_val = flattened_w5['conv1.weight'].reshape(-1)
flattened_w5['conv1.weight'] = new_val
new_val = flattened_w5['conv2.weight'].reshape(-1)
flattened_w5['conv2.weight'] = new_val
new_val = flattened_w5['fc1.weight'].reshape(-1)
flattened_w5['fc1.weight']= new_val
new_val = flattened_w5['fc2.weight'].reshape(-1)
flattened_w5['fc2.weight']= new_val
#ADD DATA TO NON-MALICIOUS STRUCTURE
non_malicious_structure5[iter][1][idx] = flattened_w5
if idx in fixed_agent_10:
if updates_recorded_mapping_10[idx]:
w10 = copy.deepcopy(fixed_agent_storage_mapping_10[idx])
elif not updates_recorded_mapping_10[idx]:
fixed_agent_storage_mapping_10[idx] = copy.deepcopy(w10)
updates_recorded_mapping_10[idx] = True
flattened_w10 = copy.deepcopy(w10)
new_val = flattened_w10['conv1.weight'].reshape(-1)
flattened_w10['conv1.weight'] = new_val
new_val = flattened_w10['conv2.weight'].reshape(-1)
flattened_w10['conv2.weight'] = new_val
new_val = flattened_w10['fc1.weight'].reshape(-1)
flattened_w10['fc1.weight']= new_val
new_val = flattened_w10['fc2.weight'].reshape(-1)
flattened_w10['fc2.weight']= new_val
#ADD DATA TO MALICIOUS STRUCTURE
malicious_structure10[iter][1][idx] = flattened_w10
if idx not in fixed_agent_10:
flattened_w10 = copy.deepcopy(w10)
new_val = flattened_w10['conv1.weight'].reshape(-1)
flattened_w10['conv1.weight'] = new_val
new_val = flattened_w10['conv2.weight'].reshape(-1)
flattened_w10['conv2.weight'] = new_val
new_val = flattened_w10['fc1.weight'].reshape(-1)
flattened_w10['fc1.weight']= new_val
new_val = flattened_w10['fc2.weight'].reshape(-1)
flattened_w10['fc2.weight']= new_val
#ADD DATA TO NON-MALICIOUS STRUCTURE
non_malicious_structure10[iter][1][idx] = flattened_w10
#NO ATTACK
w_locals.append(copy.deepcopy(w))
loss_locals.append(copy.deepcopy(loss))
#5 MALICIOUS
w_locals_5.append(copy.deepcopy(w5))
loss_locals_5.append(copy.deepcopy(loss5))
#10 MALICIOUS
w_locals_10.append(copy.deepcopy(w10))
loss_locals_10.append(copy.deepcopy(loss10))
# update global weights
w_glob = FedAvg(w_locals)
w_glob_5 = FedAvg(w_locals_5)
w_glob_10 = FedAvg(w_locals_10)
# copy weight to net_glob
net_glob.load_state_dict(w_glob)
net_glob5.load_state_dict(w_glob_5)
net_glob10.load_state_dict(w_glob_10)
# print loss
loss_avg = sum(loss_locals) / len(loss_locals)
loss_avg_5 = sum(loss_locals_5) / len(loss_locals_5)
loss_avg_10 = sum(loss_locals_10) / len(loss_locals_10)
non_malicious_structure[iter][0] = loss_avg
non_malicious_structure5[iter][0] = loss_avg_5
non_malicious_structure10[iter][0] = loss_avg_10
malicious_structure5[iter][0] = loss_avg_5
malicious_structure10[iter][0] = loss_avg_10
print('NO ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
print('C5 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_5))
print('C10 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_10))
#count_array.append(agent_found_count)
loss_train.append(loss_avg)
loss_train_5.append(loss_avg_5)
loss_train_10.append(loss_avg_10)
# plot loss curve
#plt.figure()
#plt.subplots()
#attack_no = plt.plot(range(len(loss_train)), loss_train)
#attack_1 = plt.plot(range(len(loss_train_1)),loss_train_1)
#plt.ylabel('train_loss')
#plt.savefig('log/fed_{}_{}_{}_C{}_iid{}.png'.format(args.dataset, args.model, args.epochs, args.frac, args.iid))
#print("COUNT DATA",str(count_array))
print("NO ATTACK DATA=",loss_train)
print("5 ATTACK DATA=",loss_train_5)
print("10 ATTACK DATA=",loss_train_10)
with open("no_malicious_records.csv","w+") as csv_file:
writer = csv.writer(csv_file,delimiter=',')
writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
for items in non_malicious_structure.keys():
writer.writerow((str(items),str(non_malicious_structure[items][0]),str(non_malicious_structure[items][1])))
with open("5_no_malicious_records.csv","w+") as csv_file:
writer = csv.writer(csv_file,delimiter=',')
writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
for items in non_malicious_structure5.keys():
writer.writerow((str(items),str(non_malicious_structure5[items][0]),str(non_malicious_structure5[items][1])))
with open("10_no_malicious_records.csv","w+") as csv_file:
writer = csv.writer(csv_file,delimiter=',')
writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
for items in non_malicious_structure10.keys():
writer.writerow((str(items),str(non_malicious_structure10[items][0]),str(non_malicious_structure10[items][1])))
with open("5_malicious_records.csv","w+") as csv_file:
writer = csv.writer(csv_file,delimiter=',')
writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
for items in malicious_structure5.keys():
writer.writerow((str(items),str(malicious_structure5[items][0]),str(malicious_structure5[items][1])))
with open("10_malicious_records.csv","w+") as csv_file:
writer = csv.writer(csv_file,delimiter=',')
writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
for items in malicious_structure10.keys():
writer.writerow((str(items),str(malicious_structure10[items][0]),str(malicious_structure10[items][1])))
# testing
net_glob.eval()
#print("Agent_Found_Count",agent_found_count)
acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
print("Training accuracy (NO ATTACK): {:.2f}".format(acc_train))
print("Testing accuracy (NO ATTACK): {:.2f}".format(acc_test))
net_glob5.eval()
acc_train5, loss_train_5 = test_img(net_glob5, dataset_train, args)
acc_test5, loss_test_5 = test_img(net_glob5, dataset_test, args)
print("Training accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_train5))
print("Testing accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_test5))
net_glob10.eval()
acc_train10, loss_train_10 = test_img(net_glob10, dataset_train, args)
acc_test10, loss_test_10 = test_img(net_glob10, dataset_test, args)
print("Training accuracy (CONSTANT ATTACK 10): {:.2f}".format(acc_train10))
print("Testing accuracy (CONSTANT ATTACK 10): {:.2f}".format(acc_test10)) | federated-learning-master/tristan_experiment_MNIST_constant.py |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
import random
import csv
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
from collections import OrderedDict,defaultdict
if __name__ == '__main__':
# parse args
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
# load dataset and split users
if args.dataset == 'mnist':
trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
dataset_train = datasets.MNIST('data/mnist/', train=True, download=True, transform=trans_mnist)
dataset_test = datasets.MNIST('data/mnist/', train=False, download=True, transform=trans_mnist)
# sample users
if args.iid:
dict_users = mnist_iid(dataset_train, args.num_users)
else:
dict_users = mnist_noniid(dataset_train, args.num_users)
elif args.dataset == 'cifar':
trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_train = datasets.CIFAR10('data/cifar', train=True, download=True, transform=trans_cifar)
dataset_test = datasets.CIFAR10('data/cifar', train=False, download=True, transform=trans_cifar)
if args.iid:
dict_users = cifar_iid(dataset_train, args.num_users)
else:
exit('Error: only consider IID setting in CIFAR10')
else:
exit('Error: unrecognized dataset')
img_size = dataset_train[0][0].shape
# build model
if args.model == 'cnn' and args.dataset == 'cifar':
net_glob = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_glob = CNNMnist(args=args).to(args.device)
net_glob5 = CNNMnist(args=args).to(args.device)
net_glob10 = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
len_in *= x
net_glob = MLP(dim_in=len_in, dim_hidden=64, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
print(net_glob)
net_glob.train()
net_glob5.train()
net_glob10.train()
#STRUCTURE: KEY = ROUND, VAL = [training_loss, {agentId:flattended_updates}]
malicious_structure5 = defaultdict()
malicious_structure10 = defaultdict()
#STRUCTURE: KEY = ROUND, VAL = [training_loss, {agentId: flattended_updates}]
non_malicious_structure = defaultdict()
non_malicious_structure5 = defaultdict()
non_malicious_structure10 = defaultdict()
# copy weights
w_glob = net_glob.state_dict()
w_glob5 = net_glob5.state_dict()
w_glob10 = net_glob10.state_dict()
# training - NO ATTACK
loss_train = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
#VIVEK constant attack experiment - 5 MALICIOUS
loss_train_5 = []
fixed_agent_5 = random.sample(range(32),5)
updates_recorded_mapping_5 = defaultdict(bool)
for i in fixed_agent_5:
updates_recorded_mapping_5[i] = False #KEY = agent no. & VAL = boolean
fixed_agent_storage_mapping_5 = {} #KEY = agent no. & VAL = Fixed Updates
count_array_5 = []
#VIVEK constant attack experiment - 10 MALICIOUS
loss_train_10 = []
fixed_agent_10 = random.sample(range(32),10)
updates_recorded_mapping_10 = defaultdict(bool)
for i in fixed_agent_10:
updates_recorded_mapping_10[i] = False
fixed_agent_storage_mapping_10 = {}
count_array_10 = []
for iter in range(args.epochs):
malicious_structure5[iter] = [0.0,defaultdict()]
malicious_structure10[iter] = [0.0,defaultdict()]
non_malicious_structure[iter] = [0.0,defaultdict()]
non_malicious_structure5[iter] = [0.0,defaultdict()]
non_malicious_structure10[iter] = [0.0,defaultdict()]
#agent_found_count = 0
w_locals, loss_locals = [], [] #w_locals = array of local_weights
w_locals_5, loss_locals_5 = [],[]
w_locals_10, loss_locals_10 = [],[]
m = max(int(args.frac * args.num_users), 1) #m = number of users used in one ROUND/EPOCH, check utils.options for more clarity on this
idxs_users = np.random.choice(range(args.num_users), m, replace=False) #Randomly selecting m users out of 32 users. NEED TO REPLACE THIS WITH OUR SAMPLING MECHANISM
for idx in idxs_users:
local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
local5 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
local10 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))
w5, loss5 = local5.train(net=copy.deepcopy(net_glob5).to(args.device))
w10, loss10 = local10.train(net=copy.deepcopy(net_glob10).to(args.device))
#STRUCTURE: {agentId:{flattened_updates}}
agent_weight_dict = {idx:defaultdict()}
flattened_w = copy.deepcopy(w)
new_val = flattened_w['conv1.weight'].reshape(-1)
flattened_w['conv1.weight'] = new_val
new_val = flattened_w['conv2.weight'].reshape(-1)
flattened_w['conv2.weight'] = new_val
new_val = flattened_w['fc1.weight'].reshape(-1)
flattened_w['fc1.weight'] = new_val
new_val = flattened_w['fc2.weight'].reshape(-1)
flattened_w['fc2.weight'] = new_val
non_malicious_structure[iter][1][idx] = flattened_w
#print(flattened_w['conv1.weight'].shape)
#print(flattened_w['conv1.bias'].shape)
#print(flattened_w['conv2.weight'].shape)
#print(flattened_w['conv2.bias'].shape)
#print(flattened_w['fc1.weight'].shape)
#print(flattened_w['fc1.bias'].shape)
#print(flattened_w['fc2.weight'].shape)
#print(flattened_w['fc2.bias'].shape)
print("***BLAH BLAH BLAH***")
if idx in fixed_agent_5:
if updates_recorded_mapping_5[idx]:
w5 = copy.deepcopy(fixed_agent_storage_mapping_5[idx])
elif not updates_recorded_mapping_5[idx]:
fixed_agent_storage_mapping_5[idx] = copy.deepcopy(w5)
updates_recorded_mapping_5[idx] = True
flattened_w5 = copy.deepcopy(w5)
new_val = flattened_w5['conv1.weight'].reshape(-1)
flattened_w5['conv1.weight'] = new_val
new_val = flattened_w5['conv2.weight'].reshape(-1)
flattened_w5['conv2.weight'] = new_val
new_val = flattened_w5['fc1.weight'].reshape(-1)
flattened_w5['fc1.weight']= new_val
new_val = flattened_w5['fc2.weight'].reshape(-1)
flattened_w5['fc2.weight']= new_val
#ADD DATA TO MALICIOUS STRUCTURE
malicious_structure5[iter][1][idx] = flattened_w5
if idx not in fixed_agent_5:
flattened_w5 = copy.deepcopy(w5)
new_val = flattened_w5['conv1.weight'].reshape(-1)
flattened_w5['conv1.weight'] = new_val
new_val = flattened_w5['conv2.weight'].reshape(-1)
flattened_w5['conv2.weight'] = new_val
new_val = flattened_w5['fc1.weight'].reshape(-1)
flattened_w5['fc1.weight']= new_val
new_val = flattened_w5['fc2.weight'].reshape(-1)
flattened_w5['fc2.weight']= new_val
#ADD DATA TO NON-MALICIOUS STRUCTURE
non_malicious_structure5[iter][1][idx] = flattened_w5
if idx in fixed_agent_10:
if updates_recorded_mapping_10[idx]:
w10 = copy.deepcopy(fixed_agent_storage_mapping_10[idx])
elif not updates_recorded_mapping_10[idx]:
fixed_agent_storage_mapping_10[idx] = copy.deepcopy(w10)
updates_recorded_mapping_10[idx] = True
flattened_w10 = copy.deepcopy(w10)
new_val = flattened_w10['conv1.weight'].reshape(-1)
flattened_w10['conv1.weight'] = new_val
new_val = flattened_w10['conv2.weight'].reshape(-1)
flattened_w10['conv2.weight'] = new_val
new_val = flattened_w10['fc1.weight'].reshape(-1)
flattened_w10['fc1.weight']= new_val
new_val = flattened_w10['fc2.weight'].reshape(-1)
flattened_w10['fc2.weight']= new_val
#ADD DATA TO MALICIOUS STRUCTURE
malicious_structure10[iter][1][idx] = flattened_w10
if idx not in fixed_agent_10:
flattened_w10 = copy.deepcopy(w10)
new_val = flattened_w10['conv1.weight'].reshape(-1)
flattened_w10['conv1.weight'] = new_val
new_val = flattened_w10['conv2.weight'].reshape(-1)
flattened_w10['conv2.weight'] = new_val
new_val = flattened_w10['fc1.weight'].reshape(-1)
flattened_w10['fc1.weight']= new_val
new_val = flattened_w10['fc2.weight'].reshape(-1)
flattened_w10['fc2.weight']= new_val
#ADD DATA TO NON-MALICIOUS STRUCTURE
non_malicious_structure10[iter][1][idx] = flattened_w10
#NO ATTACK
w_locals.append(copy.deepcopy(w))
loss_locals.append(copy.deepcopy(loss))
#5 MALICIOUS
w_locals_5.append(copy.deepcopy(w5))
loss_locals_5.append(copy.deepcopy(loss5))
#10 MALICIOUS
w_locals_10.append(copy.deepcopy(w10))
loss_locals_10.append(copy.deepcopy(loss10))
# update global weights
w_glob = FedAvg(w_locals)
w_glob_5 = FedAvg(w_locals_5)
w_glob_10 = FedAvg(w_locals_10)
# copy weight to net_glob
net_glob.load_state_dict(w_glob)
net_glob5.load_state_dict(w_glob_5)
net_glob10.load_state_dict(w_glob_10)
# print loss
loss_avg = sum(loss_locals) / len(loss_locals)
loss_avg_5 = sum(loss_locals_5) / len(loss_locals_5)
loss_avg_10 = sum(loss_locals_10) / len(loss_locals_10)
non_malicious_structure[iter][0] = loss_avg
non_malicious_structure5[iter][0] = loss_avg_5
non_malicious_structure10[iter][0] = loss_avg_10
malicious_structure5[iter][0] = loss_avg_5
malicious_structure10[iter][0] = loss_avg_10
print('NO ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
print('C5 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_5))
print('C10 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_10))
#count_array.append(agent_found_count)
loss_train.append(loss_avg)
loss_train_5.append(loss_avg_5)
loss_train_10.append(loss_avg_10)
# plot loss curve
#plt.figure()
#plt.subplots()
#attack_no = plt.plot(range(len(loss_train)), loss_train)
#attack_1 = plt.plot(range(len(loss_train_1)),loss_train_1)
#plt.ylabel('train_loss')
#plt.savefig('log/fed_{}_{}_{}_C{}_iid{}.png'.format(args.dataset, args.model, args.epochs, args.frac, args.iid))
#print("COUNT DATA",str(count_array))
print("NO ATTACK DATA=",loss_train)
print("5 ATTACK DATA=",loss_train_5)
print("10 ATTACK DATA=",loss_train_10)
with open("no_malicious_records.csv","w+") as csv_file:
writer = csv.writer(csv_file,delimiter=',')
writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
for items in non_malicious_structure.keys():
writer.writerow((str(items),str(non_malicious_structure[items][0]),str(non_malicious_structure[items][1])))
with open("5_no_malicious_records.csv","w+") as csv_file:
writer = csv.writer(csv_file,delimiter=',')
writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
for items in non_malicious_structure5.keys():
writer.writerow((str(items),str(non_malicious_structure5[items][0]),str(non_malicious_structure5[items][1])))
with open("10_no_malicious_records.csv","w+") as csv_file:
writer = csv.writer(csv_file,delimiter=',')
writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
for items in non_malicious_structure10.keys():
writer.writerow((str(items),str(non_malicious_structure10[items][0]),str(non_malicious_structure10[items][1])))
with open("5_malicious_records.csv","w+") as csv_file:
writer = csv.writer(csv_file,delimiter=',')
writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
for items in malicious_structure5.keys():
writer.writerow((str(items),str(malicious_structure5[items][0]),str(malicious_structure5[items][1])))
with open("10_malicious_records.csv","w+") as csv_file:
writer = csv.writer(csv_file,delimiter=',')
writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
for items in malicious_structure10.keys():
writer.writerow((str(items),str(malicious_structure10[items][0]),str(malicious_structure10[items][1])))
# testing
net_glob.eval()
#print("Agent_Found_Count",agent_found_count)
acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
print("Training accuracy (NO ATTACK): {:.2f}".format(acc_train))
print("Testing accuracy (NO ATTACK): {:.2f}".format(acc_test))
net_glob5.eval()
acc_train5, loss_train_5 = test_img(net_glob5, dataset_train, args)
acc_test5, loss_test_5 = test_img(net_glob5, dataset_test, args)
print("Training accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_train5))
print("Testing accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_test5))
net_glob10.eval()
acc_train10, loss_train_10 = test_img(net_glob10, dataset_train, args)
acc_test10, loss_test_10 = test_img(net_glob10, dataset_test, args)
print("Training accuracy (CONSTANT ATTACK 10): {:.2f}".format(acc_train10))
print("Testing accuracy (CONSTANT ATTACK 10): {:.2f}".format(acc_test10)) | 0.488283 | 0.470858 |
import os.path
from data.base_dataset import BaseDataset, get_params
from data.image_folder import make_dataset
import torchvision.transforms as transforms
import glob
from PIL import Image
class TextureDataset(BaseDataset):
"""A dataset class for paired image dataset.
It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
During test time, you need to prepare a directory '/path/to/data/test'.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
assert opt.data_class_a is not None, "--data-class-a is required"
assert opt.data_class_b is not None, "--data-class-b is required"
self.data_dir_a = opt.data_class_a
self.data_dir_b = opt.data_class_b
self.data_paths_a = glob.glob(os.path.join(opt.data_class_a, '*'))
self.data_paths_b = glob.glob(os.path.join(opt.data_class_b, '*'))
# assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image
self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) - - an image in the input domain
B (tensor) - - its corresponding image in the target domain
A_paths (str) - - image paths
B_paths (str) - - image paths
"""
# read a image given a random integer index
a_path = self.data_paths_a[index]
b_path = self.data_paths_b[index]
a_image = Image.open(a_path).convert('RGB')
b_image = Image.open(b_path).convert('RGB')
# apply the same transform to both A and B
transform_params = get_params(self.opt, a_image.size)
a_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
b_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
a = a_transform(a_image)
b = b_transform(b_image)
return {'A': a, 'B': b, 'A_paths': a_path, 'B_paths': b_path}
def __len__(self):
"""Return the total number of images in the dataset."""
return min(len(self.data_paths_a), len(self.data_paths_b))
def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):
transform_list = []
if grayscale:
transform_list.append(transforms.Grayscale(1))
if not opt.no_flip:
if params is None:
transform_list.append(transforms.RandomHorizontalFlip())
elif params['flip']:
transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
transform_list.append(transforms.Lambda(lambda img: __crop(img)))
if 'resize' in opt.preprocess:
osize = [opt.load_size, opt.load_size]
transform_list.append(transforms.Resize(osize, method))
if convert:
transform_list += [transforms.ToTensor()]
if grayscale:
transform_list += [transforms.Normalize((0.5,), (0.5,))]
else:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def __crop(img, texture_size=512):
s = texture_size * 400 / 512
return img.crop((texture_size/2 - s/2, texture_size - s, texture_size/2 + s/2, texture_size)) | data/texture_dataset.py | import os.path
from data.base_dataset import BaseDataset, get_params
from data.image_folder import make_dataset
import torchvision.transforms as transforms
import glob
from PIL import Image
class TextureDataset(BaseDataset):
"""A dataset class for paired image dataset.
It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
During test time, you need to prepare a directory '/path/to/data/test'.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
assert opt.data_class_a is not None, "--data-class-a is required"
assert opt.data_class_b is not None, "--data-class-b is required"
self.data_dir_a = opt.data_class_a
self.data_dir_b = opt.data_class_b
self.data_paths_a = glob.glob(os.path.join(opt.data_class_a, '*'))
self.data_paths_b = glob.glob(os.path.join(opt.data_class_b, '*'))
# assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image
self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) - - an image in the input domain
B (tensor) - - its corresponding image in the target domain
A_paths (str) - - image paths
B_paths (str) - - image paths
"""
# read a image given a random integer index
a_path = self.data_paths_a[index]
b_path = self.data_paths_b[index]
a_image = Image.open(a_path).convert('RGB')
b_image = Image.open(b_path).convert('RGB')
# apply the same transform to both A and B
transform_params = get_params(self.opt, a_image.size)
a_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
b_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
a = a_transform(a_image)
b = b_transform(b_image)
return {'A': a, 'B': b, 'A_paths': a_path, 'B_paths': b_path}
def __len__(self):
"""Return the total number of images in the dataset."""
return min(len(self.data_paths_a), len(self.data_paths_b))
def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):
transform_list = []
if grayscale:
transform_list.append(transforms.Grayscale(1))
if not opt.no_flip:
if params is None:
transform_list.append(transforms.RandomHorizontalFlip())
elif params['flip']:
transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
transform_list.append(transforms.Lambda(lambda img: __crop(img)))
if 'resize' in opt.preprocess:
osize = [opt.load_size, opt.load_size]
transform_list.append(transforms.Resize(osize, method))
if convert:
transform_list += [transforms.ToTensor()]
if grayscale:
transform_list += [transforms.Normalize((0.5,), (0.5,))]
else:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def __crop(img, texture_size=512):
s = texture_size * 400 / 512
return img.crop((texture_size/2 - s/2, texture_size - s, texture_size/2 + s/2, texture_size)) | 0.864081 | 0.570152 |
import math
import random
import utils
import operator
required_percepts = ['get_state']
required_actions = ['set_state','set_coloredtrace', 'advance','turnabit','turn_toward']
def average_direction(long_range, actions):
direction = 0
for bird, distance in long_range:
direction += bird.states['direction']
if len(long_range) > 0:
direction /= len(long_range)
actions['set_state']('direction', direction)
else:
actions['turnabit'](max_turn=math.pi/24)
def average(agent_list, world):
shape = world.get_shape()
mean_x, mean_y = 0, 0
weight_x, weight_y = 0, 0
for agent, dist in agent_list:
ax, ay = agent.states['position']
mean_x = utils.weighted_sum_wrap(mean_x, weight_x, ax, 1, shape[0])
weight_x += 1
mean_y = utils.weighted_sum_wrap(mean_y, weight_y, ay, 1, shape[1])
weight_y += 1
return (mean_x, mean_y)
def average_position(long_range, world, actions):
if len(long_range) > 0:
actions['turn_toward'](*average(long_range, world), wrap=True, max_turn=math.pi/10)
def local_repulsion(short_range, world, actions):
if len(short_range) > 0:
actions['turn_toward'](*average(short_range, world), wrap=True, max_turn=math.pi/5, get_closer=False)
actions['advance']()
def run(name, world, percepts, actions):
# Hide our old position
actions['set_pixel'](color=(0,0,0,0))
actions['set_state']('speed', 0.80)
short_range = percepts['agentorobject_rangequery'](agentorobject_name="flocking_bird", count_max=1000, dist_min=0.3, dist_max=4, distance=utils.distance(world.get_shape(), utils.WRAP))
short_range.sort(key=operator.itemgetter(1))
if not local_repulsion(short_range, world, actions):
long_range = percepts['agentorobject_rangequery'](agentorobject_name="flocking_bird", count_max=1000, dist_min=3, dist_max=20, distance=utils.distance(world.get_shape(), utils.WRAP))
average_direction(long_range, actions)
average_position(long_range, world, actions)
actions['advance']()
# Go on to next position
actions['set_coloredtrace']()
# Mark our current position
actions['set_pixel'](color=(0,255,255,255))
# Survive
return True | modules/agents/brains/flocking_bird.py |
import math
import random
import utils
import operator
required_percepts = ['get_state']
required_actions = ['set_state','set_coloredtrace', 'advance','turnabit','turn_toward']
def average_direction(long_range, actions):
direction = 0
for bird, distance in long_range:
direction += bird.states['direction']
if len(long_range) > 0:
direction /= len(long_range)
actions['set_state']('direction', direction)
else:
actions['turnabit'](max_turn=math.pi/24)
def average(agent_list, world):
shape = world.get_shape()
mean_x, mean_y = 0, 0
weight_x, weight_y = 0, 0
for agent, dist in agent_list:
ax, ay = agent.states['position']
mean_x = utils.weighted_sum_wrap(mean_x, weight_x, ax, 1, shape[0])
weight_x += 1
mean_y = utils.weighted_sum_wrap(mean_y, weight_y, ay, 1, shape[1])
weight_y += 1
return (mean_x, mean_y)
def average_position(long_range, world, actions):
if len(long_range) > 0:
actions['turn_toward'](*average(long_range, world), wrap=True, max_turn=math.pi/10)
def local_repulsion(short_range, world, actions):
if len(short_range) > 0:
actions['turn_toward'](*average(short_range, world), wrap=True, max_turn=math.pi/5, get_closer=False)
actions['advance']()
def run(name, world, percepts, actions):
# Hide our old position
actions['set_pixel'](color=(0,0,0,0))
actions['set_state']('speed', 0.80)
short_range = percepts['agentorobject_rangequery'](agentorobject_name="flocking_bird", count_max=1000, dist_min=0.3, dist_max=4, distance=utils.distance(world.get_shape(), utils.WRAP))
short_range.sort(key=operator.itemgetter(1))
if not local_repulsion(short_range, world, actions):
long_range = percepts['agentorobject_rangequery'](agentorobject_name="flocking_bird", count_max=1000, dist_min=3, dist_max=20, distance=utils.distance(world.get_shape(), utils.WRAP))
average_direction(long_range, actions)
average_position(long_range, world, actions)
actions['advance']()
# Go on to next position
actions['set_coloredtrace']()
# Mark our current position
actions['set_pixel'](color=(0,255,255,255))
# Survive
return True | 0.382257 | 0.431884 |
from __future__ import print_function
import os
import argparse
import socket
import time
import tensorboard_logger as tb_logger
import torch
import torch.optim as optim
import torch.nn as nn
import torch.backends.cudnn as cudnn
import numpy as np
from models import model_dict
from dataset.cifar100 import get_cifar100_dataloaders
from dataset.cifar10 import get_cifar10_dataloaders
from helper.util import adjust_learning_rate, accuracy, AverageMeter
from helper.loops import train_vanilla as train, validate
def parse_option():
hostname = socket.gethostname()
parser = argparse.ArgumentParser("argument for training")
parser.add_argument("--print_freq", type=int, default=100, help="print frequency")
parser.add_argument("--tb_freq", type=int, default=500, help="tb frequency")
parser.add_argument("--save_freq", type=int, default=40, help="save frequency")
parser.add_argument("--batch_size", type=int, default=64, help="batch_size")
parser.add_argument(
"--num_workers", type=int, default=8, help="num of workers to use"
)
parser.add_argument(
"--epochs", type=int, default=240, help="number of training epochs"
)
# optimization
parser.add_argument(
"--learning_rate", type=float, default=0.05, help="learning rate"
)
parser.add_argument(
"--lr_decay_epochs",
type=str,
default="150,180,210",
help="where to decay lr, can be a list",
)
parser.add_argument(
"--lr_decay_rate", type=float, default=0.1, help="decay rate for learning rate"
)
parser.add_argument("--weight_decay", type=float, default=5e-4, help="weight decay")
parser.add_argument("--momentum", type=float, default=0.9, help="momentum")
# dataset
parser.add_argument(
"--model",
type=str,
default="resnet110",
choices=[
"resnet8",
"resnet14",
"resnet20",
"resnet32",
"resnet44",
"resnet56",
"resnet110",
"resnet8x4",
"resnet32x4",
"wrn_16_1",
"wrn_16_2",
"wrn_40_1",
"wrn_40_2",
"vgg8",
"vgg11",
"vgg13",
"vgg16",
"vgg19",
"MobileNetV2",
"ShuffleV1",
"ShuffleV2",
],
)
parser.add_argument(
"--dataset",
type=str,
default="cifar10",
choices=["cifar100", "cifar10"],
help="dataset",
)
parser.add_argument(
"-t", "--trial", type=str, default="first", help="the experiment id"
)
parser.add_argument(
"--train_rule",
default="None",
type=str,
choices=["None", "Resample", "Reweight", "DRW"],
)
opt = parser.parse_args()
# set different learning rate from these 4 models
if opt.model in ["MobileNetV2", "ShuffleV1", "ShuffleV2"]:
opt.learning_rate = 0.01
# set the path according to the environment
if hostname.startswith("visiongpu"):
opt.model_path = "/path/to/my/model"
opt.tb_path = "/path/to/my/tensorboard"
else:
opt.model_path = "./save/models"
opt.tb_path = "./save/tensorboard"
iterations = opt.lr_decay_epochs.split(",")
opt.lr_decay_epochs = list([])
for it in iterations:
opt.lr_decay_epochs.append(int(it))
opt.model_name = "{}_{}_lr_{}_decay_{}_trial_{}".format(
opt.model, opt.dataset, opt.learning_rate, opt.weight_decay, opt.trial
)
opt.tb_folder = os.path.join(opt.tb_path, opt.model_name)
if not os.path.isdir(opt.tb_folder):
os.makedirs(opt.tb_folder)
opt.save_folder = os.path.join(opt.model_path, opt.model_name)
if not os.path.isdir(opt.save_folder):
os.makedirs(opt.save_folder)
return opt
def main():
best_acc = 0
best_cls_acc = []
opt = parse_option()
# dataloader
if opt.dataset == "cifar100":
train_loader, val_loader = get_cifar100_dataloaders(
batch_size=opt.batch_size,
num_workers=opt.num_workers,
train_rule=opt.train_rule,
)
n_cls = 100
elif opt.dataset == "cifar10":
train_loader, val_loader = get_cifar10_dataloaders(
batch_size=opt.batch_size,
num_workers=opt.num_workers,
train_rule=opt.train_rule,
)
n_cls = 10
else:
raise NotImplementedError(opt.dataset)
# model
model = model_dict[opt.model](num_classes=n_cls)
# optimizer
optimizer = optim.SGD(
model.parameters(),
lr=opt.learning_rate,
momentum=opt.momentum,
weight_decay=opt.weight_decay,
)
cls_num_list = train_loader.dataset.get_cls_num_list()
if opt.train_rule == "Reweight":
beta = 0.9999
effective_num = 1.0 - np.power(beta, cls_num_list)
per_cls_weights = (1.0 - beta) / np.array(effective_num)
per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
ce_weight = torch.FloatTensor(per_cls_weights).cuda()
print(f"Re-weighting {ce_weight}")
criterion = nn.CrossEntropyLoss()
if torch.cuda.is_available():
model = model.cuda()
criterion = criterion.cuda()
cudnn.benchmark = True
# tensorboard
logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)
# routine
iteration = 0
for epoch in range(1, opt.epochs + 1):
adjust_learning_rate(epoch, opt, optimizer)
print("==> training...")
time1 = time.time()
train_acc, train_loss, iteration = train(
epoch, train_loader, model, criterion, optimizer, opt, logger, iteration
)
time2 = time.time()
print("epoch {}, total time {:.2f}".format(epoch, time2 - time1))
logger.log_value("train_acc", train_acc, epoch)
logger.log_value("train_loss", train_loss, epoch)
test_acc, test_acc_top5, test_loss, cls_acc = validate(
val_loader, model, criterion, opt, logger, epoch, cls_num_list
)
# save the best model
if test_acc > best_acc:
best_acc = test_acc
best_cls_acc = cls_acc
state = {
"epoch": epoch,
"model": model.state_dict(),
"best_acc": best_acc,
"optimizer": optimizer.state_dict(),
}
save_file = os.path.join(opt.save_folder, "{}_best.pth".format(opt.model))
print("saving the best model!")
torch.save(state, save_file)
# regular saving
if epoch % opt.save_freq == 0:
print("==> Saving...")
state = {
"epoch": epoch,
"model": model.state_dict(),
"accuracy": test_acc,
"optimizer": optimizer.state_dict(),
}
save_file = os.path.join(
opt.save_folder, "ckpt_epoch_{epoch}.pth".format(epoch=epoch)
)
torch.save(state, save_file)
# This best accuracy is only for printing purpose.
# The results reported in the paper/README is from the last epoch.
print("best accuracy:", best_acc, best_cls_acc)
# save model
state = {
"opt": opt,
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
}
save_file = os.path.join(opt.save_folder, "{}_last.pth".format(opt.model))
torch.save(state, save_file)
if __name__ == "__main__":
main() | train_teacher.py | from __future__ import print_function
import os
import argparse
import socket
import time
import tensorboard_logger as tb_logger
import torch
import torch.optim as optim
import torch.nn as nn
import torch.backends.cudnn as cudnn
import numpy as np
from models import model_dict
from dataset.cifar100 import get_cifar100_dataloaders
from dataset.cifar10 import get_cifar10_dataloaders
from helper.util import adjust_learning_rate, accuracy, AverageMeter
from helper.loops import train_vanilla as train, validate
def parse_option():
hostname = socket.gethostname()
parser = argparse.ArgumentParser("argument for training")
parser.add_argument("--print_freq", type=int, default=100, help="print frequency")
parser.add_argument("--tb_freq", type=int, default=500, help="tb frequency")
parser.add_argument("--save_freq", type=int, default=40, help="save frequency")
parser.add_argument("--batch_size", type=int, default=64, help="batch_size")
parser.add_argument(
"--num_workers", type=int, default=8, help="num of workers to use"
)
parser.add_argument(
"--epochs", type=int, default=240, help="number of training epochs"
)
# optimization
parser.add_argument(
"--learning_rate", type=float, default=0.05, help="learning rate"
)
parser.add_argument(
"--lr_decay_epochs",
type=str,
default="150,180,210",
help="where to decay lr, can be a list",
)
parser.add_argument(
"--lr_decay_rate", type=float, default=0.1, help="decay rate for learning rate"
)
parser.add_argument("--weight_decay", type=float, default=5e-4, help="weight decay")
parser.add_argument("--momentum", type=float, default=0.9, help="momentum")
# dataset
parser.add_argument(
"--model",
type=str,
default="resnet110",
choices=[
"resnet8",
"resnet14",
"resnet20",
"resnet32",
"resnet44",
"resnet56",
"resnet110",
"resnet8x4",
"resnet32x4",
"wrn_16_1",
"wrn_16_2",
"wrn_40_1",
"wrn_40_2",
"vgg8",
"vgg11",
"vgg13",
"vgg16",
"vgg19",
"MobileNetV2",
"ShuffleV1",
"ShuffleV2",
],
)
parser.add_argument(
"--dataset",
type=str,
default="cifar10",
choices=["cifar100", "cifar10"],
help="dataset",
)
parser.add_argument(
"-t", "--trial", type=str, default="first", help="the experiment id"
)
parser.add_argument(
"--train_rule",
default="None",
type=str,
choices=["None", "Resample", "Reweight", "DRW"],
)
opt = parser.parse_args()
# set different learning rate from these 4 models
if opt.model in ["MobileNetV2", "ShuffleV1", "ShuffleV2"]:
opt.learning_rate = 0.01
# set the path according to the environment
if hostname.startswith("visiongpu"):
opt.model_path = "/path/to/my/model"
opt.tb_path = "/path/to/my/tensorboard"
else:
opt.model_path = "./save/models"
opt.tb_path = "./save/tensorboard"
iterations = opt.lr_decay_epochs.split(",")
opt.lr_decay_epochs = list([])
for it in iterations:
opt.lr_decay_epochs.append(int(it))
opt.model_name = "{}_{}_lr_{}_decay_{}_trial_{}".format(
opt.model, opt.dataset, opt.learning_rate, opt.weight_decay, opt.trial
)
opt.tb_folder = os.path.join(opt.tb_path, opt.model_name)
if not os.path.isdir(opt.tb_folder):
os.makedirs(opt.tb_folder)
opt.save_folder = os.path.join(opt.model_path, opt.model_name)
if not os.path.isdir(opt.save_folder):
os.makedirs(opt.save_folder)
return opt
def main():
best_acc = 0
best_cls_acc = []
opt = parse_option()
# dataloader
if opt.dataset == "cifar100":
train_loader, val_loader = get_cifar100_dataloaders(
batch_size=opt.batch_size,
num_workers=opt.num_workers,
train_rule=opt.train_rule,
)
n_cls = 100
elif opt.dataset == "cifar10":
train_loader, val_loader = get_cifar10_dataloaders(
batch_size=opt.batch_size,
num_workers=opt.num_workers,
train_rule=opt.train_rule,
)
n_cls = 10
else:
raise NotImplementedError(opt.dataset)
# model
model = model_dict[opt.model](num_classes=n_cls)
# optimizer
optimizer = optim.SGD(
model.parameters(),
lr=opt.learning_rate,
momentum=opt.momentum,
weight_decay=opt.weight_decay,
)
cls_num_list = train_loader.dataset.get_cls_num_list()
if opt.train_rule == "Reweight":
beta = 0.9999
effective_num = 1.0 - np.power(beta, cls_num_list)
per_cls_weights = (1.0 - beta) / np.array(effective_num)
per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
ce_weight = torch.FloatTensor(per_cls_weights).cuda()
print(f"Re-weighting {ce_weight}")
criterion = nn.CrossEntropyLoss()
if torch.cuda.is_available():
model = model.cuda()
criterion = criterion.cuda()
cudnn.benchmark = True
# tensorboard
logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)
# routine
iteration = 0
for epoch in range(1, opt.epochs + 1):
adjust_learning_rate(epoch, opt, optimizer)
print("==> training...")
time1 = time.time()
train_acc, train_loss, iteration = train(
epoch, train_loader, model, criterion, optimizer, opt, logger, iteration
)
time2 = time.time()
print("epoch {}, total time {:.2f}".format(epoch, time2 - time1))
logger.log_value("train_acc", train_acc, epoch)
logger.log_value("train_loss", train_loss, epoch)
test_acc, test_acc_top5, test_loss, cls_acc = validate(
val_loader, model, criterion, opt, logger, epoch, cls_num_list
)
# save the best model
if test_acc > best_acc:
best_acc = test_acc
best_cls_acc = cls_acc
state = {
"epoch": epoch,
"model": model.state_dict(),
"best_acc": best_acc,
"optimizer": optimizer.state_dict(),
}
save_file = os.path.join(opt.save_folder, "{}_best.pth".format(opt.model))
print("saving the best model!")
torch.save(state, save_file)
# regular saving
if epoch % opt.save_freq == 0:
print("==> Saving...")
state = {
"epoch": epoch,
"model": model.state_dict(),
"accuracy": test_acc,
"optimizer": optimizer.state_dict(),
}
save_file = os.path.join(
opt.save_folder, "ckpt_epoch_{epoch}.pth".format(epoch=epoch)
)
torch.save(state, save_file)
# This best accuracy is only for printing purpose.
# The results reported in the paper/README is from the last epoch.
print("best accuracy:", best_acc, best_cls_acc)
# save model
state = {
"opt": opt,
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
}
save_file = os.path.join(opt.save_folder, "{}_last.pth".format(opt.model))
torch.save(state, save_file)
if __name__ == "__main__":
main() | 0.753467 | 0.124479 |
from argopandas.downloader import download_async
import os
import urllib.request
import tempfile
from typing import BinaryIO
class PathsDoNotExistError(Exception):
    """Raised when one or more requested paths are missing locally
    and could not be downloaded.
    """

    def __init__(self, bad_paths, errors=None):
        self.bad_paths = bad_paths
        self.errors = errors
        # Show at most the first 20 offending paths in the message.
        shown = [f"'{p}'" for p in bad_paths[:20]]
        hidden = len(bad_paths) - 20
        summary = "\n".join(shown)
        if hidden > 0:
            summary += f'\n...and {hidden} more'
        super().__init__(summary)
class Mirror:
    """Abstract base class for all mirror types.

    Subclass this and use it with the main API if you have a
    non-standard mapping of files but still want the features of
    the package-level API.

    :param path: A path to a file on the GDAC (e.g.,
        /dac/csio/1234/1234_meta.nc)
    """

    def __init__(self):
        # Cache for objects derived from this mirror (e.g., global indexes).
        self.cache = {}

    def reset(self):
        """Drop any cached objects."""
        self.cache = {}

    def open(self, path) -> BinaryIO:
        """Get a file-like object for this ``path``."""
        raise NotImplementedError()

    def filename(self, path) -> str:
        """
        Get a filename for this path. The filename is not guaranteed
        to exist unless :meth:`prepare` is called first.
        """
        raise NotImplementedError()

    def prepare(self, path_iter):
        """
        Prepare the mirror for loading all the paths in ``path_iter``
        (e.g., by downloading them).

        :param path_iter: An iterable of ``path`` s.
        """
        raise NotImplementedError()

    def url(self, path):
        """Return the URL to ``path`` without checking if it exists."""
        raise NotImplementedError()

    def netcdf_dataset_src(self, path):
        """
        Return the best available input to
        :class:`argopandas.netcdf.NetCDFWrapper`.
        """
        raise NotImplementedError()
class FileMirror(Mirror):
    """
    A mirror backed by a root directory on the local filesystem.

    Useful when you hold a copy of Argo downloaded via ``rsync`` or a
    stable DOI version of the GDAC; a partial copy containing just the
    files you access frequently works too.
    """

    def __init__(self, root):
        """
        :param root: The root directory containing the files.
        """
        super().__init__()
        if not os.path.isdir(root):
            raise ValueError(f"'{root}' is not a directory")
        self._root = root

    def __repr__(self) -> str:
        return f"argo.FileMirror({repr(self._root)})"

    def open(self, path) -> BinaryIO:
        return open(self.filename(path), mode='rb')

    def filename(self, path) -> str:
        return os.path.join(self._root, path)

    def url(self, path) -> str:
        # file:// URLs always use forward slashes, even on Windows.
        absolute = os.path.abspath(self.filename(path))
        return 'file://' + absolute.replace('\\', '/')

    def prepare(self, path_iter):
        # Nothing to download; just verify that every path exists.
        missing = [p for p in path_iter
                   if not os.path.isfile(os.path.join(self._root, p))]
        if missing:
            raise PathsDoNotExistError(missing)
        return self

    def netcdf_dataset_src(self, path):
        return self.filename(path)
class UrlMirror(Mirror):
    """
    A cache-less mirror that reads straight from URL connections.

    You probably want :class:`CachedUrlMirror` unless you are doing
    real-time work that might be affected by an out-of-date cache.
    Note that :meth:`filename` is not supported by the ``UrlMirror``
    (use :meth:`open` instead).
    """

    def __init__(self, root):
        """
        :param root: The URL of the base directory. This can be
            anything supported by ``urllib.request.urlopen``.
        """
        super().__init__()
        # Normalize away a single trailing slash so url() can join cleanly.
        self._root = root[:-1] if root.endswith('/') else root

    def __repr__(self) -> str:
        return f"argo.UrlMirror({repr(self._root)})"

    def open(self, path) -> BinaryIO:
        return urllib.request.urlopen(self.url(path))

    def filename(self, path) -> str:
        raise NotImplementedError()

    def url(self, path) -> str:
        relative = path[1:] if path.startswith('/') else path
        return '/'.join((self._root, relative))

    def prepare(self, path_iter):
        # Files are fetched on demand; nothing to do ahead of time.
        return self

    def netcdf_dataset_src(self, path):
        return self.url(path)
class CachedUrlMirror(UrlMirror):
    """
    The most common mirror: URL-backed with a local download cache so
    the same file is never fetched twice. By default the cache is a
    temporary directory that resets with the session; pass ``cache_dir``
    for a persistent cache.
    """

    def __init__(self, root, cache_dir=None):
        """
        :param root: The URL of the base directory. This can be
            anything supported by ``urllib.request.urlopen``.
        :param cache_dir: The path to the local persistent cache
            or ``None`` to use a temporary directory.
        """
        super().__init__(root)
        self._temp_dir = None
        if cache_dir is not None:
            if not os.path.isdir(cache_dir):
                raise ValueError(f"'{cache_dir}' is not a directory")
            self._cache_dir = cache_dir
        else:
            self._temp_dir = tempfile.TemporaryDirectory()
            self._cache_dir = self._temp_dir.name

    def __del__(self):
        if self._temp_dir is not None:
            self._temp_dir.cleanup()

    def reset(self):
        super().reset()
        # Only wipe the cache directory when we own it (i.e., a tempdir);
        # a user-supplied persistent cache_dir is left untouched.
        if self._temp_dir is not None:
            self._temp_dir.cleanup()
            self._temp_dir = tempfile.TemporaryDirectory()
            self._cache_dir = self._temp_dir.name

    def __repr__(self) -> str:
        if self._temp_dir is None:
            # Persistent cache: include the directory in the repr.
            return f"argo.CachedUrlMirror({repr(self._root)}, {repr(self._cache_dir)})"
        return f"argo.CachedUrlMirror({repr(self._root)})"

    def open(self, path) -> BinaryIO:
        return open(self.filename(path), 'rb')

    def filename(self, path) -> str:
        return os.path.join(self._cache_dir, path)

    def prepare(self, path_iter):
        # Download only the files that are not already cached.
        todo = [
            (path, self.url(path), self.filename(path))
            for path in path_iter
            if not os.path.exists(self.filename(path))
        ]
        targets = [(url, dest) for _, url, dest in todo]
        errors = download_async(targets, quiet=False, max_errors=50)
        if errors:
            indices, errors = zip(*errors)
            # Bug fix: the original indexed ``download_files[i][0]``, which is
            # the *URL* of the failed download; PathsDoNotExistError expects the
            # GDAC paths, which are the first element of the 3-tuples in `todo`.
            bad_paths = [todo[i][0] for i in indices]
            raise PathsDoNotExistError(bad_paths, errors)
        return self

    def netcdf_dataset_src(self, path):
        return self.filename(path)
import os
import urllib.request
import tempfile
from typing import BinaryIO
class PathsDoNotExistError(Exception):
    """Raised when one or more requested paths are missing locally
    and could not be downloaded.
    """

    def __init__(self, bad_paths, errors=None):
        self.bad_paths = bad_paths
        self.errors = errors
        # Show at most the first 20 offending paths in the message.
        shown = [f"'{p}'" for p in bad_paths[:20]]
        hidden = len(bad_paths) - 20
        summary = "\n".join(shown)
        if hidden > 0:
            summary += f'\n...and {hidden} more'
        super().__init__(summary)
class Mirror:
    """Abstract base class for all mirror types.

    Subclass this and use it with the main API if you have a
    non-standard mapping of files but still want the features of
    the package-level API.

    :param path: A path to a file on the GDAC (e.g.,
        /dac/csio/1234/1234_meta.nc)
    """

    def __init__(self):
        # Cache for objects derived from this mirror (e.g., global indexes).
        self.cache = {}

    def reset(self):
        """Drop any cached objects."""
        self.cache = {}

    def open(self, path) -> BinaryIO:
        """Get a file-like object for this ``path``."""
        raise NotImplementedError()

    def filename(self, path) -> str:
        """
        Get a filename for this path. The filename is not guaranteed
        to exist unless :meth:`prepare` is called first.
        """
        raise NotImplementedError()

    def prepare(self, path_iter):
        """
        Prepare the mirror for loading all the paths in ``path_iter``
        (e.g., by downloading them).

        :param path_iter: An iterable of ``path`` s.
        """
        raise NotImplementedError()

    def url(self, path):
        """Return the URL to ``path`` without checking if it exists."""
        raise NotImplementedError()

    def netcdf_dataset_src(self, path):
        """
        Return the best available input to
        :class:`argopandas.netcdf.NetCDFWrapper`.
        """
        raise NotImplementedError()
class FileMirror(Mirror):
    """
    A mirror backed by a root directory on the local filesystem.

    Useful when you hold a copy of Argo downloaded via ``rsync`` or a
    stable DOI version of the GDAC; a partial copy containing just the
    files you access frequently works too.
    """

    def __init__(self, root):
        """
        :param root: The root directory containing the files.
        """
        super().__init__()
        if not os.path.isdir(root):
            raise ValueError(f"'{root}' is not a directory")
        self._root = root

    def __repr__(self) -> str:
        return f"argo.FileMirror({repr(self._root)})"

    def open(self, path) -> BinaryIO:
        return open(self.filename(path), mode='rb')

    def filename(self, path) -> str:
        return os.path.join(self._root, path)

    def url(self, path) -> str:
        # file:// URLs always use forward slashes, even on Windows.
        absolute = os.path.abspath(self.filename(path))
        return 'file://' + absolute.replace('\\', '/')

    def prepare(self, path_iter):
        # Nothing to download; just verify that every path exists.
        missing = [p for p in path_iter
                   if not os.path.isfile(os.path.join(self._root, p))]
        if missing:
            raise PathsDoNotExistError(missing)
        return self

    def netcdf_dataset_src(self, path):
        return self.filename(path)
class UrlMirror(Mirror):
    """
    A cache-less mirror that reads straight from URL connections.

    You probably want :class:`CachedUrlMirror` unless you are doing
    real-time work that might be affected by an out-of-date cache.
    Note that :meth:`filename` is not supported by the ``UrlMirror``
    (use :meth:`open` instead).
    """

    def __init__(self, root):
        """
        :param root: The URL of the base directory. This can be
            anything supported by ``urllib.request.urlopen``.
        """
        super().__init__()
        # Normalize away a single trailing slash so url() can join cleanly.
        self._root = root[:-1] if root.endswith('/') else root

    def __repr__(self) -> str:
        return f"argo.UrlMirror({repr(self._root)})"

    def open(self, path) -> BinaryIO:
        return urllib.request.urlopen(self.url(path))

    def filename(self, path) -> str:
        raise NotImplementedError()

    def url(self, path) -> str:
        relative = path[1:] if path.startswith('/') else path
        return '/'.join((self._root, relative))

    def prepare(self, path_iter):
        # Files are fetched on demand; nothing to do ahead of time.
        return self

    def netcdf_dataset_src(self, path):
        return self.url(path)
class CachedUrlMirror(UrlMirror):
    """
    The most common mirror: URL-backed with a local download cache so
    the same file is never fetched twice. By default the cache is a
    temporary directory that resets with the session; pass ``cache_dir``
    for a persistent cache.
    """

    def __init__(self, root, cache_dir=None):
        """
        :param root: The URL of the base directory. This can be
            anything supported by ``urllib.request.urlopen``.
        :param cache_dir: The path to the local persistent cache
            or ``None`` to use a temporary directory.
        """
        super().__init__(root)
        self._temp_dir = None
        if cache_dir is not None:
            if not os.path.isdir(cache_dir):
                raise ValueError(f"'{cache_dir}' is not a directory")
            self._cache_dir = cache_dir
        else:
            self._temp_dir = tempfile.TemporaryDirectory()
            self._cache_dir = self._temp_dir.name

    def __del__(self):
        if self._temp_dir is not None:
            self._temp_dir.cleanup()

    def reset(self):
        super().reset()
        # Only wipe the cache directory when we own it (i.e., a tempdir);
        # a user-supplied persistent cache_dir is left untouched.
        if self._temp_dir is not None:
            self._temp_dir.cleanup()
            self._temp_dir = tempfile.TemporaryDirectory()
            self._cache_dir = self._temp_dir.name

    def __repr__(self) -> str:
        if self._temp_dir is None:
            # Persistent cache: include the directory in the repr.
            return f"argo.CachedUrlMirror({repr(self._root)}, {repr(self._cache_dir)})"
        return f"argo.CachedUrlMirror({repr(self._root)})"

    def open(self, path) -> BinaryIO:
        return open(self.filename(path), 'rb')

    def filename(self, path) -> str:
        return os.path.join(self._cache_dir, path)

    def prepare(self, path_iter):
        # Download only the files that are not already cached.
        todo = [
            (path, self.url(path), self.filename(path))
            for path in path_iter
            if not os.path.exists(self.filename(path))
        ]
        targets = [(url, dest) for _, url, dest in todo]
        errors = download_async(targets, quiet=False, max_errors=50)
        if errors:
            indices, errors = zip(*errors)
            # Bug fix: the original indexed ``download_files[i][0]``, which is
            # the *URL* of the failed download; PathsDoNotExistError expects the
            # GDAC paths, which are the first element of the 3-tuples in `todo`.
            bad_paths = [todo[i][0] for i in indices]
            raise PathsDoNotExistError(bad_paths, errors)
        return self

    def netcdf_dataset_src(self, path):
        return self.filename(path)
"""
Compare marginal-effect estimators -- Friedman partial dependence (FPD),
SHAP, ALE (optional; needs precomputed csv files) and StratPD -- on the same
synthetic interaction data set, and save the combined article figure.
"""
import shap
from stratx.ice import friedman_partial_dependence
from stratx import plot_stratpd
from articles.pd.support import synthetic_interaction_data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from sklearn.ensemble import RandomForestRegressor

np.random.seed(1)  # pick seed for reproducible article images

# reuse exact same data used by ALE
# n = 1000
# df = synthetic_interaction_data(n)
df = pd.read_csv("interaction.csv")

X, y = df[['x1', 'x2', 'x3']].copy(), df['y'].copy()
X1 = X.iloc[:, 0]
X2 = X.iloc[:, 1]
X3 = X.iloc[:, 2]  # UNUSED in y

# Fit the model whose marginal effects we want to explain.
rf = RandomForestRegressor(n_estimators=30, oob_score=True)
rf.fit(X, y)
print("R^2 training", rf.score(X, y))
print("R^2 OOB", rf.oob_score_)
print("mean(y) =", np.mean(y))
print("mean(X_1), mean(X_2) =", np.mean(X1), np.mean(X2))

# Friedman partial dependence curves, one per feature.
pdp_x1 = friedman_partial_dependence(rf, X, 'x1', numx=None, mean_centered=False)
pdp_x2 = friedman_partial_dependence(rf, X, 'x2', numx=None, mean_centered=False)
pdp_x3 = friedman_partial_dependence(rf, X, 'x3', numx=None, mean_centered=False)
m1 = np.mean(pdp_x1[1])
m2 = np.mean(pdp_x2[1])
m3 = np.mean(pdp_x3[1])
print("mean(PDP_1) =", m1)
print("mean(PDP_2) =", m2)
print("mean(PDP_3) =", m3)  # bug fix: label previously said "mean(PDP_2)"
print("mean abs PDP_1-ybar", np.mean(np.abs(pdp_x1[1] - m1)))
print("mean abs PDP_2-ybar", np.mean(np.abs(pdp_x2[1] - m2)))
print("mean abs PDP_3-ybar", np.mean(np.abs(pdp_x3[1] - m3)))

# Interventional SHAP values from the same forest.
explainer = shap.TreeExplainer(rf, data=X,
                               feature_perturbation='interventional')
shap_values = explainer.shap_values(X, check_additivity=False)
shapavg = np.mean(shap_values, axis=0)
print("SHAP avg x1,x2,x3 =", shapavg)
shapimp = np.mean(np.abs(shap_values), axis=0)
print("SHAP avg |x1|,|x2|,|x3| =", shapimp)

x1_color = '#1E88E5'
x2_color = 'orange'
x3_color = '#A22396'


def feature_patches():
    """Legend handles mapping each feature to its plot color."""
    return [mpatches.Patch(color=x1_color, label='$x_1$'),
            mpatches.Patch(color=x2_color, label='$x_2$'),
            mpatches.Patch(color=x3_color, label='$x_3$')]


def thin_spines(ax):
    """Thin all spines and hide the top/right ones (article style)."""
    for side in ('top', 'right', 'left', 'bottom'):
        ax.spines[side].set_linewidth(.5)
    ax.spines['top'].set_color('none')
    ax.spines['right'].set_color('none')


# Bug fix: the original allocated only two axes (plt.subplots(1, 2)) yet the
# StratPD panel below always draws into axes[3] (and the optional ALE panel
# into axes[2]), which raised IndexError. Allocate all four panels and scale
# the figure width accordingly.
fig, axes = plt.subplots(1, 4, figsize=(11, 2.8))

# (a) Friedman FPD
axes[0].plot(pdp_x1[0], pdp_x1[1], '.', markersize=1, c=x1_color, label='$FPD_1$', alpha=1)
axes[0].plot(pdp_x2[0], pdp_x2[1], '.', markersize=1, c=x2_color, label='$FPD_2$', alpha=1)
axes[0].plot(pdp_x3[0], pdp_x3[1], '.', markersize=1, c=x3_color, label='$FPD_3$', alpha=1)
axes[0].text(0, 75, f"$\\bar{{y}}={np.mean(y):.1f}$", fontsize=13)
axes[0].set_xticks([0,2,4,6,8,10])
axes[0].set_xlabel("$x_1, x_2, x_3$", fontsize=10)
axes[0].set_ylabel("y")
axes[0].set_yticks([0, 25, 50, 75, 100, 125, 150])
axes[0].set_ylim(-10,160)
axes[0].set_title(f"(a) Friedman FPD")
thin_spines(axes[0])
axes[0].legend(handles=feature_patches(), fontsize=10)

# (b) SHAP
shap.dependence_plot("x1", shap_values, X,
                     interaction_index=None, ax=axes[1], dot_size=4,
                     show=False, alpha=.5, color=x1_color)
shap.dependence_plot("x2", shap_values, X,
                     interaction_index=None, ax=axes[1], dot_size=4,
                     show=False, alpha=.5, color=x2_color)
shap.dependence_plot("x3", shap_values, X,
                     interaction_index=None, ax=axes[1], dot_size=4,
                     show=False, alpha=.5, color=x3_color)
axes[1].set_xticks([0,2,4,6,8,10])
axes[1].set_xlabel("$x_1, x_2, x_3$", fontsize=12)
axes[1].set_ylim(-95,110)
axes[1].set_title("(b) SHAP")
axes[1].set_ylabel("SHAP values", fontsize=11)
axes[1].legend(handles=feature_patches(), fontsize=12)

# (c) ALE -- disabled; requires the precomputed *_ale.csv files.
if False:
    df_x1 = pd.read_csv("../images/x1_ale.csv")
    df_x2 = pd.read_csv("../images/x2_ale.csv")
    df_x3 = pd.read_csv("../images/x3_ale.csv")
    axes[2].plot(df_x1['x.values'], df_x1['f.values'], '.', color=x1_color, markersize=2)
    axes[2].plot(df_x2['x.values'], df_x2['f.values'], '.', color=x2_color, markersize=2)
    axes[2].plot(df_x3['x.values'], df_x3['f.values'], '.', color=x3_color, markersize=2)
    axes[2].set_title("(c) ALE")
    axes[2].set_xlabel("$x_1, x_2, x_3$", fontsize=12)
    axes[2].set_ylim(-95,110)
    axes[2].set_xticks([0,2,4,6,8,10])
    thin_spines(axes[2])
    axes[2].legend(handles=feature_patches(), fontsize=12, loc='upper left')

# (d) StratPD
plot_stratpd(X, y, "x1", "y", ax=axes[3], pdp_marker_size=1,
             pdp_marker_color=x1_color,
             show_x_counts=False, n_trials=1, show_slope_lines=False)
plot_stratpd(X, y, "x2", "y", ax=axes[3], pdp_marker_size=1,
             pdp_marker_color=x2_color,
             show_x_counts=False, n_trials=1, show_slope_lines=False)
plot_stratpd(X, y, "x3", "y", ax=axes[3], pdp_marker_size=1,
             pdp_marker_color=x3_color,
             show_x_counts=False, n_trials=1, show_slope_lines=False)
axes[3].set_xticks([0,2,4,6,8,10])
axes[3].set_ylim(-20,160)
axes[3].set_yticks([0, 25, 50, 75, 100, 125, 150])
axes[3].set_xlabel("$x_1, x_2, x_3$", fontsize=12)
axes[3].set_title("(d) StratPD")
thin_spines(axes[3])
axes[3].legend(handles=feature_patches(), fontsize=12)

plt.tight_layout()
plt.savefig("../images/FPD-SHAP-PD.pdf")
plt.show()
from stratx.ice import friedman_partial_dependence
from stratx import plot_stratpd
from articles.pd.support import synthetic_interaction_data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from sklearn.ensemble import RandomForestRegressor
np.random.seed(1) # pick seed for reproducible article images
# reuse exact same data used by ALE
# n = 1000
# df = synthetic_interaction_data(n)
df = pd.read_csv("interaction.csv")
X, y = df[['x1', 'x2', 'x3']].copy(), df['y'].copy()
X1 = X.iloc[:, 0]
X2 = X.iloc[:, 1]
X3 = X.iloc[:, 2] # UNUSED in y
rf = RandomForestRegressor(n_estimators=30, oob_score=True)
rf.fit(X, y)
print("R^2 training", rf.score(X, y))
print("R^2 OOB", rf.oob_score_)
print("mean(y) =", np.mean(y))
print("mean(X_1), mean(X_2) =", np.mean(X1), np.mean(X2))
pdp_x1 = friedman_partial_dependence(rf, X, 'x1', numx=None, mean_centered=False)
pdp_x2 = friedman_partial_dependence(rf, X, 'x2', numx=None, mean_centered=False)
pdp_x3 = friedman_partial_dependence(rf, X, 'x3', numx=None, mean_centered=False)
m1 = np.mean(pdp_x1[1])
m2 = np.mean(pdp_x2[1])
m3 = np.mean(pdp_x3[1])
print("mean(PDP_1) =", np.mean(pdp_x1[1]))
print("mean(PDP_2) =", np.mean(pdp_x2[1]))
print("mean(PDP_2) =", np.mean(pdp_x3[1]))
print("mean abs PDP_1-ybar", np.mean(np.abs(pdp_x1[1] - m1)))
print("mean abs PDP_2-ybar", np.mean(np.abs(pdp_x2[1] - m2)))
print("mean abs PDP_3-ybar", np.mean(np.abs(pdp_x3[1] - m3)))
explainer = shap.TreeExplainer(rf, data=X,
feature_perturbation='interventional')
shap_values = explainer.shap_values(X, check_additivity=False)
shapavg = np.mean(shap_values, axis=0)
print("SHAP avg x1,x2,x3 =", shapavg)
shapimp = np.mean(np.abs(shap_values), axis=0)
print("SHAP avg |x1|,|x2|,|x3| =", shapimp)
fig, axes = plt.subplots(1,2,figsize=(5.5,2.8))
x1_color = '#1E88E5'
x2_color = 'orange'
x3_color = '#A22396'
axes[0].plot(pdp_x1[0], pdp_x1[1], '.', markersize=1, c=x1_color, label='$FPD_1$', alpha=1)
axes[0].plot(pdp_x2[0], pdp_x2[1], '.', markersize=1, c=x2_color, label='$FPD_2$', alpha=1)
axes[0].plot(pdp_x3[0], pdp_x3[1], '.', markersize=1, c=x3_color, label='$FPD_3$', alpha=1)
axes[0].text(0, 75, f"$\\bar{{y}}={np.mean(y):.1f}$", fontsize=13)
axes[0].set_xticks([0,2,4,6,8,10])
axes[0].set_xlabel("$x_1, x_2, x_3$", fontsize=10)
axes[0].set_ylabel("y")
axes[0].set_yticks([0, 25, 50, 75, 100, 125, 150])
axes[0].set_ylim(-10,160)
axes[0].set_title(f"(a) Friedman FPD")
axes[0].spines['top'].set_linewidth(.5)
axes[0].spines['right'].set_linewidth(.5)
axes[0].spines['left'].set_linewidth(.5)
axes[0].spines['bottom'].set_linewidth(.5)
axes[0].spines['top'].set_color('none')
axes[0].spines['right'].set_color('none')
x1_patch = mpatches.Patch(color=x1_color, label='$x_1$')
x2_patch = mpatches.Patch(color=x2_color, label='$x_2$')
x3_patch = mpatches.Patch(color=x3_color, label='$x_3$')
axes[0].legend(handles=[x1_patch,x2_patch,x3_patch], fontsize=10)
# axes[0].legend(fontsize=10)
#axes[1].plot(shap_values)
shap.dependence_plot("x1", shap_values, X,
interaction_index=None, ax=axes[1], dot_size=4,
show=False, alpha=.5, color=x1_color)
shap.dependence_plot("x2", shap_values, X,
interaction_index=None, ax=axes[1], dot_size=4,
show=False, alpha=.5, color=x2_color)
shap.dependence_plot("x3", shap_values, X,
interaction_index=None, ax=axes[1], dot_size=4,
show=False, alpha=.5, color=x3_color)
axes[1].set_xticks([0,2,4,6,8,10])
axes[1].set_xlabel("$x_1, x_2, x_3$", fontsize=12)
axes[1].set_ylim(-95,110)
axes[1].set_title("(b) SHAP")
axes[1].set_ylabel("SHAP values", fontsize=11)
x1_patch = mpatches.Patch(color=x1_color, label='$x_1$')
x2_patch = mpatches.Patch(color=x2_color, label='$x_2$')
x3_patch = mpatches.Patch(color=x3_color, label='$x_3$')
axes[1].legend(handles=[x1_patch,x2_patch,x3_patch], fontsize=12)
if False:
df_x1 = pd.read_csv("../images/x1_ale.csv")
df_x2 = pd.read_csv("../images/x2_ale.csv")
df_x3 = pd.read_csv("../images/x3_ale.csv")
axes[2].plot(df_x1['x.values'],df_x1['f.values'],'.',color=x1_color,markersize=2)
axes[2].plot(df_x2['x.values'],df_x2['f.values'],'.',color=x2_color,markersize=2)
axes[2].plot(df_x3['x.values'],df_x3['f.values'],'.',color=x3_color,markersize=2)
axes[2].set_title("(c) ALE")
# axes[2].set_ylabel("y", fontsize=12)
axes[2].set_xlabel("$x_1, x_2, x_3$", fontsize=12)
axes[2].set_ylim(-95,110)
# axes[2].tick_params(axis='both', which='major', labelsize=10)
axes[2].set_xticks([0,2,4,6,8,10])
axes[2].spines['top'].set_linewidth(.5)
axes[2].spines['right'].set_linewidth(.5)
axes[2].spines['left'].set_linewidth(.5)
axes[2].spines['bottom'].set_linewidth(.5)
axes[2].spines['top'].set_color('none')
axes[2].spines['right'].set_color('none')
x1_patch = mpatches.Patch(color=x1_color, label='$x_1$')
x2_patch = mpatches.Patch(color=x2_color, label='$x_2$')
x3_patch = mpatches.Patch(color=x3_color, label='$x_3$')
axes[2].legend(handles=[x1_patch,x2_patch,x3_patch], fontsize=12, loc='upper left')
plot_stratpd(X, y, "x1", "y", ax=axes[3], pdp_marker_size=1,
pdp_marker_color=x1_color,
show_x_counts=False, n_trials=1, show_slope_lines=False)
plot_stratpd(X, y, "x2", "y", ax=axes[3], pdp_marker_size=1,
pdp_marker_color=x2_color,
show_x_counts=False, n_trials=1, show_slope_lines=False)
plot_stratpd(X, y, "x3", "y", ax=axes[3], pdp_marker_size=1,
pdp_marker_color=x3_color,
show_x_counts=False, n_trials=1, show_slope_lines=False)
axes[3].set_xticks([0,2,4,6,8,10])
axes[3].set_ylim(-20,160)
axes[3].set_yticks([0, 25, 50, 75, 100, 125, 150])
axes[3].set_xlabel("$x_1, x_2, x_3$", fontsize=12)
# axes[3].set_ylabel("y", fontsize=12)
axes[3].set_title("(d) StratPD")
axes[3].spines['top'].set_linewidth(.5)
axes[3].spines['right'].set_linewidth(.5)
axes[3].spines['left'].set_linewidth(.5)
axes[3].spines['bottom'].set_linewidth(.5)
axes[3].spines['top'].set_color('none')
axes[3].spines['right'].set_color('none')
x1_patch = mpatches.Patch(color=x1_color, label='$x_1$')
x2_patch = mpatches.Patch(color=x2_color, label='$x_2$')
x3_patch = mpatches.Patch(color=x3_color, label='$x_3$')
axes[3].legend(handles=[x1_patch,x2_patch,x3_patch], fontsize=12)
plt.tight_layout()
plt.savefig("../images/FPD-SHAP-PD.pdf")
plt.show() | 0.357231 | 0.577138 |
import os
import re
from ..google_api import (
GDrive,
GSheets,
GCellType,
)
from .spreadsheet import Spreadsheet
# Matches a bare Google file ID: a literal '1' followed by 43 URL-safe
# base64 characters (44 characters total).
_google_file_id_regex = re.compile(r'^1[a-zA-Z0-9_-]{43}$')
class GSheetsReader(Spreadsheet):

    def __init__(self, file, **kwargs):
        """Read a Google Sheets spreadsheet.

        This class simply provides a convenience for reading information
        from Google spreadsheets. It is not registered as a :class:`~msl.io.base_io.Reader`
        because the information in a spreadsheet is unstructured and therefore
        one cannot generalize how to parse a spreadsheet to create a
        :class:`~msl.io.base_io.Root`.

        Parameters
        ----------
        file : :class:`str`
            The ID or path of a Google Sheets spreadsheet.
        **kwargs
            All keyword arguments are passed to :class:`~msl.io.google_api.GSheets`.

        Examples
        --------
        >>> from msl.io import GSheetsReader  # doctest: +SKIP
        >>> sheets = GSheetsReader('Google Drive/registers/equipment.gsheet')  # doctest: +SKIP
        >>> sheets = GSheetsReader('1TI3pM-534SZ5DQTEZ-7HCI04648f8ZpLGbfHWJu9FSo')  # doctest: +SKIP
        """
        super(GSheetsReader, self).__init__(file)

        # Reading never modifies a spreadsheet, so write access is refused.
        if not kwargs.get('is_read_only', True):
            raise ValueError('Must instantiate {} in read-only mode'.format(self.__class__.__name__))

        # Decide whether `file` is already a spreadsheet ID or a path that
        # must be resolved to an ID through the Google Drive API.
        path, ext = os.path.splitext(file)
        folders, _ = os.path.split(path)
        is_bare_id = not ext and not folders and _google_file_id_regex.match(path)
        if is_bare_id:
            self._spreadsheet_id = path
        else:
            self._spreadsheet_id = GDrive(**kwargs).file_id(path, mime_type=GSheets.MIME_TYPE)

        self._gsheets = GSheets(**kwargs)
        self._cached_sheet_name = None

    def read(self, cell=None, sheet=None, as_datetime=True):
        """Read values from the Google Sheets spreadsheet.

        Parameters
        ----------
        cell : :class:`str`, optional
            The cell(s) to read. For example, ``C9`` will return a single value
            and ``C9:G20`` will return all values in the specified range. If not
            specified then returns all values in the specified `sheet`.
        sheet : :class:`str`, optional
            The name of the sheet to read the value(s) from. If there is only
            one sheet in the spreadsheet then you do not need to specify the name
            of the sheet.
        as_datetime : :class:`bool`, optional
            Whether dates should be returned as :class:`~datetime.datetime` or
            :class:`~datetime.date` objects. If :data:`False` then dates are
            returned as a string in the format of the spreadsheet cell.

        Returns
        -------
        The value(s) of the requested cell(s).

        Examples
        --------
        .. invisible-code-block: pycon

           >>> SKIP_IF_NO_GOOGLE_SHEETS_READ_TOKEN()

        >>> from msl.io import GSheetsReader
        >>> sheets = GSheetsReader('1TI3pM-534SZ5DQTEZ-7vCI04l48f8ZpLGbfEWJuCFSo', is_corporate_account=False)
        >>> sheets.read()
        [('temperature', 'humidity'), (20.33, 49.82), (20.23, 46.06), (20.41, 47.06), (20.29, 48.32)]
        >>> sheets.read('B2')
        49.82
        >>> sheets.read('A:A')
        [('temperature',), (20.33,), (20.23,), (20.41,), (20.29,)]
        >>> sheets.read('A1:B1')
        [('temperature', 'humidity')]
        >>> sheets.read('A2:B4')
        [(20.33, 49.82), (20.23, 46.06), (20.41, 47.06)]
        """
        # Resolve the sheet name: explicit argument, then the cached name,
        # then (only if the spreadsheet has exactly one sheet) the sole sheet.
        if not sheet:
            sheet = self._cached_sheet_name
        if not sheet:
            names = self.sheet_names()
            if len(names) != 1:
                raise ValueError('{!r} contains the following sheets:\n  {}\n'
                                 'You must specify the name of the sheet to read'
                                 .format(self._file, ', '.join(repr(n) for n in names)))
            sheet = names[0]
            self._cached_sheet_name = sheet

        ranges = '{}!{}'.format(sheet, cell) if cell else sheet
        cells = self._gsheets.cells(self._spreadsheet_id, ranges=ranges)
        if sheet not in cells:
            raise ValueError('There is no sheet named {!r} in {!r}'.format(sheet, self._file))

        def _convert(item):
            # Date(-time) cells honour `as_datetime`; all other cells pass through.
            if item.type == GCellType.DATE:
                return GSheets.to_datetime(item.value).date() if as_datetime else item.formatted
            if item.type == GCellType.DATE_TIME:
                return GSheets.to_datetime(item.value) if as_datetime else item.formatted
            return item.value

        values = [tuple(_convert(item) for item in row) for row in cells[sheet]]

        if not cell:
            return values
        if ':' not in cell:
            # A single cell was requested -> unwrap the 1x1 result (or None).
            return values[0][0] if values else None
        return values

    def sheet_names(self):
        """Get the names of all sheets in the Google Sheets spreadsheet.

        Returns
        -------
        :class:`tuple` of :class:`str`
            The names of all sheets.
        """
        return self._gsheets.sheet_names(self._spreadsheet_id)
import re
from ..google_api import (
GDrive,
GSheets,
GCellType,
)
from .spreadsheet import Spreadsheet
# Matches a bare Google file ID: a literal '1' followed by 43 URL-safe
# base64 characters (44 characters total).
_google_file_id_regex = re.compile(r'^1[a-zA-Z0-9_-]{43}$')
class GSheetsReader(Spreadsheet):
def __init__(self, file, **kwargs):
"""Read a Google Sheets spreadsheet.
This class simply provides a convenience for reading information
from Google spreadsheets. It is not registered as a :class:`~msl.io.base_io.Reader`
because the information in a spreadsheet is unstructured and therefore
one cannot generalize how to parse a spreadsheet to create a
:class:`~msl.io.base_io.Root`.
Parameters
----------
file : :class:`str`
The ID or path of a Google Sheets spreadsheet.
**kwargs
All keyword arguments are passed to :class:`~msl.io.google_api.GSheets`.
Examples
--------
>>> from msl.io import GSheetsReader # doctest: +SKIP
>>> sheets = GSheetsReader('Google Drive/registers/equipment.gsheet') # doctest: +SKIP
>>> sheets = GSheetsReader('1TI3pM-534SZ5DQTEZ-7HCI04648f8ZpLGbfHWJu9FSo') # doctest: +SKIP
"""
super(GSheetsReader, self).__init__(file)
if not kwargs.get('is_read_only', True):
raise ValueError('Must instantiate {} in read-only mode'.format(self.__class__.__name__))
path, ext = os.path.splitext(file)
folders, _ = os.path.split(path)
if ext or folders or not _google_file_id_regex.match(path):
self._spreadsheet_id = GDrive(**kwargs).file_id(path, mime_type=GSheets.MIME_TYPE)
else:
self._spreadsheet_id = path
self._gsheets = GSheets(**kwargs)
self._cached_sheet_name = None
def read(self, cell=None, sheet=None, as_datetime=True):
"""Read values from the Google Sheets spreadsheet.
Parameters
----------
cell : :class:`str`, optional
The cell(s) to read. For example, ``C9`` will return a single value
and ``C9:G20`` will return all values in the specified range. If not
specified then returns all values in the specified `sheet`.
sheet : :class:`str`, optional
The name of the sheet to read the value(s) from. If there is only
one sheet in the spreadsheet then you do not need to specify the name
of the sheet.
as_datetime : :class:`bool`, optional
Whether dates should be returned as :class:`~datetime.datetime` or
:class:`~datetime.date` objects. If :data:`False` then dates are
returned as a string in the format of the spreadsheet cell.
Returns
-------
The value(s) of the requested cell(s).
Examples
--------
.. invisible-code-block: pycon
>>> SKIP_IF_NO_GOOGLE_SHEETS_READ_TOKEN()
>>> from msl.io import GSheetsReader
>>> sheets = GSheetsReader('1TI3pM-534SZ5DQTEZ-7vCI04l48f8ZpLGbfEWJuCFSo', is_corporate_account=False)
>>> sheets.read()
[('temperature', 'humidity'), (20.33, 49.82), (20.23, 46.06), (20.41, 47.06), (20.29, 48.32)]
>>> sheets.read('B2')
49.82
>>> sheets.read('A:A')
[('temperature',), (20.33,), (20.23,), (20.41,), (20.29,)]
>>> sheets.read('A1:B1')
[('temperature', 'humidity')]
>>> sheets.read('A2:B4')
[(20.33, 49.82), (20.23, 46.06), (20.41, 47.06)]
"""
if not sheet:
if self._cached_sheet_name:
sheet = self._cached_sheet_name
else:
names = self.sheet_names()
if len(names) != 1:
raise ValueError('{!r} contains the following sheets:\n {}\n'
'You must specify the name of the sheet to read'
.format(self._file, ', '.join(repr(n) for n in names)))
sheet = names[0]
self._cached_sheet_name = sheet
if cell:
ranges = '{}!{}'.format(sheet, cell)
else:
ranges = sheet
cells = self._gsheets.cells(self._spreadsheet_id, ranges=ranges)
if sheet not in cells:
raise ValueError('There is no sheet named {!r} in {!r}'.format(sheet, self._file))
values = []
for row in cells[sheet]:
row_values = []
for item in row:
if item.type == GCellType.DATE:
value = GSheets.to_datetime(item.value).date() if as_datetime else item.formatted
elif item.type == GCellType.DATE_TIME:
value = GSheets.to_datetime(item.value) if as_datetime else item.formatted
else:
value = item.value
row_values.append(value)
values.append(tuple(row_values))
if not cell:
return values
if ':' not in cell:
if values:
return values[0][0]
return
return values
def sheet_names(self):
    """Get the names of all sheets in the Google Sheets spreadsheet.

    Returns
    -------
    :class:`tuple` of :class:`str`
        The names of all sheets.
    """
    # Delegates to the underlying GSheets client for this spreadsheet id.
    # NOTE(review): the trailing "| 0.800458 | 0.385722" below looks like
    # dataset-extraction residue fused onto this source line -- confirm and remove.
    return self._gsheets.sheet_names(self._spreadsheet_id) | 0.800458 | 0.385722
import pyCardDeck
from typing import List
class Gamer:
    """A single participant at the table, holding a hand of cards."""

    def __init__(self, name: str):
        # Empty hand until the dealer deals.
        self.name = name
        self.hand = []

    def __str__(self):
        # A gamer prints as their name.
        return self.name
class GamePlace:
    """A poker table: owns the deck, the community cards and the seated gamers."""

    def __init__(self, gamers: List[Gamer]):
        self.deck = pyCardDeck.Deck(
            cards=generate_deck(),
            name='Poker deck',
            reshuffle=False)
        self.gamers = gamers
        self.table_cards = []
        # Fold/elimination bookkeeping, keyed by gamer name.  The original
        # referenced these sets in fold()/remove()/reset() without ever
        # initialising them.
        self._gamer_ids = {gamer.name for gamer in gamers}
        self._folder_ids = set()
        self._dead_gamer_ids = set()
        print("Created a table with {} gamers".format(len(self.gamers)))

    def Cantrell_Draw(self):
        """
        Basic Five card game structure.
        """
        print("Starting a round of Cantrell Draw")
        self.deck.shuffle()
        self.deal_cards(5)
        # Imagine the first round of betting happened here after the cards are
        # drawn and visible to each gamer.  Gamers who fold should be passed to
        # fold()/remove() by that betting logic (the original called them with
        # no arguments, which raised TypeError).
        self.draw1()
        self.after_the_draw()
        # Imagine some more betting and the winner decision here.
        self.reset()  # clear per-hand folds, keeping eliminated gamers out
        self.cleanup()

    def deal_cards(self, number: int):
        """
        Dealer goes through all available gamers and deals them cards.

        :param number: How many cards to deal to each gamer
        :type number: int
        """
        for _ in range(number):
            for gamer in self.gamers:
                card = self.deck.draw()
                gamer.hand.append(card)
                # Original line was missing its closing parenthesis.
                print("Dealt {} to gamer {}".format(card, gamer))

    def draw1(self, number: int = 0):
        """
        After the first round of betting, if more than one gamer remains, a
        draw occurs: a card is burned and replacement cards are dealt to the
        table.

        :param number: how many replacement cards to deal; when 0 (default)
            the count is read interactively, matching the original design.
        """
        # The original tested an undefined name `gamers`; use the table state.
        if len(self.gamers) > 1:
            if number <= 0:
                number = int(input("how many card/cards you want to replace?"))
            burned = self.deck.draw()
            self.deck.discard(burned)
            print("Burned a card/cards: {}".format(burned))
            for _ in range(number):
                card = self.deck.draw()
                self.table_cards.append(card)
                print("New card on the table: {}".format(card))
        else:
            print("Game has ended because of only 1 gamer or no gamer exists on the table")

    def fold(self, gamer_id):
        """Mark a gamer as having folded the current hand."""
        if gamer_id not in self._gamer_ids:
            raise ValueError("Unknown gamer id")
        self._folder_ids.add(gamer_id)

    def remove(self, gamer_id):
        """Fold a gamer and eliminate them from the game entirely."""
        self.fold(gamer_id)
        self._dead_gamer_ids.add(gamer_id)

    def reset(self):
        """Start a fresh hand: only eliminated gamers stay folded."""
        self._folder_ids = set(self._dead_gamer_ids)

    def after_the_draw(self):
        """
        A second "after the draw" betting round occurs, followed by a showdown
        if more than one gamer remains in the hand.
        """
        if len(self.gamers) - len(self._folder_ids) > 1:
            # The original called `self.5card()`, which is invalid syntax
            # (identifiers cannot start with a digit).
            self.showdown()
        else:
            print("only 1 gamer and the winner is declared")

    def showdown(self):
        """Placeholder: check the remaining gamers for the highest holding."""
        pass

    def cleanup(self):
        """
        Cleans up the table to gather all the cards back.
        """
        for gamer in self.gamers:
            for card in gamer.hand:
                self.deck.discard(card)
        for card in self.table_cards:
            self.deck.discard(card)
        self.deck.shuffle_back()
        print("Cleanup done")
def generate_deck() -> List[pyCardDeck.PokerCard]:
    """
    Generate the deck by iterating suits and ranks instead of writing all
    cards out by hand.

    :return: List with all 52 poker playing cards (the original docstring
        said 50, but 4 suits x 13 ranks is 52)
    :rtype: List[pyCardDeck.PokerCard]
    """
    suits = ['Hearts', 'Diamonds', 'Clubs', 'Spades']
    ranks = {'A': 'Ace',
             '2': 'Two',
             '3': 'Three',
             '4': 'Four',
             '5': 'Five',
             '6': 'Six',
             '7': 'Seven',
             '8': 'Eight',
             '9': 'Nine',
             '10': 'Ten',
             'J': 'Jack',
             'Q': 'Queen',
             'K': 'King'}
    # The original referenced a bare `PokerCard`, which is never imported;
    # only the `pyCardDeck` module is in scope in this file.
    cards = [pyCardDeck.PokerCard(suit, rank, name)
             for suit in suits
             for rank, name in ranks.items()]
    print('Generated deck of cards for the table')
    return cards
if __name__ == '__main__':
table = GamePlace([Gamer("Jack"), Gamer("John"), Gamer("Peter")])
table.Cantrell_Draw() | task1.py | import pyCardDeck
from typing import List
class Gamer:
    """A single participant at the table, holding a hand of cards."""

    def __init__(self, name: str):
        # Empty hand until the dealer deals.
        self.name = name
        self.hand = []

    def __str__(self):
        # A gamer prints as their name.
        return self.name
class GamePlace:
    """A poker table: owns the deck, the community cards and the seated gamers."""

    def __init__(self, gamers: List[Gamer]):
        self.deck = pyCardDeck.Deck(
            cards=generate_deck(),
            name='Poker deck',
            reshuffle=False)
        self.gamers = gamers
        self.table_cards = []
        # Fold/elimination bookkeeping, keyed by gamer name.  The original
        # referenced these sets in fold()/remove()/reset() without ever
        # initialising them.
        self._gamer_ids = {gamer.name for gamer in gamers}
        self._folder_ids = set()
        self._dead_gamer_ids = set()
        print("Created a table with {} gamers".format(len(self.gamers)))

    def Cantrell_Draw(self):
        """
        Basic Five card game structure.
        """
        print("Starting a round of Cantrell Draw")
        self.deck.shuffle()
        self.deal_cards(5)
        # Imagine the first round of betting happened here after the cards are
        # drawn and visible to each gamer.  Gamers who fold should be passed to
        # fold()/remove() by that betting logic (the original called them with
        # no arguments, which raised TypeError).
        self.draw1()
        self.after_the_draw()
        # Imagine some more betting and the winner decision here.
        self.reset()  # clear per-hand folds, keeping eliminated gamers out
        self.cleanup()

    def deal_cards(self, number: int):
        """
        Dealer goes through all available gamers and deals them cards.

        :param number: How many cards to deal to each gamer
        :type number: int
        """
        for _ in range(number):
            for gamer in self.gamers:
                card = self.deck.draw()
                gamer.hand.append(card)
                # Original line was missing its closing parenthesis.
                print("Dealt {} to gamer {}".format(card, gamer))

    def draw1(self, number: int = 0):
        """
        After the first round of betting, if more than one gamer remains, a
        draw occurs: a card is burned and replacement cards are dealt to the
        table.

        :param number: how many replacement cards to deal; when 0 (default)
            the count is read interactively, matching the original design.
        """
        # The original tested an undefined name `gamers`; use the table state.
        if len(self.gamers) > 1:
            if number <= 0:
                number = int(input("how many card/cards you want to replace?"))
            burned = self.deck.draw()
            self.deck.discard(burned)
            print("Burned a card/cards: {}".format(burned))
            for _ in range(number):
                card = self.deck.draw()
                self.table_cards.append(card)
                print("New card on the table: {}".format(card))
        else:
            print("Game has ended because of only 1 gamer or no gamer exists on the table")

    def fold(self, gamer_id):
        """Mark a gamer as having folded the current hand."""
        if gamer_id not in self._gamer_ids:
            raise ValueError("Unknown gamer id")
        self._folder_ids.add(gamer_id)

    def remove(self, gamer_id):
        """Fold a gamer and eliminate them from the game entirely."""
        self.fold(gamer_id)
        self._dead_gamer_ids.add(gamer_id)

    def reset(self):
        """Start a fresh hand: only eliminated gamers stay folded."""
        self._folder_ids = set(self._dead_gamer_ids)

    def after_the_draw(self):
        """
        A second "after the draw" betting round occurs, followed by a showdown
        if more than one gamer remains in the hand.
        """
        if len(self.gamers) - len(self._folder_ids) > 1:
            # The original called `self.5card()`, which is invalid syntax
            # (identifiers cannot start with a digit).
            self.showdown()
        else:
            print("only 1 gamer and the winner is declared")

    def showdown(self):
        """Placeholder: check the remaining gamers for the highest holding."""
        pass

    def cleanup(self):
        """
        Cleans up the table to gather all the cards back.
        """
        for gamer in self.gamers:
            for card in gamer.hand:
                self.deck.discard(card)
        for card in self.table_cards:
            self.deck.discard(card)
        self.deck.shuffle_back()
        print("Cleanup done")
def generate_deck() -> List[pyCardDeck.PokerCard]:
    """
    Generate the deck by iterating suits and ranks instead of writing all
    cards out by hand.

    :return: List with all 52 poker playing cards (the original docstring
        said 50, but 4 suits x 13 ranks is 52)
    :rtype: List[pyCardDeck.PokerCard]
    """
    suits = ['Hearts', 'Diamonds', 'Clubs', 'Spades']
    ranks = {'A': 'Ace',
             '2': 'Two',
             '3': 'Three',
             '4': 'Four',
             '5': 'Five',
             '6': 'Six',
             '7': 'Seven',
             '8': 'Eight',
             '9': 'Nine',
             '10': 'Ten',
             'J': 'Jack',
             'Q': 'Queen',
             'K': 'King'}
    # The original referenced a bare `PokerCard`, which is never imported;
    # only the `pyCardDeck` module is in scope in this file.
    cards = [pyCardDeck.PokerCard(suit, rank, name)
             for suit in suits
             for rank, name in ranks.items()]
    print('Generated deck of cards for the table')
    return cards
if __name__ == '__main__':
table = GamePlace([Gamer("Jack"), Gamer("John"), Gamer("Peter")])
table.Cantrell_Draw() | 0.487307 | 0.251203 |
import sys
import pandas as pd
data = pd.read_csv("schedule_u.csv")

# Course whitelist: only these course codes get a count column.
classes = []
with open("whitelist.txt", "r") as infile:
    for line in infile:
        classes.append(line.strip())


def _build_year_counts(year):
    """Count, per (student, trimester), how many times each whitelisted
    course appears in `data` for the given schedule year.

    The original script repeated this loop verbatim for 2016/2017/2018;
    it also used DataFrame.append, which was removed in pandas 2.0.
    """
    columns = ["Student ID", "Trimester"] + classes
    counts = pd.DataFrame(columns=columns)
    heading = list(counts)
    year_rows = data[data["schedule_year"] == year]
    total = len(year_rows)
    for index, row in year_rows.iterrows():
        print(index, '/', total, end='\r')  # progress indicator
        student = row["student_number"]
        tri = row["term"]
        mask = (counts["Student ID"] == student) & (counts["Trimester"] == tri)
        if not mask.any():
            # First sighting of this (student, trimester): add a zeroed row.
            new_row = pd.DataFrame([[student, tri] + [0] * len(classes)],
                                   columns=columns)
            counts = pd.concat([counts, new_row], ignore_index=True)
        # Course code is the first 6 characters of the title.
        course = row["course_title"][:6].strip()
        if course in heading:
            counts.loc[(counts["Student ID"] == student) &
                       (counts["Trimester"] == tri), course] += 1
    return counts


new_csv_16 = _build_year_counts(2016)
new_csv_16.to_csv('classes_2016.csv', index=False)
print(new_csv_16.head())

new_csv_17 = _build_year_counts(2017)
new_csv_17.to_csv('classes_2017.csv', index=False)
print(new_csv_17.head())

new_csv_18 = _build_year_counts(2018)
new_csv_18.to_csv('classes_2018.csv', index=False)
print(new_csv_18.head()) | schedule_parser.py | import sys
import pandas as pd
data = pd.read_csv("schedule_u.csv")

# Course whitelist: only these course codes get a count column.
classes = []
with open("whitelist.txt", "r") as infile:
    for line in infile:
        classes.append(line.strip())


def _build_year_counts(year):
    """Count, per (student, trimester), how many times each whitelisted
    course appears in `data` for the given schedule year.

    The original script repeated this loop verbatim for 2016/2017/2018;
    it also used DataFrame.append, which was removed in pandas 2.0.
    """
    columns = ["Student ID", "Trimester"] + classes
    counts = pd.DataFrame(columns=columns)
    heading = list(counts)
    year_rows = data[data["schedule_year"] == year]
    total = len(year_rows)
    for index, row in year_rows.iterrows():
        print(index, '/', total, end='\r')  # progress indicator
        student = row["student_number"]
        tri = row["term"]
        mask = (counts["Student ID"] == student) & (counts["Trimester"] == tri)
        if not mask.any():
            # First sighting of this (student, trimester): add a zeroed row.
            new_row = pd.DataFrame([[student, tri] + [0] * len(classes)],
                                   columns=columns)
            counts = pd.concat([counts, new_row], ignore_index=True)
        # Course code is the first 6 characters of the title.
        course = row["course_title"][:6].strip()
        if course in heading:
            counts.loc[(counts["Student ID"] == student) &
                       (counts["Trimester"] == tri), course] += 1
    return counts


new_csv_16 = _build_year_counts(2016)
new_csv_16.to_csv('classes_2016.csv', index=False)
print(new_csv_16.head())

new_csv_17 = _build_year_counts(2017)
new_csv_17.to_csv('classes_2017.csv', index=False)
print(new_csv_17.head())

new_csv_18 = _build_year_counts(2018)
new_csv_18.to_csv('classes_2018.csv', index=False)
print(new_csv_18.head()) | 0.071827 | 0.184657 |
from threading import Thread
from nanomsg import Socket, SUB, PUSH, SUB_SUBSCRIBE, SOL_SOCKET, RCVTIMEO
from datetime import datetime
import os
from .datastruct import Event
class ClientMq(object):
def __init__(self, config, ui_event_engine, outgoing_queue):
self._ui_event_engine = ui_event_engine
self._outgoing_queue = outgoing_queue
self._config = config
self._active = False
self._thread = Thread(target=self._run)
def _run(self):
# os.system("taskset -cp 5 %d " % os.getpid())
while self._active:
try:
# response msg from server
msgin = self._recv_sock.recv(flags=0)
msgin = msgin.decode("utf-8")
if msgin is not None and msgin.index('|') > 0:
# print('client rec msg:',msgin,'at ', datetime.now())
if msgin[-1] == '\0':
msgin = msgin[:-1]
if msgin[-1] == '\x00':
msgin = msgin[:-1]
m = Event()
m.deserialize(msgin)
self._ui_event_engine.put(m)
except Exception as e:
pass
try:
# request, qry msg to server
msgout = self._outgoing_queue.get(False)
print('outgoing get msg,begin send', msgout, datetime.now())
# self._send_sock.send(bytes(msgout,"ascii"), flags=0)
self._send_sock.send(msgout, flags=1)
print('outgoing end send', msgout, datetime.now())
except Exception as e:
pass
def start(self, timer=True):
    """
    Start the mq thread: connect the nanomsg sockets and launch the worker.

    :param timer: unused; kept for interface compatibility.
    """
    self._recv_sock = Socket(SUB)
    self._send_sock = Socket(PUSH)
    self._monitor_sock = Socket(SUB)
    self._recv_sock.connect(self._config['serverpub_url'])
    # Subscribe to everything ('' prefix matches all topics).
    self._recv_sock.set_string_option(SUB, SUB_SUBSCRIBE, '')
    # Time out recv() every 100 ms so _run can also service the outgoing queue.
    self._recv_sock.set_int_option(SOL_SOCKET, RCVTIMEO, 100)
    self._send_sock.connect(self._config['serverpull_url'])
    self._monitor_sock.connect(self._config['serversub_url'])
    self._active = True
    # Thread.isAlive() was removed in Python 3.9; use is_alive().
    if not self._thread.is_alive():
        self._thread.start()
def stop(self):
    """
    Stop the mq thread: clear the run flag and join the worker if running.
    """
    # NOTE(review): Thread.isAlive() was removed in Python 3.9 -- should be
    # is_alive().  Also, the trailing "| source/common/client_mq.py | ..."
    # looks like dataset-extraction residue fused onto the last source line.
    self._active = False
    if self._thread.isAlive():
        self._thread.join() | source/common/client_mq.py | from threading import Thread
from nanomsg import Socket, SUB, PUSH, SUB_SUBSCRIBE, SOL_SOCKET, RCVTIMEO
from datetime import datetime
import os
from .datastruct import Event
class ClientMq(object):
def __init__(self, config, ui_event_engine, outgoing_queue):
self._ui_event_engine = ui_event_engine
self._outgoing_queue = outgoing_queue
self._config = config
self._active = False
self._thread = Thread(target=self._run)
def _run(self):
# os.system("taskset -cp 5 %d " % os.getpid())
while self._active:
try:
# response msg from server
msgin = self._recv_sock.recv(flags=0)
msgin = msgin.decode("utf-8")
if msgin is not None and msgin.index('|') > 0:
# print('client rec msg:',msgin,'at ', datetime.now())
if msgin[-1] == '\0':
msgin = msgin[:-1]
if msgin[-1] == '\x00':
msgin = msgin[:-1]
m = Event()
m.deserialize(msgin)
self._ui_event_engine.put(m)
except Exception as e:
pass
try:
# request, qry msg to server
msgout = self._outgoing_queue.get(False)
print('outgoing get msg,begin send', msgout, datetime.now())
# self._send_sock.send(bytes(msgout,"ascii"), flags=0)
self._send_sock.send(msgout, flags=1)
print('outgoing end send', msgout, datetime.now())
except Exception as e:
pass
def start(self, timer=True):
    """
    Start the mq thread: connect the nanomsg sockets and launch the worker.

    :param timer: unused; kept for interface compatibility.
    """
    self._recv_sock = Socket(SUB)
    self._send_sock = Socket(PUSH)
    self._monitor_sock = Socket(SUB)
    self._recv_sock.connect(self._config['serverpub_url'])
    # Subscribe to everything ('' prefix matches all topics).
    self._recv_sock.set_string_option(SUB, SUB_SUBSCRIBE, '')
    # Time out recv() every 100 ms so _run can also service the outgoing queue.
    self._recv_sock.set_int_option(SOL_SOCKET, RCVTIMEO, 100)
    self._send_sock.connect(self._config['serverpull_url'])
    self._monitor_sock.connect(self._config['serversub_url'])
    self._active = True
    # Thread.isAlive() was removed in Python 3.9; use is_alive().
    if not self._thread.is_alive():
        self._thread.start()
def stop(self):
"""
stop the mq thread
"""
self._active = False
if self._thread.isAlive():
self._thread.join() | 0.198958 | 0.041307 |
from . import computer
def part1():
    """
    Count the block tiles drawn by the game program.

    The software draws tiles to the screen with output instructions: every
    three output instructions specify the x position (distance from the
    left), y position (distance from the top), and tile id.
    0 is an empty tile.  1 is a wall.  2 is a block (breakable by the ball).
    3 is the horizontal paddle.  4 is the ball.
    Prints how many block tiles are on the screen when the game exits.
    """
    outputs = []
    computer.run_program(read_input(), [], outputs)
    # Tile ids occupy every third output slot.
    tile_ids = outputs[2::3]
    print(tile_ids.count(2))
def part2():
    """
    Play the game for free and print the final score.

    Memory address 0 is the number of quarters inserted; setting it to 2
    plays for free.  Joystick inputs: -1 tilted left, 0 neutral, 1 tilted
    right.  When an output triple has X=-1, Y=0, the third value is the new
    score rather than a tile id.
    """
    program = read_input()
    program[0] = 2  # play for free
    joystick = []
    arcade = computer.get_computer(program, joystick)
    score = 0
    ball_x = paddle_x = 0
    while True:
        x = next(arcade)
        # When the game is finished and the program halts, the computer
        # outputs the program state (a list) instead of an int.
        if type(x) == list:
            break
        y = next(arcade)  # consumed but unused: only x and the tile matter
        tile = next(arcade)
        if tile == 3:
            paddle_x = x
        elif tile == 4:
            # The ball is the last tile updated each tick: steer toward it.
            ball_x = x
            if ball_x < paddle_x:
                joystick.append(-1)
            elif ball_x > paddle_x:
                joystick.append(1)
            else:
                joystick.append(0)
        if x == -1:
            score = tile
    print(score)
def read_input():
    """Load the Intcode program: one comma-separated line of integers."""
    with open('input/day13.txt') as input_file:
        # NOTE(review): the trailing "| 2019/days/day13.py | from . import
        # computer" below looks like dataset-extraction residue fused onto
        # this source line.
        return [int(x) for x in input_file.readline().split(',')] | 2019/days/day13.py | from . import computer
def part1():
"""
The software draws tiles to the screen with output instructions:
every three output instructions specify the x position (distance from the left), y position (distance from the top), and tile id.
0 is an empty tile. No game object appears in this tile.
1 is a wall tile. Walls are indestructible barriers.
2 is a block tile. Blocks can be broken by the ball.
3 is a horizontal paddle tile. The paddle is indestructible.
4 is a ball tile. The ball moves diagonally and bounces off objects.
How many block tiles are on the screen when the game exits?
"""
program = read_input()
output_values = []
computer.run_program(program, [], output_values)
blocks = output_values[2::3].count(2)
print(blocks)
def part2():
"""
Memory address 0 represents the number of quarters that have been inserted; set it to 2 to play for free.
The arcade cabinet has a joystick that can move left and right.
If the joystick is in the neutral position, provide 0.
If the joystick is tilted to the left, provide -1.
If the joystick is tilted to the right, provide 1.
When three output instructions specify X=-1, Y=0, the third output instruction is the new score.
What is your score after the last block is broken?
"""
program = read_input()
program[0] = 2
input_values = []
arcade = computer.get_computer(program, input_values)
score = 0
ball_x = 0
paddle_x = 0
while True:
x = next(arcade)
if type(x) == list: # When the game is finished and the program halts, the computer outputs the program state
break
y = next(arcade)
v = next(arcade)
if v == 3:
paddle_x = x
elif v == 4: # Every tick, the last value to be updated is that of the ball
ball_x = x
next_input = 0
if ball_x < paddle_x:
next_input = -1
elif ball_x > paddle_x:
next_input = 1
input_values.append(next_input)
if x == -1:
score = v
print(score)
def read_input():
with open('input/day13.txt') as input_file:
return [int(x) for x in input_file.readline().split(',')] | 0.627723 | 0.752967 |
import subprocess
from typing import Dict, List, Optional, Union
class Stockfish:
""" Integrates the Stockfish chess engine with Python. """
def __init__(
    self, path: str = None, depth: int = 2, param: Dict[str, Union[str, int]] = None
):
    """Start the Stockfish engine subprocess and configure it over UCI.

    Args:
        path: Path to the stockfish binary; defaults to "stockfish" on $PATH.
        depth: Search depth (in plies) used by get_best_move().
        param: UCI option overrides, merged on top of the defaults below.
    """
    if param is None:
        param = {}
    if path is None:
        path = "stockfish"
    # Text-mode pipes to the engine's stdin/stdout (UCI is line-oriented).
    self.stockfish = subprocess.Popen(
        path, universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE
    )
    self.depth = str(depth)
    # Tell the engine to use the UCI format
    self.__put("uci")
    # Engine defaults; user-supplied `param` entries override these.
    default_param = {
        "Write Debug Log": "false",
        "Contempt": 0,
        "Min Split Depth": 0,
        "Threads": 1,
        "Ponder": "false",
        "Hash": 16,
        "MultiPV": 1,
        "Skill Level": 20,
        "Move Overhead": 30,
        "Minimum Thinking Time": 20,
        "Slow Mover": 80,
        "UCI_Chess960": "false",
    }
    default_param.update(param)
    self.param = default_param
    for name, value in list(default_param.items()):
        self.__set_option(name, value)
    self.__start_new_game()
def change_thinking_time(self, milliseconds: int) -> None:
    """Change the engine's minimum thinking time, in milliseconds.

    Bug fix: the original signature was missing ``self``, so it could not
    be called as a method and ``self.__set_option`` raised NameError.
    """
    self.__set_option("Minimum Thinking Time", milliseconds)
def change_difficulty(self, difficulty: int) -> None:
    """Change the engine strength by setting the UCI "Skill Level" option.

    The default configured in __init__ is 20; lower values weaken the
    engine (exact semantics are engine-defined).
    """
    self.__set_option("Skill Level", difficulty)
def __start_new_game(self) -> None:
    """Tell the engine a new game starts and wait until it is ready."""
    self.__put("ucinewgame")
    self.__isready()
def __put(self, command: str) -> None:
    """ Internal function for writing commands to the engine """
    # Flush immediately: the engine only acts on complete, delivered lines.
    self.stockfish.stdin.write(command + "\n")
    self.stockfish.stdin.flush()
def __set_option(self, optionname: str, value: Union[str, int]) -> None:
    """Send a UCI `setoption` command and warn if the engine rejects it."""
    command = "setoption name %s value %s" % (optionname, str(value))
    self.__put(command)
    # The engine reports unknown options before acknowledging readiness.
    if "No such" in self.__isready():
        print("Unable to set option %s" % optionname)
def __isready(self) -> str:
    """ Tests to see if the engine is ready to go """
    self.__put("isready")
    # Block until the engine acknowledges; lines before "readyok"
    # (option echoes, info output) are discarded.
    while True:
        text = self.stockfish.stdout.readline().strip()
        if text == "readyok":
            return text
def __go(self):
    """ will search through the number of plies designated by self.depth"""
    self.__put("go depth %s" % self.depth)
@staticmethod
def __convert_move_list_to_str(moves: List[str]) -> str:
result = ""
for move in moves:
result += move + " "
return result.strip()
def set_position(self, moves: List[str] = None) -> None:
    """Set the current board position.

    Args:
        moves: Moves (full algebraic notation, e.g. ['e2e4', 'e7e5']) to
            replay from the start position; None resets the board.
    """
    move_list = [] if moves is None else moves
    joined = self.__convert_move_list_to_str(move_list)
    self.__put("position startpos moves %s" % joined)
def set_fen_position(self, fen_position: str) -> None:
    """ Set the board game using FEN notation """
    # The string is passed to the engine verbatim; no local validation.
    self.__put("position fen " + fen_position)
def get_best_move(self) -> Optional[str]:
    """ Get best move with current position on the board.

    Returns:
        A string of moves in algebraic notation or None, if it's a checkmate.
    """
    # Start a fixed-depth search, then scan engine output for "bestmove".
    self.__go()
    while True:
        text = self.stockfish.stdout.readline().strip()
        split_text = text.split(" ")
        if split_text[0] == "bestmove":
            # "bestmove (none)" means there is no legal move.
            if split_text[1] == "(none)":
                return None
            return split_text[1]
def is_move_correct(self, move_value: str) -> bool:
    """ Checks new move.

    Args:
        move_value: New move value in algebraic notation.

    Returns:
        True, if new move is correct, else False.
    """
    # Restrict a depth-1 search to the candidate move; the engine replies
    # "bestmove (none)" when that move is illegal in the current position.
    self.__put("go depth 1 searchmoves %s" % move_value)
    while True:
        text = self.stockfish.stdout.readline().strip()
        split_text = text.split(" ")
        if split_text[0] == "bestmove":
            if split_text[1] == "(none)":
                return False
            return True
def __del__(self) -> None:
    """ Ends the stockfish program """
    # Kill the engine subprocess when this wrapper is garbage-collected.
    # NOTE(review): the trailing "| stockfish.py |" looks like dataset
    # extraction residue fused onto this source line.
    self.stockfish.kill() | stockfish.py |
import subprocess
from typing import Dict, List, Optional, Union
class Stockfish:
""" Integrates the Stockfish chess engine with Python. """
def __init__(
self, path: str = None, depth: int = 2, param: Dict[str, Union[str, int]] = None
):
if param is None:
param = {}
if path is None:
path = "stockfish"
self.stockfish = subprocess.Popen(
path, universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
self.depth = str(depth)
# Tell the engine to use the UCI format
self.__put("uci")
default_param = {
"Write Debug Log": "false",
"Contempt": 0,
"Min Split Depth": 0,
"Threads": 1,
"Ponder": "false",
"Hash": 16,
"MultiPV": 1,
"Skill Level": 20,
"Move Overhead": 30,
"Minimum Thinking Time": 20,
"Slow Mover": 80,
"UCI_Chess960": "false",
}
default_param.update(param)
self.param = default_param
for name, value in list(default_param.items()):
self.__set_option(name, value)
self.__start_new_game()
def change_thinking_time(self, milliseconds: int) -> None:
    """Change the engine's minimum thinking time, in milliseconds.

    Bug fix: the original signature was missing ``self``, so it could not
    be called as a method and ``self.__set_option`` raised NameError.
    """
    self.__set_option("Minimum Thinking Time", milliseconds)
def change_difficulty(self, difficulty: int) -> None:
""" Changes the difficulty of the engine, STILL UNSURE WHAT THIS MEANS """
self.__set_option("Skill Level", difficulty)
def __start_new_game(self) -> None:
self.__put("ucinewgame")
self.__isready()
def __put(self, command: str) -> None:
""" Internal function for writing commands to the engine """
self.stockfish.stdin.write(command + "\n")
self.stockfish.stdin.flush()
def __set_option(self, optionname: str, value: Union[str, int]) -> None:
""" Will set an option in the chess engine """
self.__put("setoption name %s value %s" % (optionname, str(value)))
stdout = self.__isready()
if stdout.find("No such") >= 0:
print("Unable to set option %s" % optionname)
def __isready(self) -> str:
""" Tests to see if the engine is ready to go """
self.__put("isready")
while True:
text = self.stockfish.stdout.readline().strip()
if text == "readyok":
return text
def __go(self):
""" will search through the number of plies designated by self.depth"""
self.__put("go depth %s" % self.depth)
@staticmethod
def __convert_move_list_to_str(moves: List[str]) -> str:
result = ""
for move in moves:
result += move + " "
return result.strip()
def set_position(self, moves: List[str] = None) -> None:
""" Sets current board positions.
Args:
moves: A list of moves to set this position on the board.
Must be in full algebraic notation.
example:
['e2e4', 'e7e5']
"""
if moves is None:
moves = []
self.__put(
"position startpos moves %s" % self.__convert_move_list_to_str(moves)
)
def set_fen_position(self, fen_position: str) -> None:
""" Set the board game using FEN notation """
self.__put("position fen " + fen_position)
def get_best_move(self) -> Optional[str]:
""" Get best move with current position on the board.
Returns:
A string of moves in algebraic notation or None, if it's a checkmate.
"""
self.__go()
while True:
text = self.stockfish.stdout.readline().strip()
split_text = text.split(" ")
if split_text[0] == "bestmove":
if split_text[1] == "(none)":
return None
return split_text[1]
def is_move_correct(self, move_value: str) -> bool:
""" Checks new move.
Args:
move_value: New move value in algebraic notation.
Returns:
True, if new move is correct, else False.
"""
self.__put("go depth 1 searchmoves %s" % move_value)
while True:
text = self.stockfish.stdout.readline().strip()
split_text = text.split(" ")
if split_text[0] == "bestmove":
if split_text[1] == "(none)":
return False
return True
def __del__(self) -> None:
""" Ends the stockfish program """
self.stockfish.kill() | 0.884514 | 0.352843 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='test_proto/image-mood-classification.proto',
package='YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX',
syntax='proto3',
serialized_pb=_b('\n*test_proto/image-mood-classification.proto\x12 YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX\"8\n\x0bImageTagSet\x12\r\n\x05image\x18\x01 \x03(\x03\x12\x0b\n\x03tag\x18\x02 \x03(\t\x12\r\n\x05score\x18\x03 \x03(\x01\x32q\n\x05Model\x12h\n\x08\x63lassify\x12-.YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX.ImageTagSet\x1a-.YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX.ImageTagSetb\x06proto3')
)
_IMAGETAGSET = _descriptor.Descriptor(
name='ImageTagSet',
full_name='YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX.ImageTagSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image', full_name='YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX.ImageTagSet.image', index=0,
number=1, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag', full_name='YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX.ImageTagSet.tag', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score', full_name='YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX.ImageTagSet.score', index=2,
number=3, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=80,
serialized_end=136,
)
DESCRIPTOR.message_types_by_name['ImageTagSet'] = _IMAGETAGSET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ImageTagSet = _reflection.GeneratedProtocolMessageType('ImageTagSet', (_message.Message,), dict(
DESCRIPTOR = _IMAGETAGSET,
__module__ = 'test_proto.image_mood_classification_pb2'
# @@protoc_insertion_point(class_scope:YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX.ImageTagSet)
))
_sym_db.RegisterMessage(ImageTagSet)
# @@protoc_insertion_point(module_scope) | tests/fixtures/image_mood_classification_100_proto_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='test_proto/image-mood-classification.proto',
package='YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX',
syntax='proto3',
serialized_pb=_b('\n*test_proto/image-mood-classification.proto\x12 YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX\"8\n\x0bImageTagSet\x12\r\n\x05image\x18\x01 \x03(\x03\x12\x0b\n\x03tag\x18\x02 \x03(\t\x12\r\n\x05score\x18\x03 \x03(\x01\x32q\n\x05Model\x12h\n\x08\x63lassify\x12-.YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX.ImageTagSet\x1a-.YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX.ImageTagSetb\x06proto3')
)
_IMAGETAGSET = _descriptor.Descriptor(
name='ImageTagSet',
full_name='YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX.ImageTagSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image', full_name='YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX.ImageTagSet.image', index=0,
number=1, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag', full_name='YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX.ImageTagSet.tag', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score', full_name='YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX.ImageTagSet.score', index=2,
number=3, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=80,
serialized_end=136,
)
DESCRIPTOR.message_types_by_name['ImageTagSet'] = _IMAGETAGSET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ImageTagSet = _reflection.GeneratedProtocolMessageType('ImageTagSet', (_message.Message,), dict(
DESCRIPTOR = _IMAGETAGSET,
__module__ = 'test_proto.image_mood_classification_pb2'
# @@protoc_insertion_point(class_scope:YKhGXjKWHYsPwKJFfEPnmoHOkDkPKBxX.ImageTagSet)
))
_sym_db.RegisterMessage(ImageTagSet)
# @@protoc_insertion_point(module_scope) | 0.167866 | 0.131731 |
from pylab import *
from rnn import *
from brnn import *
from common import *
def example(hidden=10, examples=1000, epochs=100, eta=0.001, rnn=None, binary=False, progress=True, embedded=False):
    """Train an RNN on Reber-grammar sequences, then measure validation cost.

    Returns the (possibly freshly constructed) network together with the
    training costs and the validation costs.
    """
    import reber
    if embedded:
        data_source = reber.get_n_embedded_examples
    else:
        data_source = reber.get_n_examples
    # Plain {0,1} encoding for the SRNN; rescaled to {-1,+1} for the binary BRNN.
    encode = (lambda x: 2 * x - 1) if binary else (lambda x: x)

    def fresh_batch():
        # Draw a new set of encoded example words.
        return [encode(np.array(word)) for word in data_source(examples)]

    def epoch_ticker():
        bar = gen_pbar() if progress else (lambda x: x)
        return iter(bar(xrange(epochs)))

    if rnn is None:
        rnn = BRNN(7, hidden, 7) if binary else SRNN(7, hidden, 7)
    costs = rnn.train_session(fresh_batch(), eta, epoch_ticker())
    # Validation: a second "training" session with a zero learning rate on
    # freshly drawn data measures cost without updating the weights.
    validation_costs = rnn.train_session(fresh_batch(), 0, epoch_ticker())
    return rnn, costs, validation_costs
def compare_embedded(hidden=100, embedded=True, examples=1000, epochs=100):
    """Train an SRNN and a BRNN on the same task and collect their costs.

    Returns ((srnn_train_costs, brnn_train_costs),
             (srnn_validation_costs, brnn_validation_costs)).
    """
    _, srnn_train, srnn_val = example(hidden, examples, epochs, 0.001,
                                      binary=False, embedded=embedded)
    _, brnn_train, brnn_val = example(hidden, examples, epochs, 1,
                                      binary=True, embedded=embedded)
    return (srnn_train, brnn_train), (srnn_val, brnn_val)
def triple_comparison():
    """Train a BRNN on Reber words, then compare three forward-prop variants.

    After training, each variant in ``funcs`` is run over a freshly drawn data
    set and its per-example costs are collected so the sampling strategies can
    be compared on identical inputs.  Returns (rnn, training_costs, error_bins).
    """
    import reber
    word_len = 7
    examples = 200
    epochs = 100
    hidden = 100

    def draw_data():
        # Reber words encoded as {-1,+1} matrices (the BRNN expects binary units).
        return [2 * np.array(word) - 1 for word in reber.get_n_examples(examples)]

    rnn = BRNN(word_len, hidden, word_len)
    # Train with a unit learning rate.
    costs = rnn.train_session(draw_data(), 1, iter(gen_pbar()(xrange(epochs))))
    # Evaluate on fresh, unseen words.
    evaluation_set = draw_data()
    funcs = dict(prob=rnn.fprop,
                 det=rnn.fprop_multi_single_sample,
                 resample_per_layer_avg=rnn.fprop_per_layer_avg)
    error_bins = dict((name, []) for name in funcs)
    for variant in funcs:
        fprop = funcs[variant]
        for ins, outs in evaluation_set:
            fprop(ins)  # forward pass sets the network state read by calculate_cost
            error_bins[variant].append(rnn.calculate_cost(outs))
    return rnn, costs, error_bins
def experiment():
    """Sweep the hidden-layer size from 1 to 100 and record training costs.

    Each network is trained from scratch on 500 binary Reber examples for 50
    epochs with a learning rate of 1.  Returns (hidden_sizes, cost_curves).
    """
    # NOTE: the previous version imported progressbar here but never used it;
    # the progress bar actually comes from gen_pbar().
    hiddens = arange(1, 101)
    pbar = gen_pbar()
    # example(...)[1] is the per-epoch training-cost curve for that network.
    residuals = array([example(h, 500, 50, 1, None, True, False)[1]
                       for h in pbar(hiddens)])
    return hiddens, residuals
def text(fname='aiw.txt', hidden=10, seq_length=10, epochs=10, eta=1, rnn=None, binary=False, progress=True):
    """Train a character-level RNN on a text file.

    The corpus is split into one-hot encoded chunks of ``seq_length``
    characters whose targets are the same chunks shifted right by one
    character.  Returns (rnn, training_costs, dataset).
    """
    # Read the corpus and drop the trailing newline.  A context manager
    # guarantees the file handle is closed (the previous bare
    # open().read() leaked it).
    with open(fname, 'r') as fh:
        data = fh.read()[:-1]
    chars = sorted(set(data))
    data_size, vocab_size = len(data), len(chars)
    print('Data has %d characters, %d unique.' % (data_size, vocab_size))
    char_to_ix = dict((ch, i) for i, ch in enumerate(chars))

    def one_hot(v):
        return np.eye(vocab_size)[v]

    def text_to_repr(text):
        # {0,1} rows for the SRNN, rescaled to {-1,+1} for the binary BRNN.
        if binary:
            return -1 + 2 * one_hot([char_to_ix[ch] for ch in text])
        return one_hot([char_to_ix[ch] for ch in text])

    if rnn is None:
        rnn = BRNN(vocab_size, hidden, vocab_size) if binary else SRNN(vocab_size, hidden, vocab_size)
    # Input chunk [j, j+seq_length) pairs with the same chunk shifted one
    # character to the right, wrapping at the end of the corpus.
    dataset = [(text_to_repr(data[j:j + seq_length]),
                text_to_repr(data[j + 1:j + seq_length] + data[(j + seq_length + 1) % data_size]))
               for j in xrange(0, data_size, seq_length)]
    pbar = gen_pbar() if progress else (lambda x: x)
    costs = rnn.train_session(dataset, eta, xrange(epochs), iter(pbar(xrange(epochs * len(dataset)))))
    return rnn, costs, dataset
def gen_pbar():
    """Build a console progress bar with a percentage, spinner and ETA."""
    import progressbar as pb
    widgets = [pb.Percentage(),
               pb.Bar(marker=pb.RotatingMarker()),
               ' ',
               pb.ETA(),
               ' time to learn']
    return pb.ProgressBar(widgets=widgets)
from rnn import *
from brnn import *
from common import *
def example(hidden=10, examples=1000, epochs=100, eta=0.001, rnn=None, binary=False, progress=True, embedded=False):
import reber
data_source = reber.get_n_embedded_examples if embedded else reber.get_n_examples
DATA = map((lambda x: 2*x-1) if binary else (lambda x: x), map(np.array, data_source(examples)))
if rnn is None:
rnn = BRNN(7, hidden, 7) if binary else SRNN(7, hidden, 7)
pbar = gen_pbar() if progress else (lambda x: x)
costs = rnn.train_session(DATA, eta, iter(pbar(xrange(epochs))))
#validate:
eta=0
DATA = map((lambda x: 2*x-1) if binary else (lambda x: x), map(np.array, data_source(examples)))
pbar = gen_pbar() if progress else (lambda x: x)
validation_costs = rnn.train_session(DATA, eta, iter(pbar(xrange(epochs))))
return rnn, costs, validation_costs
def compare_embedded(hidden=100, embedded=True, examples=1000, epochs=100):
eta_srnn = 0.001
_, costs_srnn, val_costs_srnn = example(hidden, examples, epochs, eta_srnn, binary=False, embedded=embedded)
_, costs_brnn, val_costs_brnn = example(hidden, examples, epochs, 1, binary=True, embedded=embedded)
return (costs_srnn, costs_brnn), (val_costs_srnn, val_costs_brnn)
def triple_comparison():
import reber
#data_source = lambda ex: map(lambda x: 2*x-1,map(np.array,reber.get_n_embedded_examples(ex)))
data_source = lambda ex: map(lambda x: 2*x-1,map(np.array,reber.get_n_examples(ex)))
word_len = 7
examples = 200
epochs = 100
hidden = 100
data = data_source(examples)
rnn = BRNN(word_len, hidden, word_len)
pbar = gen_pbar()
#train
costs = rnn.train_session(data, 1, iter(pbar(xrange(epochs))))
#validate / measure performance
#get new data
data = data_source(examples)
#pbar = gen_pbar()
#lazy_method_costs = rnn.train_session(DATA, 0, pbar(xrange(epochs)))
funcs = dict(prob=rnn.fprop, det=rnn.fprop_multi_single_sample, resample_per_layer_avg=rnn.fprop_per_layer_avg)
error_bins = dict(prob=[], det=[], resample_per_layer_avg=[])
for k in funcs:
for ins, outs in data:
funcs[k](ins)
error_bins[k].append(rnn.calculate_cost(outs))
return rnn, costs, error_bins
def experiment():
import progressbar as pb
hiddens = arange(1,101)
pbar = gen_pbar()
residuals = array([ example(h, 500, 50, 1, None, True, False)[1] for h in pbar(hiddens) ])
return hiddens,residuals
def text(fname='aiw.txt', hidden=10, seq_length=10, epochs=10, eta=1, rnn=None, binary=False, progress=True):
# Data I/O
data = open(fname, 'r').read()[:-1] # Use this source file as input for RNN #remove trailing newline
chars = sorted(list(set(data)))
data_size, vocab_size = len(data), len(chars)
print('Data has %d characters, %d unique.' % (data_size, vocab_size))
char_to_ix = dict([(ch, i) for i, ch in enumerate(chars)])
ix_to_char = dict([(i, ch) for i, ch in enumerate(chars)])
def one_hot(v):
return np.eye(vocab_size)[v]
def text_to_repr(text):
if binary:
return -1 + 2*one_hot([char_to_ix[ch] for ch in text])
else:
return one_hot([char_to_ix[ch] for ch in text])
if rnn is None:
if binary:
rnn = BRNN(vocab_size, hidden, vocab_size)
else:
rnn = SRNN(vocab_size, hidden, vocab_size)
dataset = [(text_to_repr(data[j :j+seq_length]),
text_to_repr(data[j+1:j+seq_length] + data[(j+seq_length+1)%data_size])) for j in xrange(0,data_size,seq_length)]
pbar = gen_pbar() if progress else (lambda x: x)
costs = rnn.train_session(dataset, eta, xrange(epochs), iter(pbar(xrange(epochs*len(dataset)))))
return rnn, costs, dataset
def gen_pbar():
import progressbar as pb
return pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar(marker=pb.RotatingMarker()),' ',pb.ETA(),' time to learn']) | 0.516108 | 0.393589 |
import numpy as np
class LinearRegressionPy:
    # Placeholder for a pure-Python (NumPy-free) implementation; not written yet.
    pass
class LinearRegressionNp:
    """Ordinary least-squares linear regression backed by NumPy.

    Supported solvers:
      - "normal_eq":  closed-form normal equations (matrix inverse)
      - "pseudo_inv": Moore-Penrose pseudo-inverse
      - "ols":        numpy.linalg.lstsq
      - "gd":         full-batch gradient descent
      - "sgd":        stochastic gradient descent (one sample per epoch)
      - "bgd":        mini-batch gradient descent

    After ``fit``, ``intercept_`` and ``coef_`` expose the learned parameters
    in scikit-learn style (``theta[0]`` is the bias term).
    """

    def __init__(self, solver="normal_eq"):
        self.solver = solver
        self.theta = None        # full parameter vector, bias first
        self.intercept_ = None
        self.coef_ = None

    def fit(self, X, y):
        """Fit on design matrix X of shape (n_samples, n_features) and target y."""
        if self.solver == "normal_eq":
            self._fit_normal(X, y)
        elif self.solver == "pseudo_inv":
            self._fit_pseudo_inv(X, y)
        elif self.solver == "ols":
            self._fit_ols(X, y)
        elif self.solver == "gd":
            self._fit_gd(X, y)
        elif self.solver == "sgd":
            self._fit_sgd(X, y)
        elif self.solver == "bgd":
            # Previously fell through with theta=None and crashed in
            # _update_parameters; now dispatches to the mini-batch solver.
            self._fit_bgd(X, y)
        else:
            print(f"Solver {self.solver} non reconnu")
            return
        self._update_parameters()

    def predict(self, X):
        """Return X_1 @ theta, where X_1 is X with a prepended constant column."""
        X_1 = self._add_constant(X)
        return X_1.dot(self.theta)

    def _add_constant(self, X):
        # Prepend a column of ones so theta[0] acts as the intercept.
        return np.c_[np.ones((X.shape[0], 1)), X]

    # Fit functions
    def _fit_normal(self, X, y):
        # theta = (X'X)^-1 X'y  (normal equations; fails on singular X'X).
        X_1 = self._add_constant(X)
        self.theta = np.linalg.inv(X_1.T.dot(X_1)).dot(X_1.T.dot(y))

    def _fit_pseudo_inv(self, X, y):
        # theta = pinv(X) y; robust to singular X'X.
        X_1 = self._add_constant(X)
        self.theta = np.linalg.pinv(X_1).dot(y)

    def _fit_ols(self, X, y):
        X_1 = self._add_constant(X)
        self.theta = np.linalg.lstsq(X_1, y, rcond=1e-6)[0]

    def _fit_gd(self, X, y, learning_rate=0.01, n_iter=10000):
        # Full-batch gradient descent on the MSE cost.
        X_1 = self._add_constant(X)
        y = y.reshape(-1, 1)
        self.theta = np.random.randn(X_1.shape[1], 1)
        for _ in range(n_iter):
            gradient = (2 / X_1.shape[0]) * X_1.T.dot(X_1.dot(self.theta) - y)
            self.theta = self.theta - learning_rate * gradient
        self.theta = self.theta.flatten()

    def _fit_sgd(self, X, y, t0=800, lr0=0.1, n_epochs=500):
        # Stochastic gradient descent: one random sample per epoch with the
        # decaying learning-rate schedule lr0 * t0 / (t0 + epoch).
        X_1 = self._add_constant(X)
        y = y.reshape(-1, 1)
        self.theta = np.random.randn(X_1.shape[1], 1)
        for epoch in range(n_epochs):
            random_index = np.random.randint(X_1.shape[0])
            X_i = X_1[random_index:random_index + 1]
            y_i = y[random_index:random_index + 1]
            gradient = 2 * X_i.T.dot(X_i.dot(self.theta) - y_i)
            learning_rate = lr0 * (t0 / (t0 + epoch))
            self.theta = self.theta - learning_rate * gradient
        self.theta = self.theta.flatten()

    def _fit_bgd(self, X, y, learning_rate=0.01, n_iter=1000, batch_size=32):
        # Mini-batch gradient descent (previously an unimplemented stub):
        # shuffle the samples each pass and step on batches of ``batch_size``.
        X_1 = self._add_constant(X)
        y = y.reshape(-1, 1)
        n_samples = X_1.shape[0]
        self.theta = np.random.randn(X_1.shape[1], 1)
        for _ in range(n_iter):
            order = np.random.permutation(n_samples)
            for start in range(0, n_samples, batch_size):
                idx = order[start:start + batch_size]
                X_b, y_b = X_1[idx], y[idx]
                gradient = (2 / X_b.shape[0]) * X_b.T.dot(X_b.dot(self.theta) - y_b)
                self.theta = self.theta - learning_rate * gradient
        self.theta = self.theta.flatten()

    def _update_parameters(self):
        # Split theta into scikit-learn style intercept_/coef_ attributes.
        self.intercept_ = self.theta[0]
        self.coef_ = self.theta[1:]
class RidgeNp:
    """Ridge (L2-regularised) linear regression backed by NumPy.

    Only the closed-form "normal_eq" solver is implemented; the gradient
    based solvers ("gd", "sgd", "bgd") are placeholders for now.
    """

    def __init__(self, solver="normal_eq", alpha=1):
        self.solver = solver
        self.alpha = alpha       # L2 regularisation strength
        self.theta = None        # full parameter vector, bias first
        self.intercept_ = None
        self.coef_ = None

    def fit(self, X, y):
        """Fit on design matrix X of shape (n_samples, n_features) and target y."""
        if self.solver == "normal_eq":
            self._fit_normal(X, y)
        elif self.solver in ("gd", "sgd", "bgd"):
            # Not implemented yet: return with the model unfitted instead of
            # falling through and crashing in _update_parameters on theta=None.
            return
        else:
            print(f"Solver {self.solver} non reconnu")
            return
        self._update_parameters()

    def predict(self, X):
        """Return X_1 @ theta, where X_1 is X with a prepended constant column."""
        X_1 = self._add_constant(X)
        return X_1.dot(self.theta)

    def _fit_normal(self, X, y):
        # theta = (X'X + alpha*I)^-1 X'y.  NOTE(review): this also penalises
        # the intercept (the column of ones), unlike scikit-learn's Ridge —
        # confirm this is intended.
        X_1 = self._add_constant(X)
        self.theta = np.linalg.inv(X_1.T.dot(X_1) + self.alpha * np.identity(X_1.shape[1])).dot(X_1.T.dot(y))

    def _add_constant(self, X):
        # Prepend a column of ones so theta[0] acts as the intercept.
        return np.c_[np.ones((X.shape[0], 1)), X]

    def _update_parameters(self):
        # Split theta into scikit-learn style intercept_/coef_ attributes.
        self.intercept_ = self.theta[0]
        self.coef_ = self.theta[1:]
class LinearRegressionPy:
pass
class LinearRegressionNp:
def __init__(self, solver="normal_eq"):
self.solver = solver
self.theta = None
self.intercept_ = None
self.coef_ = None
def fit(self, X, y):
if self.solver == "normal_eq":
self._fit_normal(X, y)
elif self.solver == "pseudo_inv":
self._fit_pseudo_inv(X, y)
elif self.solver == "ols":
self._fit_ols(X, y)
elif self.solver == "gd":
self._fit_gd(X, y)
elif self.solver == "sgd":
self._fit_sgd(X, y)
elif self.solver == "bgd":
pass
else:
print(f"Solver {self.solver} non reconnu")
return
self._update_parameters()
def predict(self, X):
X_1 = self._add_constant(X)
return X_1.dot(self.theta)
def _add_constant(self, X):
return np.c_[np.ones((X.shape[0], 1)), X]
# Fit functions
def _fit_normal(self, X, y):
X_1 = self._add_constant(X)
self.theta = np.linalg.inv(X_1.T.dot(X_1)).dot(X_1.T.dot(y))
def _fit_pseudo_inv(self, X, y):
X_1 = self._add_constant(X)
self.theta = np.linalg.pinv(X_1).dot(y)
def _fit_ols(self, X, y):
X_1 = self._add_constant(X)
self.theta = np.linalg.lstsq(X_1, y, rcond=1e-6)[0]
def _fit_gd(self, X, y, learning_rate=0.01, n_iter=10000):
X_1 = self._add_constant(X)
y = y.reshape(-1,1)
self.theta = np.random.randn(X_1.shape[1], 1)
for i in range(n_iter):
gradient = (2/X_1.shape[0])*X_1.T.dot(X_1.dot(self.theta)-y)
self.theta = self.theta - learning_rate*gradient
self.theta = self.theta.flatten()
def _fit_sgd(self, X, y, t0=800, lr0=0.1, n_epochs=500):
X_1 = self._add_constant(X)
y = y.reshape(-1,1)
self.theta = np.random.randn(X_1.shape[1], 1)
for epoch in range(n_epochs):
random_index = np.random.randint(X_1.shape[0])
X_i = X_1[random_index:random_index+1]
y_i = y[random_index:random_index+1]
gradient = 2*X_i.T.dot(X_i.dot(self.theta)-y_i)
learning_rate = lr0*(t0/(t0+epoch))
self.theta = self.theta - learning_rate*gradient
self.theta = self.theta.flatten()
def _fit_bgd(self, X, y, learning_rate=0.01, n_iter=10000):
pass
def _update_parameters(self):
self.intercept_ = self.theta[0]
self.coef_ = self.theta[1:]
class RidgeNp:
def __init__(self, solver="normal_eq", alpha=1):
self.solver = solver
self.alpha = alpha
self.theta = None
self.intercept_ = None
self.coef_ = None
def fit(self, X, y):
if self.solver == "normal_eq":
self._fit_normal(X, y)
elif self.solver == "gd":
pass
elif self.solver == "sgd":
pass
elif self.solver == "bgd":
pass
else:
print(f"Solver {self.solver} non reconnu")
return
self._update_parameters()
def predict(self, X):
X_1 = self._add_constant(X)
return X_1.dot(self.theta)
def _fit_normal(self, X, y):
X_1 = self._add_constant(X)
self.theta = np.linalg.inv(X_1.T.dot(X_1)+self.alpha*np.identity(X_1.shape[1])).dot(X_1.T.dot(y))
def _add_constant(self, X):
return np.c_[np.ones((X.shape[0], 1)), X]
def _update_parameters(self):
self.intercept_ = self.theta[0]
self.coef_ = self.theta[1:] | 0.749087 | 0.613468 |
"""Poll the srsRAN PDSCH processes and push CPU, disk, RAM and network
metrics to an InfluxDB instance over its HTTP /write API."""
import requests
from time import sleep
import time
import psutil
import os

NVF = 'test'
IP = '192.168.1.117'          # InfluxDB host
DB = 'ns_1'
USER = 'test'
PASSWORD = '<PASSWORD>'       # placeholder credential -- inject at deploy time, never commit the real one
TIME = 1                      # seconds between measurement rounds
interface = "lo"              # NIC whose byte counters are sampled

cpu = 0.0
ul = 0.00                     # upload rate, kB/s (kept from previous round on a zero-length interval)
dl = 0.00                     # download rate, kB/s
t0 = time.time()
# psutil per-NIC counters: index 0 is bytes_sent, index 1 is bytes_recv.
upload = psutil.net_io_counters(pernic=True)[interface][0]
download = psutil.net_io_counters(pernic=True)[interface][1]
up_down = (upload, download)

while True:
    for proc in psutil.process_iter():
        process = psutil.Process(proc.pid)
        pname = process.name()
        if pname == "pdsch_enodeb" or pname == "pdsch_ue":
            percent = process.cpu_percent(interval=0.1)
            if str(percent) != '0.0':   # only report rounds where the process was active
                # InfluxDB line-protocol payloads: "<measurement> value=<v>".
                cpu = 'cpu' + ' value=%s' % percent
                disk = 'disk' + ' value=%s' % psutil.disk_usage('/').percent
                ram = 'ram' + ' value=%s' % round(process.memory_percent(), 2)
                # Derive kB/s rates from the byte-counter deltas since the last round.
                last_up_down = up_down
                upload = psutil.net_io_counters(pernic=True)[interface][0]
                download = psutil.net_io_counters(pernic=True)[interface][1]
                t1 = time.time()
                up_down = (upload, download)
                try:
                    ul, dl = [(now - last) / (t1 - t0) / 1024.0
                              for now, last in zip(up_down, last_up_down)]
                    t0 = time.time()
                except ZeroDivisionError:
                    # Two samples in the same clock tick (t1 == t0): keep the
                    # previous rates.  (Was a bare except that hid every error.)
                    pass
                if dl > 0.1 or ul >= 0.1:
                    time.sleep(0.75)
                # NOTE(review): 'cls' only clears the screen on Windows; on
                # Linux (where interface "lo" suggests this runs) use 'clear'
                # -- confirm the target OS before changing.
                os.system('cls')
                network_in = 'network_in_' + NVF + ' value=%s' % ul
                network_out = 'network_out_' + NVF + ' value=%s' % dl
                requests.post("http://%s:8086/write?db=%s" % (IP, DB), auth=(USER, PASSWORD), data=cpu)
                requests.post("http://%s:8086/write?db=%s" % (IP, DB), auth=(USER, PASSWORD), data=disk)
                requests.post("http://%s:8086/write?db=%s" % (IP, DB), auth=(USER, PASSWORD), data=ram)
                requests.post("http://%s:8086/write?db=%s" % (IP, DB), auth=(USER, PASSWORD), data=network_in)
                requests.post("http://%s:8086/write?db=%s" % (IP, DB), auth=(USER, PASSWORD), data=network_out)
                sleep(TIME)
import requests
from time import sleep
import time
import psutil
import os
NVF = 'test'
IP = '192.168.1.117'
DB = 'ns_1'
USER = 'test'
PASSWORD = '<PASSWORD>'
TIME = 1
interface = "lo"
cpu = 0.0
ul=0.00
dl=0.00
t0 = time.time()
upload=psutil.net_io_counters(pernic=True)[interface][0]
download=psutil.net_io_counters(pernic=True)[interface][1]
up_down=(upload,download)
while True:
for proc in psutil.process_iter():
process = psutil.Process(proc.pid)
pname = process.name()
if pname == "pdsch_enodeb" or pname == "pdsch_ue":
percent = process.cpu_percent(interval=0.1)
if str(percent) != '0.0':
cpu = 'cpu' + ' value=%s' % percent
disk = 'disk' + ' value=%s' % psutil.disk_usage('/').percent
ram = 'ram' + ' value=%s' % round(process.memory_percent(), 2)
last_up_down = up_down
upload = psutil.net_io_counters(pernic=True)[interface][0]
download = psutil.net_io_counters(pernic=True)[interface][1]
t1 = time.time()
up_down = (upload, download)
try:
ul, dl = [(now - last) / (t1 - t0) / 1024.0
for now, last in zip(up_down, last_up_down)]
t0 = time.time()
except:
pass
if dl > 0.1 or ul >= 0.1:
time.sleep(0.75)
os.system('cls')
#print('UL: {:0.2f} kB/s \n'.format(ul) + 'DL: {:0.2f} kB/s'.format(dl))
network_in = 'network_in_' + NVF + ' value=%s' % ul
network_out = 'network_out_' + NVF + ' value=%s' % dl
requests.post("http://%s:8086/write?db=%s" % (IP, DB), auth=(USER, PASSWORD), data=cpu)
requests.post("http://%s:8086/write?db=%s" % (IP, DB), auth=(USER, PASSWORD), data=disk)
requests.post("http://%s:8086/write?db=%s" % (IP, DB), auth=(USER, PASSWORD), data=ram)
requests.post("http://%s:8086/write?db=%s" % (IP, DB), auth=(USER, PASSWORD), data=network_in)
requests.post("http://%s:8086/write?db=%s" % (IP, DB), auth=(USER, PASSWORD), data=network_out)
sleep(TIME) | 0.121516 | 0.086246 |
begin_unit
comment|'# Copyright 2016 OpenStack Foundation'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'from'
name|'oslo_config'
name|'import'
name|'cfg'
newline|'\n'
nl|'\n'
DECL|variable|GROUP_NAME
name|'GROUP_NAME'
op|'='
string|"'spice'"
newline|'\n'
DECL|variable|spice_opt_group
name|'spice_opt_group'
op|'='
name|'cfg'
op|'.'
name|'OptGroup'
op|'('
name|'GROUP_NAME'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|enabled_opt
name|'enabled_opt'
op|'='
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'enabled'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'False'
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nEnable spice related features.\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|agent_enabled_opt
name|'agent_enabled_opt'
op|'='
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'agent_enabled'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'True'
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nEnable the spice guest agent support.\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|html5proxy_base_url_opt
name|'html5proxy_base_url_opt'
op|'='
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'html5proxy_base_url'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'http://127.0.0.1:6082/spice_auto.html'"
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nLocation of spice HTML5 console proxy, in the form\n"http://127.0.0.1:6082/spice_auto.html"\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|html5proxy_host_opt
name|'html5proxy_host_opt'
op|'='
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'html5proxy_host'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'0.0.0.0'"
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nHost on which to listen for incoming requests\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|html5proxy_port_opt
name|'html5proxy_port_opt'
op|'='
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'html5proxy_port'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'6082'
op|','
nl|'\n'
DECL|variable|min
name|'min'
op|'='
number|'1'
op|','
nl|'\n'
DECL|variable|max
name|'max'
op|'='
number|'65535'
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nPort on which to listen for incoming requests\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|server_listen_opt
name|'server_listen_opt'
op|'='
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'server_listen'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'127.0.0.1'"
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nIP address on which instance spice server should listen\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|server_proxyclient_address_opt
name|'server_proxyclient_address_opt'
op|'='
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'server_proxyclient_address'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'127.0.0.1'"
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nThe address to which proxy clients (like nova-spicehtml5proxy) should connect\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|keymap_opt
name|'keymap_opt'
op|'='
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'keymap'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'en-us'"
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nKeymap for spice\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|ALL_OPTS
name|'ALL_OPTS'
op|'='
op|'['
name|'html5proxy_base_url_opt'
op|','
nl|'\n'
name|'server_listen_opt'
op|','
nl|'\n'
name|'server_proxyclient_address_opt'
op|','
nl|'\n'
name|'enabled_opt'
op|','
nl|'\n'
name|'agent_enabled_opt'
op|','
nl|'\n'
name|'keymap_opt'
op|','
nl|'\n'
name|'html5proxy_host_opt'
op|','
nl|'\n'
name|'html5proxy_port_opt'
op|']'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|CLI_OPTS
name|'CLI_OPTS'
op|'='
op|'['
name|'html5proxy_host_opt'
op|','
nl|'\n'
name|'html5proxy_port_opt'
op|']'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|register_opts
name|'def'
name|'register_opts'
op|'('
name|'conf'
op|')'
op|':'
newline|'\n'
indent|' '
name|'conf'
op|'.'
name|'register_opts'
op|'('
name|'ALL_OPTS'
op|','
name|'group'
op|'='
name|'spice_opt_group'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|register_cli_opts
dedent|''
name|'def'
name|'register_cli_opts'
op|'('
name|'conf'
op|')'
op|':'
newline|'\n'
indent|' '
name|'conf'
op|'.'
name|'register_cli_opts'
op|'('
name|'CLI_OPTS'
op|','
name|'group'
op|'='
name|'spice_opt_group'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|list_opts
dedent|''
name|'def'
name|'list_opts'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'{'
name|'spice_opt_group'
op|':'
name|'ALL_OPTS'
op|'}'
newline|'\n'
dedent|''
endmarker|''
end_unit | nova/conf/spice.py | begin_unit
comment|'# Copyright 2016 OpenStack Foundation'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'from'
name|'oslo_config'
name|'import'
name|'cfg'
newline|'\n'
nl|'\n'
DECL|variable|GROUP_NAME
name|'GROUP_NAME'
op|'='
string|"'spice'"
newline|'\n'
DECL|variable|spice_opt_group
name|'spice_opt_group'
op|'='
name|'cfg'
op|'.'
name|'OptGroup'
op|'('
name|'GROUP_NAME'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|enabled_opt
name|'enabled_opt'
op|'='
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'enabled'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'False'
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nEnable spice related features.\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|agent_enabled_opt
name|'agent_enabled_opt'
op|'='
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'agent_enabled'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'True'
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nEnable the spice guest agent support.\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|html5proxy_base_url_opt
name|'html5proxy_base_url_opt'
op|'='
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'html5proxy_base_url'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'http://127.0.0.1:6082/spice_auto.html'"
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nLocation of spice HTML5 console proxy, in the form\n"http://127.0.0.1:6082/spice_auto.html"\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|html5proxy_host_opt
name|'html5proxy_host_opt'
op|'='
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'html5proxy_host'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'0.0.0.0'"
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nHost on which to listen for incoming requests\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|html5proxy_port_opt
name|'html5proxy_port_opt'
op|'='
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'html5proxy_port'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'6082'
op|','
nl|'\n'
DECL|variable|min
name|'min'
op|'='
number|'1'
op|','
nl|'\n'
DECL|variable|max
name|'max'
op|'='
number|'65535'
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nPort on which to listen for incoming requests\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|server_listen_opt
name|'server_listen_opt'
op|'='
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'server_listen'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'127.0.0.1'"
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nIP address on which instance spice server should listen\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|server_proxyclient_address_opt
name|'server_proxyclient_address_opt'
op|'='
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'server_proxyclient_address'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'127.0.0.1'"
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nThe address to which proxy clients (like nova-spicehtml5proxy) should connect\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|keymap_opt
name|'keymap_opt'
op|'='
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'keymap'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'en-us'"
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nKeymap for spice\n"""'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|ALL_OPTS
name|'ALL_OPTS'
op|'='
op|'['
name|'html5proxy_base_url_opt'
op|','
nl|'\n'
name|'server_listen_opt'
op|','
nl|'\n'
name|'server_proxyclient_address_opt'
op|','
nl|'\n'
name|'enabled_opt'
op|','
nl|'\n'
name|'agent_enabled_opt'
op|','
nl|'\n'
name|'keymap_opt'
op|','
nl|'\n'
name|'html5proxy_host_opt'
op|','
nl|'\n'
name|'html5proxy_port_opt'
op|']'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|CLI_OPTS
name|'CLI_OPTS'
op|'='
op|'['
name|'html5proxy_host_opt'
op|','
nl|'\n'
name|'html5proxy_port_opt'
op|']'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|register_opts
name|'def'
name|'register_opts'
op|'('
name|'conf'
op|')'
op|':'
newline|'\n'
indent|' '
name|'conf'
op|'.'
name|'register_opts'
op|'('
name|'ALL_OPTS'
op|','
name|'group'
op|'='
name|'spice_opt_group'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|register_cli_opts
dedent|''
name|'def'
name|'register_cli_opts'
op|'('
name|'conf'
op|')'
op|':'
newline|'\n'
indent|' '
name|'conf'
op|'.'
name|'register_cli_opts'
op|'('
name|'CLI_OPTS'
op|','
name|'group'
op|'='
name|'spice_opt_group'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|list_opts
dedent|''
name|'def'
name|'list_opts'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'{'
name|'spice_opt_group'
op|':'
name|'ALL_OPTS'
op|'}'
newline|'\n'
dedent|''
endmarker|''
end_unit | 0.578448 | 0.077134 |
from odps.compat import irange
from odps.tests.core import TestBase, tn
from odps.tunnel import CompressOption
# Volume/partition names used by the tests below.  The volume names go
# through tn(), which presumably namespaces test resources -- see
# odps.tests.core for its exact behaviour.
TEST_PARTED_VOLUME_NAME = tn('pyodps_test_p_volume')
TEST_FS_VOLUME_NAME = tn('pyodps_test_fs_volume')
TEST_PARTITION_NAME = 'pyodps_test_partition'
TEST_FILE_NAME = 'test_output_file'
TEST_BLOCK_SIZE = 1048500   # payload size in bytes (~1 MB)
TEST_MODULUS = 251          # payload byte values cycle mod 251
class Test(TestBase):
    """Round-trip tests for the volume tunnel: upload content to a parted or
    file-system volume and read it back, with and without compression."""

    def tearDown(self):
        # Drop any volumes a test may have left behind.
        for vol_name in (TEST_PARTED_VOLUME_NAME, TEST_FS_VOLUME_NAME):
            if self.odps.exist_volume(vol_name):
                self.odps.delete_volume(vol_name)
        super(Test, self).tearDown()

    @staticmethod
    def _gen_byte_block():
        # Deterministic payload: byte i equals i mod TEST_MODULUS.
        return bytes(bytearray(i % TEST_MODULUS for i in irange(TEST_BLOCK_SIZE)))

    def _get_test_partition(self):
        # Recreate the parted volume from scratch so each test starts clean.
        if self.odps.exist_volume(TEST_PARTED_VOLUME_NAME):
            self.odps.delete_volume(TEST_PARTED_VOLUME_NAME)
        self.odps.create_parted_volume(TEST_PARTED_VOLUME_NAME)
        return self.odps.get_volume_partition(TEST_PARTED_VOLUME_NAME, TEST_PARTITION_NAME)

    def _get_test_fs(self):
        # Recreate the file-system volume from scratch.
        if self.odps.exist_volume(TEST_FS_VOLUME_NAME):
            self.odps.delete_volume(TEST_FS_VOLUME_NAME)
        self.odps.create_fs_volume(TEST_FS_VOLUME_NAME)
        return self.odps.get_volume(TEST_FS_VOLUME_NAME)

    def testTextUploadDownload(self):
        # \r\n and \r in the uploaded text come back normalised to \n when
        # read line by line.
        text_content = 'Life is short, \r\n Java is tedious. \n\n\r\nI use PyODPS. \n\n'
        expect_lines = ['Life is short, \n', ' Java is tedious. \n', '\n', '\n', 'I use PyODPS. \n', '\n']
        partition = self._get_test_partition()
        with partition.open_writer() as writer:
            writer.write(TEST_FILE_NAME, text_content)
        with partition.open_reader(TEST_FILE_NAME) as reader:
            assert expect_lines == list(reader)

    def testRawUploadDownloadGreenlet(self):
        payload = self._gen_byte_block()
        partition = self._get_test_partition()
        with partition.open_writer() as writer:
            writer.write(TEST_FILE_NAME, payload)
        with partition.open_reader(TEST_FILE_NAME) as reader:
            assert reader.read() == payload

    def testRawUploadDownloadThread(self):
        from odps.tunnel import io
        io._FORCE_THREAD = True  # force the thread-based IO backend
        payload = self._gen_byte_block()
        partition = self._get_test_partition()
        with partition.open_writer() as writer:
            writer.write(TEST_FILE_NAME, payload)
        with partition.open_reader(TEST_FILE_NAME) as reader:
            assert reader.read() == payload

    def testZLibUploadDownload(self):
        payload = self._gen_byte_block()
        comp_option = CompressOption(level=9)
        partition = self._get_test_partition()
        with partition.open_writer(compress_option=comp_option) as writer:
            writer.write(TEST_FILE_NAME, payload, compress=True)
        with partition.open_reader(TEST_FILE_NAME, compress_option=comp_option) as reader:
            assert reader.read() == payload

    def testFSRawUploadDownload(self):
        payload = self._gen_byte_block()
        vol = self._get_test_fs()
        with vol.open_writer(TEST_FILE_NAME) as writer:
            writer.write(payload)
        with vol.open_reader(TEST_FILE_NAME) as reader:
            assert reader.read() == payload

    def testFSZLibUploadDownload(self):
        payload = self._gen_byte_block()
        comp_option = CompressOption(level=9)
        vol = self._get_test_fs()
        with vol.open_writer(TEST_FILE_NAME, compress_option=comp_option) as writer:
            writer.write(payload)
        with vol.open_reader(TEST_FILE_NAME, compress_option=comp_option) as reader:
            # Read in odd-sized chunks to exercise partial decompression.
            parts = []
            while True:
                chunk = reader.read(10003)
                if not chunk:
                    break
                parts.append(chunk)
            self.assertEqual(bytes().join(parts), payload)
from odps.compat import irange
from odps.tests.core import TestBase, tn
from odps.tunnel import CompressOption
TEST_PARTED_VOLUME_NAME = tn('pyodps_test_p_volume')
TEST_FS_VOLUME_NAME = tn('pyodps_test_fs_volume')
TEST_PARTITION_NAME = 'pyodps_test_partition'
TEST_FILE_NAME = 'test_output_file'
TEST_BLOCK_SIZE = 1048500
TEST_MODULUS = 251
class Test(TestBase):
    """Integration tests for volume tunnel upload/download.

    Covers both partitioned volumes and FS volumes, with raw, text and
    zlib-compressed payloads. Each test builds a deterministic byte block,
    writes it through a tunnel writer and asserts the reader returns the
    identical content.
    """

    def tearDown(self):
        # Drop any volumes a test created so runs stay isolated.
        if self.odps.exist_volume(TEST_PARTED_VOLUME_NAME):
            self.odps.delete_volume(TEST_PARTED_VOLUME_NAME)
        if self.odps.exist_volume(TEST_FS_VOLUME_NAME):
            self.odps.delete_volume(TEST_FS_VOLUME_NAME)
        super(Test, self).tearDown()

    @staticmethod
    def _gen_byte_block():
        """Return a deterministic TEST_BLOCK_SIZE-byte payload (byte i == i % TEST_MODULUS)."""
        return bytes(bytearray([iid % TEST_MODULUS for iid in irange(TEST_BLOCK_SIZE)]))

    def _get_test_partition(self):
        """Recreate the parted test volume from scratch and return its test partition."""
        if self.odps.exist_volume(TEST_PARTED_VOLUME_NAME):
            self.odps.delete_volume(TEST_PARTED_VOLUME_NAME)
        self.odps.create_parted_volume(TEST_PARTED_VOLUME_NAME)
        return self.odps.get_volume_partition(TEST_PARTED_VOLUME_NAME, TEST_PARTITION_NAME)

    def _get_test_fs(self):
        """Recreate the FS test volume from scratch and return it."""
        if self.odps.exist_volume(TEST_FS_VOLUME_NAME):
            self.odps.delete_volume(TEST_FS_VOLUME_NAME)
        self.odps.create_fs_volume(TEST_FS_VOLUME_NAME)
        return self.odps.get_volume(TEST_FS_VOLUME_NAME)

    def testTextUploadDownload(self):
        """Text written with mixed \\r\\n / \\n / \\r line endings reads back as normalized \\n lines."""
        text_content = 'Life is short, \r\n Java is tedious. \n\n\r\nI use PyODPS. \n\n'
        expect_lines = ['Life is short, \n', ' Java is tedious. \n', '\n', '\n', 'I use PyODPS. \n', '\n']
        partition = self._get_test_partition()
        with partition.open_writer() as writer:
            writer.write(TEST_FILE_NAME, text_content)
        with partition.open_reader(TEST_FILE_NAME) as reader:
            actual_lines = [line for line in reader]
            assert expect_lines == actual_lines

    def testRawUploadDownloadGreenlet(self):
        """Raw bytes survive a round trip through a parted volume (default greenlet I/O)."""
        block = self._gen_byte_block()
        partition = self._get_test_partition()
        with partition.open_writer() as writer:
            writer.write(TEST_FILE_NAME, block)
        with partition.open_reader(TEST_FILE_NAME) as reader:
            assert reader.read() == block

    def testRawUploadDownloadThread(self):
        """Same raw round trip, but forcing the thread-based tunnel I/O backend."""
        from odps.tunnel import io
        # Flip the module switch so the tunnel uses threads instead of greenlets.
        io._FORCE_THREAD = True
        block = self._gen_byte_block()
        partition = self._get_test_partition()
        with partition.open_writer() as writer:
            writer.write(TEST_FILE_NAME, block)
        with partition.open_reader(TEST_FILE_NAME) as reader:
            assert reader.read() == block

    def testZLibUploadDownload(self):
        """Zlib-compressed upload/download on a parted volume is lossless."""
        block = self._gen_byte_block()
        comp_option = CompressOption(level=9)
        partition = self._get_test_partition()
        with partition.open_writer(compress_option=comp_option) as writer:
            writer.write(TEST_FILE_NAME, block, compress=True)
        with partition.open_reader(TEST_FILE_NAME, compress_option=comp_option) as reader:
            assert reader.read() == block

    def testFSRawUploadDownload(self):
        """Raw bytes survive a round trip through a file in an FS volume."""
        block = self._gen_byte_block()
        vol = self._get_test_fs()
        with vol.open_writer(TEST_FILE_NAME) as writer:
            writer.write(block)
        with vol.open_reader(TEST_FILE_NAME) as reader:
            assert reader.read() == block

    def testFSZLibUploadDownload(self):
        """Zlib round trip on an FS volume, read back in fixed-size chunks."""
        block = self._gen_byte_block()
        comp_option = CompressOption(level=9)
        vol = self._get_test_fs()
        with vol.open_writer(TEST_FILE_NAME, compress_option=comp_option) as writer:
            writer.write(block)
        with vol.open_reader(TEST_FILE_NAME, compress_option=comp_option) as reader:
            parts = []
            # 10003-byte reads — presumably chosen so chunk boundaries do not
            # line up with internal buffer sizes; exercises partial reads.
            while True:
                b = reader.read(10003)
                if not b:
                    break
                parts.append(b)
            self.assertEqual(bytes().join(parts), block)