Dataset columns (name, type, value range):

| Column | Type | Range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2–1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4–245 |
| max_stars_repo_name | string | length 6–130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 | 1–191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 4–245 |
| max_issues_repo_name | string | length 6–130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 | 1–67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 4–245 |
| max_forks_repo_name | string | length 6–130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 | 1–105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 2–1.02M |
| avg_line_length | float64 | 1–417k |
| max_line_length | int64 | 1–987k |
| alphanum_fraction | float64 | 0–1 |
| content_no_comment | string | length 0–1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
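For orientation, the sketch below shows one record of this dump as a plain Python dict. The field names come from the schema above and the values mirror the first row that follows; it is illustrative only, not an exact reproduction of the record.

# Illustrative only: one record of the dump as a plain Python dict.
# Field names follow the schema above; values mirror the first row below.
record = {
    "hexsha": "f70ae444d51b1c010b6a015103dbd35afc54919e",  # 40-character blob hash
    "size": 1352,                                          # file size in bytes
    "ext": "py",
    "lang": "Python",
    "max_stars_repo_path": "python/old/hiddenBlock.py",
    "max_stars_repo_name": "BenOsborn/Cerci",
    "max_stars_repo_head_hexsha": "5785ae0c9db8a88a5ac8d91aed29cdf0c0c7854a",
    "max_stars_repo_licenses": ["Apache-2.0"],
    "max_stars_count": None,            # null (⌀) columns load as None
    # the max_issues_* and max_forks_* columns follow the same pattern as max_stars_*
    "content": "from resources import relu, learnFunc, dot\n...",  # full file text
    "avg_line_length": 33.8,
    "max_line_length": 85,
    "alphanum_fraction": 0.579882,
    "content_no_comment": "...",        # same file with comments stripped
    "is_comment_constant_removed": True,
    "is_sharp_comment_removed": True,
}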
f70ae444d51b1c010b6a015103dbd35afc54919e
| 1,352
|
py
|
Python
|
python/old/hiddenBlock.py
|
BenOsborn/Cerci
|
5785ae0c9db8a88a5ac8d91aed29cdf0c0c7854a
|
[
"Apache-2.0"
] | null | null | null |
python/old/hiddenBlock.py
|
BenOsborn/Cerci
|
5785ae0c9db8a88a5ac8d91aed29cdf0c0c7854a
|
[
"Apache-2.0"
] | null | null | null |
python/old/hiddenBlock.py
|
BenOsborn/Cerci
|
5785ae0c9db8a88a5ac8d91aed29cdf0c0c7854a
|
[
"Apache-2.0"
] | null | null | null |
from resources import relu, learnFunc, dot
class HiddenBlock:
def __init__(self, weights, bias):
self.weights = weights
self.bias = bias
def feedForward(self, hidden_inputs):
output = [
relu(
dot(hidden_inputs, weights) + self.bias
)
for weights in self.weights]
return output
def train(self, hidden_inputs, hidden_errors):
error = sum(hidden_errors) / len(hidden_errors)
predictions = self.feedForward(hidden_inputs)
prevErrors = []
for y in range(len(self.weights)):
for x in range(len(self.weights[0])):
prevError = error*relu(predictions[y], deriv=True)*self.weights[y][x]
prevErrors.append(prevError)
for y in range(len(self.weights)):
for x in range(len(self.weights[0])):
update = error*relu(predictions[y], deriv=True)*hidden_inputs[x]
learn_rate = learnFunc(update)
self.weights[y][x] -= learn_rate*update
biasUpdate = 0
for x in range(len(self.weights)):
biasUpdate += error*relu(predictions[x], deriv=True)/len(predictions)
learn_rate = learnFunc(biasUpdate)
self.bias -= learn_rate*biasUpdate
return prevErrors
| 33.8
| 85
| 0.579882
|
from resources import relu, learnFunc, dot
class HiddenBlock:
def __init__(self, weights, bias):
self.weights = weights
self.bias = bias
def feedForward(self, hidden_inputs):
output = [
relu(
dot(hidden_inputs, weights) + self.bias
)
for weights in self.weights]
return output
def train(self, hidden_inputs, hidden_errors):
error = sum(hidden_errors) / len(hidden_errors)
predictions = self.feedForward(hidden_inputs)
prevErrors = []
for y in range(len(self.weights)):
for x in range(len(self.weights[0])):
prevError = error*relu(predictions[y], deriv=True)*self.weights[y][x]
prevErrors.append(prevError)
for y in range(len(self.weights)):
for x in range(len(self.weights[0])):
update = error*relu(predictions[y], deriv=True)*hidden_inputs[x]
learn_rate = learnFunc(update)
self.weights[y][x] -= learn_rate*update
biasUpdate = 0
for x in range(len(self.weights)):
biasUpdate += error*relu(predictions[x], deriv=True)/len(predictions)
learn_rate = learnFunc(biasUpdate)
self.bias -= learn_rate*biasUpdate
return prevErrors
| true
| true
|
f70ae5186d99b2365c6e21842c72a147f0715710
| 9,285
|
py
|
Python
|
tests/tensorflow/test_tensorflow_model_export.py
|
0wu/mlflow
|
2b5a21af05defcfa80255c081b5d9f07443f3f64
|
[
"Apache-2.0"
] | null | null | null |
tests/tensorflow/test_tensorflow_model_export.py
|
0wu/mlflow
|
2b5a21af05defcfa80255c081b5d9f07443f3f64
|
[
"Apache-2.0"
] | null | null | null |
tests/tensorflow/test_tensorflow_model_export.py
|
0wu/mlflow
|
2b5a21af05defcfa80255c081b5d9f07443f3f64
|
[
"Apache-2.0"
] | null | null | null |
# pep8: disable=E501
from __future__ import print_function
import collections
import os
import pandas
import shutil
import unittest
import pandas as pd
import sklearn.datasets as datasets
import tensorflow as tf
from mlflow import tensorflow, pyfunc
from mlflow import tracking
from mlflow.utils.file_utils import TempDir
class TestModelExport(unittest.TestCase):
def helper(self, feature_spec, tmp, estimator, df):
"""
This function handles exporting, logging, loading back, and predicting on an estimator for
testing purposes.
"""
receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
saved_estimator_path = tmp.path("model")
os.makedirs(saved_estimator_path)
# Saving TensorFlow model.
saved_estimator_path = estimator.export_savedmodel(saved_estimator_path,
receiver_fn).decode("utf-8")
# Logging the TensorFlow model just saved.
tensorflow.log_saved_model(saved_model_dir=saved_estimator_path,
signature_def_key="predict",
artifact_path=tmp.path("hello"))
# Loading the saved TensorFlow model as a pyfunc.
x = pyfunc.load_pyfunc(saved_estimator_path)
# Predicting on the dataset using the pyfunc.
return x.predict(df)
def test_log_saved_model(self):
# This tests model logging capabilities on the sklearn.iris dataset.
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
trainingFeatures = {}
for i in range(0, 2):
# TensorFlow is fickle about feature names, so we remove offending characters
iris.feature_names[i] = iris.feature_names[i].replace(" ", "")
iris.feature_names[i] = iris.feature_names[i].replace("(", "")
iris.feature_names[i] = iris.feature_names[i].replace(")", "")
trainingFeatures[iris.feature_names[i]] = iris.data[:, i:i+1]
tf_feat_cols = []
feature_names = iris.feature_names[:2]
# Creating TensorFlow-specific numeric columns for input.
for col in iris.feature_names[:2]:
tf_feat_cols.append(tf.feature_column.numeric_column(col))
# Creating input training function.
input_train = tf.estimator.inputs.numpy_input_fn(trainingFeatures,
y,
shuffle=False,
batch_size=1)
# Creating Deep Neural Network Regressor.
estimator = tf.estimator.DNNRegressor(feature_columns=tf_feat_cols,
hidden_units=[1])
# Training and creating expected predictions on training dataset.
estimator.train(input_train, steps=10)
# Saving the estimator's prediction on the training data; assume the DNNRegressor
# produces a single output column named 'predictions'
pred_col = "predictions"
estimator_preds = [s[pred_col] for s in estimator.predict(input_train)]
estimator_preds_df = pd.DataFrame({pred_col: estimator_preds})
old_tracking_uri = tracking.get_tracking_uri()
# should_start_run tests whether or not calling log_model() automatically starts a run.
for should_start_run in [False, True]:
with TempDir(chdr=True, remove_on_exit=True) as tmp:
try:
# Creating dict of features names (str) to placeholders (tensors)
feature_spec = {}
for name in feature_names:
feature_spec[name] = tf.placeholder("float", name=name, shape=[150])
tracking.set_tracking_uri("test")
if should_start_run:
tracking.start_run()
pyfunc_preds_df = self.helper(feature_spec, tmp, estimator,
pandas.DataFrame(data=X, columns=feature_names))
# Asserting that the loaded model predictions are as expected.
assert estimator_preds_df.equals(pyfunc_preds_df)
finally:
# Restoring the old logging location.
tracking.end_run()
tracking.set_tracking_uri(old_tracking_uri)
def test_categorical_columns(self):
"""
This tests logging capabilities on datasets with categorical columns.
See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/get_started/\
regression/imports85.py
for reference code.
"""
with TempDir(chdr=False, remove_on_exit=True) as tmp:
path = os.path.abspath("tests/data/uci-autos-imports-85.data")
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("body-style", [""]),
("curb-weight", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
])
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
df = pandas.read_csv(path, names=types.keys(), dtype=types, na_values="?")
df = df.dropna()
# Extract the label from the features dataframe.
y_train = df.pop("price")
# Creating the input training function required.
trainingFeatures = {}
for i in df:
trainingFeatures[i] = df[i].values
input_train = tf.estimator.inputs.numpy_input_fn(trainingFeatures,
y_train.values,
shuffle=False,
batch_size=1)
# Creating the feature columns required for the DNNRegressor.
body_style_vocab = ["hardtop", "wagon", "sedan", "hatchback", "convertible"]
body_style = tf.feature_column.categorical_column_with_vocabulary_list(
key="body-style", vocabulary_list=body_style_vocab)
feature_columns = [
tf.feature_column.numeric_column(key="curb-weight"),
tf.feature_column.numeric_column(key="highway-mpg"),
# Since this is a DNN model, convert categorical columns from sparse
# to dense.
# Wrap them in an `indicator_column` to create a
# one-hot vector from the input.
tf.feature_column.indicator_column(body_style)
]
# Build a DNNRegressor, with 2x20-unit hidden layers, with the feature columns
# defined above as input.
estimator = tf.estimator.DNNRegressor(
hidden_units=[20, 20], feature_columns=feature_columns)
# Training the estimator.
estimator.train(input_fn=input_train, steps=10)
# Saving the estimator's prediction on the training data; assume the DNNRegressor
# produces a single output column named 'predictions'
pred_col = "predictions"
estimator_preds = [s[pred_col] for s in estimator.predict(input_train)]
estimator_preds_df = pd.DataFrame({pred_col: estimator_preds})
# Setting the logging such that it is in the temp folder and deleted after the test.
old_tracking_dir = tracking.get_tracking_uri()
tracking_dir = os.path.abspath(tmp.path("mlruns"))
tracking.set_tracking_uri("file://%s" % tracking_dir)
tracking.start_run()
try:
# Creating dict of features names (str) to placeholders (tensors)
feature_spec = {}
feature_spec["body-style"] = tf.placeholder("string",
name="body-style",
shape=[None])
feature_spec["curb-weight"] = tf.placeholder("float",
name="curb-weight",
shape=[None])
feature_spec["highway-mpg"] = tf.placeholder("float",
name="highway-mpg",
shape=[None])
pyfunc_preds_df = self.helper(feature_spec, tmp, estimator, df)
# Asserting that the loaded model predictions are as expected. Allow for some
# imprecision as this is expected with TensorFlow.
pandas.testing.assert_frame_equal(
pyfunc_preds_df, estimator_preds_df, check_less_precise=6)
finally:
# Restoring the old logging location.
tracking.end_run()
tracking.set_tracking_uri(old_tracking_dir)
| 49.919355
| 99
| 0.566505
|
from __future__ import print_function
import collections
import os
import pandas
import shutil
import unittest
import pandas as pd
import sklearn.datasets as datasets
import tensorflow as tf
from mlflow import tensorflow, pyfunc
from mlflow import tracking
from mlflow.utils.file_utils import TempDir
class TestModelExport(unittest.TestCase):
def helper(self, feature_spec, tmp, estimator, df):
receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
saved_estimator_path = tmp.path("model")
os.makedirs(saved_estimator_path)
saved_estimator_path = estimator.export_savedmodel(saved_estimator_path,
receiver_fn).decode("utf-8")
tensorflow.log_saved_model(saved_model_dir=saved_estimator_path,
signature_def_key="predict",
artifact_path=tmp.path("hello"))
x = pyfunc.load_pyfunc(saved_estimator_path)
return x.predict(df)
def test_log_saved_model(self):
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
trainingFeatures = {}
for i in range(0, 2):
iris.feature_names[i] = iris.feature_names[i].replace(" ", "")
iris.feature_names[i] = iris.feature_names[i].replace("(", "")
iris.feature_names[i] = iris.feature_names[i].replace(")", "")
trainingFeatures[iris.feature_names[i]] = iris.data[:, i:i+1]
tf_feat_cols = []
feature_names = iris.feature_names[:2]
for col in iris.feature_names[:2]:
tf_feat_cols.append(tf.feature_column.numeric_column(col))
input_train = tf.estimator.inputs.numpy_input_fn(trainingFeatures,
y,
shuffle=False,
batch_size=1)
estimator = tf.estimator.DNNRegressor(feature_columns=tf_feat_cols,
hidden_units=[1])
estimator.train(input_train, steps=10)
# produces a single output column named 'predictions'
pred_col = "predictions"
estimator_preds = [s[pred_col] for s in estimator.predict(input_train)]
estimator_preds_df = pd.DataFrame({pred_col: estimator_preds})
old_tracking_uri = tracking.get_tracking_uri()
# should_start_run tests whether or not calling log_model() automatically starts a run.
for should_start_run in [False, True]:
with TempDir(chdr=True, remove_on_exit=True) as tmp:
try:
# Creating dict of features names (str) to placeholders (tensors)
feature_spec = {}
for name in feature_names:
feature_spec[name] = tf.placeholder("float", name=name, shape=[150])
tracking.set_tracking_uri("test")
if should_start_run:
tracking.start_run()
pyfunc_preds_df = self.helper(feature_spec, tmp, estimator,
pandas.DataFrame(data=X, columns=feature_names))
# Asserting that the loaded model predictions are as expected.
assert estimator_preds_df.equals(pyfunc_preds_df)
finally:
# Restoring the old logging location.
tracking.end_run()
tracking.set_tracking_uri(old_tracking_uri)
def test_categorical_columns(self):
with TempDir(chdr=False, remove_on_exit=True) as tmp:
path = os.path.abspath("tests/data/uci-autos-imports-85.data")
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("body-style", [""]),
("curb-weight", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
])
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
df = pandas.read_csv(path, names=types.keys(), dtype=types, na_values="?")
df = df.dropna()
# Extract the label from the features dataframe.
y_train = df.pop("price")
# Creating the input training function required.
trainingFeatures = {}
for i in df:
trainingFeatures[i] = df[i].values
input_train = tf.estimator.inputs.numpy_input_fn(trainingFeatures,
y_train.values,
shuffle=False,
batch_size=1)
# Creating the feature columns required for the DNNRegressor.
body_style_vocab = ["hardtop", "wagon", "sedan", "hatchback", "convertible"]
body_style = tf.feature_column.categorical_column_with_vocabulary_list(
key="body-style", vocabulary_list=body_style_vocab)
feature_columns = [
tf.feature_column.numeric_column(key="curb-weight"),
tf.feature_column.numeric_column(key="highway-mpg"),
# Since this is a DNN model, convert categorical columns from sparse
# to dense.
# Wrap them in an `indicator_column` to create a
# one-hot vector from the input.
tf.feature_column.indicator_column(body_style)
]
# Build a DNNRegressor, with 2x20-unit hidden layers, with the feature columns
# defined above as input.
estimator = tf.estimator.DNNRegressor(
hidden_units=[20, 20], feature_columns=feature_columns)
# Training the estimator.
estimator.train(input_fn=input_train, steps=10)
# Saving the estimator's prediction on the training data; assume the DNNRegressor
pred_col = "predictions"
estimator_preds = [s[pred_col] for s in estimator.predict(input_train)]
estimator_preds_df = pd.DataFrame({pred_col: estimator_preds})
old_tracking_dir = tracking.get_tracking_uri()
tracking_dir = os.path.abspath(tmp.path("mlruns"))
tracking.set_tracking_uri("file://%s" % tracking_dir)
tracking.start_run()
try:
feature_spec = {}
feature_spec["body-style"] = tf.placeholder("string",
name="body-style",
shape=[None])
feature_spec["curb-weight"] = tf.placeholder("float",
name="curb-weight",
shape=[None])
feature_spec["highway-mpg"] = tf.placeholder("float",
name="highway-mpg",
shape=[None])
pyfunc_preds_df = self.helper(feature_spec, tmp, estimator, df)
pandas.testing.assert_frame_equal(
pyfunc_preds_df, estimator_preds_df, check_less_precise=6)
finally:
tracking.end_run()
tracking.set_tracking_uri(old_tracking_dir)
| true
| true
|
f70ae55504722915015de818a6e0d47a6ddfbf80
| 4,881
|
py
|
Python
|
test/functional/wallet_import_with_label.py
|
natangl/refnet
|
59c1f1cdae3d79b1c6756185fe8051bd656f1e49
|
[
"MIT"
] | null | null | null |
test/functional/wallet_import_with_label.py
|
natangl/refnet
|
59c1f1cdae3d79b1c6756185fe8051bd656f1e49
|
[
"MIT"
] | null | null | null |
test/functional/wallet_import_with_label.py
|
natangl/refnet
|
59c1f1cdae3d79b1c6756185fe8051bd656f1e49
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018 The Refnet Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the behavior of RPC importprivkey on set and unset labels of
addresses.
It tests different cases in which an address is imported with importaddress
with or without a label and then its private key is imported with importprivkey
with and without a label.
"""
from test_framework.test_framework import RefnetTestFramework
from test_framework.wallet_util import test_address
class ImportWithLabel(RefnetTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
"""Main test logic"""
self.log.info(
"Test importaddress with label and importprivkey without label."
)
self.log.info("Import a watch-only address with a label.")
address = self.nodes[0].getnewaddress()
label = "Test Label"
self.nodes[1].importaddress(address, label)
test_address(self.nodes[1],
address,
iswatchonly=True,
ismine=False,
label=label)
self.log.info(
"Import the watch-only address's private key without a "
"label and the address should keep its label."
)
priv_key = self.nodes[0].dumpprivkey(address)
self.nodes[1].importprivkey(priv_key)
test_address(self.nodes[1],
address,
label=label)
self.log.info(
"Test importaddress without label and importprivkey with label."
)
self.log.info("Import a watch-only address without a label.")
address2 = self.nodes[0].getnewaddress()
self.nodes[1].importaddress(address2)
test_address(self.nodes[1],
address2,
iswatchonly=True,
ismine=False,
label="")
self.log.info(
"Import the watch-only address's private key with a "
"label and the address should have its label updated."
)
priv_key2 = self.nodes[0].dumpprivkey(address2)
label2 = "Test Label 2"
self.nodes[1].importprivkey(priv_key2, label2)
test_address(self.nodes[1],
address2,
label=label2)
self.log.info("Test importaddress with label and importprivkey with label.")
self.log.info("Import a watch-only address with a label.")
address3 = self.nodes[0].getnewaddress()
label3_addr = "Test Label 3 for importaddress"
self.nodes[1].importaddress(address3, label3_addr)
test_address(self.nodes[1],
address3,
iswatchonly=True,
ismine=False,
label=label3_addr)
self.log.info(
"Import the watch-only address's private key with a "
"label and the address should have its label updated."
)
priv_key3 = self.nodes[0].dumpprivkey(address3)
label3_priv = "Test Label 3 for importprivkey"
self.nodes[1].importprivkey(priv_key3, label3_priv)
test_address(self.nodes[1],
address3,
label=label3_priv)
self.log.info(
"Test importprivkey won't label new dests with the same "
"label as others labeled dests for the same key."
)
self.log.info("Import a watch-only legacy address with a label.")
address4 = self.nodes[0].getnewaddress()
label4_addr = "Test Label 4 for importaddress"
self.nodes[1].importaddress(address4, label4_addr)
test_address(self.nodes[1],
address4,
iswatchonly=True,
ismine=False,
label=label4_addr,
embedded=None)
self.log.info(
"Import the watch-only address's private key without a "
"label and new destinations for the key should have an "
"empty label while the 'old' destination should keep "
"its label."
)
priv_key4 = self.nodes[0].dumpprivkey(address4)
self.nodes[1].importprivkey(priv_key4)
embedded_addr = self.nodes[1].getaddressinfo(address4)['embedded']['address']
test_address(self.nodes[1],
embedded_addr,
label="")
test_address(self.nodes[1],
address4,
label=label4_addr)
self.stop_nodes()
if __name__ == "__main__":
ImportWithLabel().main()
| 35.889706
| 85
| 0.589224
|
from test_framework.test_framework import RefnetTestFramework
from test_framework.wallet_util import test_address
class ImportWithLabel(RefnetTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info(
"Test importaddress with label and importprivkey without label."
)
self.log.info("Import a watch-only address with a label.")
address = self.nodes[0].getnewaddress()
label = "Test Label"
self.nodes[1].importaddress(address, label)
test_address(self.nodes[1],
address,
iswatchonly=True,
ismine=False,
label=label)
self.log.info(
"Import the watch-only address's private key without a "
"label and the address should keep its label."
)
priv_key = self.nodes[0].dumpprivkey(address)
self.nodes[1].importprivkey(priv_key)
test_address(self.nodes[1],
address,
label=label)
self.log.info(
"Test importaddress without label and importprivkey with label."
)
self.log.info("Import a watch-only address without a label.")
address2 = self.nodes[0].getnewaddress()
self.nodes[1].importaddress(address2)
test_address(self.nodes[1],
address2,
iswatchonly=True,
ismine=False,
label="")
self.log.info(
"Import the watch-only address's private key with a "
"label and the address should have its label updated."
)
priv_key2 = self.nodes[0].dumpprivkey(address2)
label2 = "Test Label 2"
self.nodes[1].importprivkey(priv_key2, label2)
test_address(self.nodes[1],
address2,
label=label2)
self.log.info("Test importaddress with label and importprivkey with label.")
self.log.info("Import a watch-only address with a label.")
address3 = self.nodes[0].getnewaddress()
label3_addr = "Test Label 3 for importaddress"
self.nodes[1].importaddress(address3, label3_addr)
test_address(self.nodes[1],
address3,
iswatchonly=True,
ismine=False,
label=label3_addr)
self.log.info(
"Import the watch-only address's private key with a "
"label and the address should have its label updated."
)
priv_key3 = self.nodes[0].dumpprivkey(address3)
label3_priv = "Test Label 3 for importprivkey"
self.nodes[1].importprivkey(priv_key3, label3_priv)
test_address(self.nodes[1],
address3,
label=label3_priv)
self.log.info(
"Test importprivkey won't label new dests with the same "
"label as others labeled dests for the same key."
)
self.log.info("Import a watch-only legacy address with a label.")
address4 = self.nodes[0].getnewaddress()
label4_addr = "Test Label 4 for importaddress"
self.nodes[1].importaddress(address4, label4_addr)
test_address(self.nodes[1],
address4,
iswatchonly=True,
ismine=False,
label=label4_addr,
embedded=None)
self.log.info(
"Import the watch-only address's private key without a "
"label and new destinations for the key should have an "
"empty label while the 'old' destination should keep "
"its label."
)
priv_key4 = self.nodes[0].dumpprivkey(address4)
self.nodes[1].importprivkey(priv_key4)
embedded_addr = self.nodes[1].getaddressinfo(address4)['embedded']['address']
test_address(self.nodes[1],
embedded_addr,
label="")
test_address(self.nodes[1],
address4,
label=label4_addr)
self.stop_nodes()
if __name__ == "__main__":
ImportWithLabel().main()
| true
| true
|
f70ae599068c451f51ac29a3025118f4af8d1413
| 2,139
|
py
|
Python
|
g_function_weak_coupling/G_function.py
|
helene-todd/XPPAUT_code
|
e4caf112c03889a68eed0f4e5fa9d9d436918914
|
[
"MIT"
] | null | null | null |
g_function_weak_coupling/G_function.py
|
helene-todd/XPPAUT_code
|
e4caf112c03889a68eed0f4e5fa9d9d436918914
|
[
"MIT"
] | null | null | null |
g_function_weak_coupling/G_function.py
|
helene-todd/XPPAUT_code
|
e4caf112c03889a68eed0f4e5fa9d9d436918914
|
[
"MIT"
] | null | null | null |
from matplotlib import cm, rcParams
import matplotlib.pyplot as plt
import numpy as np
import math as math
import random as rand
""" G(phi) function in Rinzel & Lewis' article (2003) under weak coupling """
""" This is under weak coupling theory, although one can note that gamma only serves to scale the function """
c = ['#aa3863', '#d97020', '#ef9f07', '#449775', '#3b7d86']
rcParams.update({'figure.autolayout': True})
def T(I):
return math.log(I/(I-1))
def G(phi, I, gamma):
if phi != 0 and phi != 1:
return gamma*(2/T(I))*(phi*math.sinh((1-phi)*T(I)) - (1-phi)*math.sinh(phi*T(I))) + gamma*(beta/(I*T(I)*T(I)))*(math.exp(phi*T(I)) - math.exp((1-phi)*T(I)))
else :
return 0
""" Varying Gamma """
gamma = [0.4, 0.3, 0.2, 0.1, 0.01]
beta = 0.1
I = 1.8
plt.figure(figsize=(8,5))
vector_phi = np.linspace(0,1,1000)
zero_line = np.zeros(len(vector_phi))
plt.plot(vector_phi, zero_line, color='black', linestyle='--')
k = 0
for g in gamma :
vector_G = []
for el in vector_phi:
vector_G.append(G(el, I, g))
vector_G = np.array(vector_G)
plt.plot(vector_phi, vector_G, label=f'$\gamma = {g}$', color = c[k])
k += 1
plt.xlabel('$\phi$', size=14)
plt.ylabel('$G(\phi)$', size=14)
plt.title(f'G function for $I={I}, \\beta={beta}$')
zero_crossings = np.where(np.diff(np.sign(vector_G-zero_line)))[0]
print(zero_crossings)
plt.legend(loc='upper left')
plt.savefig(f'G_function_range_gammas_I={I}.png', dpi=600)
plt.show()
plt.close()
""" Varying I """
"""
gamma = 1
beta = 0.2
I = [1.15, 1.2, 1.4]
plt.figure(figsize=(8,5))
vector_phi = np.linspace(0,1,1000)
zero_line = np.zeros(len(vector_phi))
plt.plot(vector_phi, zero_line, linestyle='--', color='k')
k = 0
for current in I :
vector_G = []
for el in vector_phi:
vector_G.append(G(el, current, gamma))
vector_G = np.array(vector_G)
plt.plot(vector_phi, vector_G, label=f'$I = {current}$', color = c[k])
k += 1
plt.xlabel('$\phi$', size=14)
plt.ylabel('$G(\phi)$', size=14)
zero_crossings = np.where(np.diff(np.sign(vector_G-zero_line)))[0]
print(zero_crossings)
plt.legend()
plt.show()
"""
| 25.464286
| 164
| 0.635344
|
from matplotlib import cm, rcParams
import matplotlib.pyplot as plt
import numpy as np
import math as math
import random as rand
c = ['#aa3863', '#d97020', '#ef9f07', '#449775', '#3b7d86']
rcParams.update({'figure.autolayout': True})
def T(I):
return math.log(I/(I-1))
def G(phi, I, gamma):
if phi != 0 and phi != 1:
return gamma*(2/T(I))*(phi*math.sinh((1-phi)*T(I)) - (1-phi)*math.sinh(phi*T(I))) + gamma*(beta/(I*T(I)*T(I)))*(math.exp(phi*T(I)) - math.exp((1-phi)*T(I)))
else :
return 0
gamma = [0.4, 0.3, 0.2, 0.1, 0.01]
beta = 0.1
I = 1.8
plt.figure(figsize=(8,5))
vector_phi = np.linspace(0,1,1000)
zero_line = np.zeros(len(vector_phi))
plt.plot(vector_phi, zero_line, color='black', linestyle='--')
k = 0
for g in gamma :
vector_G = []
for el in vector_phi:
vector_G.append(G(el, I, g))
vector_G = np.array(vector_G)
plt.plot(vector_phi, vector_G, label=f'$\gamma = {g}$', color = c[k])
k += 1
plt.xlabel('$\phi$', size=14)
plt.ylabel('$G(\phi)$', size=14)
plt.title(f'G function for $I={I}, \\beta={beta}$')
zero_crossings = np.where(np.diff(np.sign(vector_G-zero_line)))[0]
print(zero_crossings)
plt.legend(loc='upper left')
plt.savefig(f'G_function_range_gammas_I={I}.png', dpi=600)
plt.show()
plt.close()
| true
| true
|
f70ae60535808681e6fb238519e5687bcd959b2c
| 1,086
|
py
|
Python
|
exoplanet/theano_ops/starry/base_op.py
|
Junjun1guo/exoplanet
|
5df07b16cf7f8770f02fa53598ae3961021cfd0f
|
[
"MIT"
] | 2
|
2020-05-29T07:10:48.000Z
|
2021-04-07T06:43:53.000Z
|
exoplanet/theano_ops/starry/base_op.py
|
Junjun1guo/exoplanet
|
5df07b16cf7f8770f02fa53598ae3961021cfd0f
|
[
"MIT"
] | null | null | null |
exoplanet/theano_ops/starry/base_op.py
|
Junjun1guo/exoplanet
|
5df07b16cf7f8770f02fa53598ae3961021cfd0f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["StarryBaseOp"]
import pkg_resources
from theano import gof
from ..build_utils import get_compile_args, get_cache_version
class StarryBaseOp(gof.COp):
__props__ = ()
func_file = None
func_name = None
def __init__(self):
super(StarryBaseOp, self).__init__(self.func_file, self.func_name)
def c_code_cache_version(self):
return get_cache_version()
def c_headers(self, compiler):
return ["theano_helpers.h"]
def c_header_dirs(self, compiler):
return [
pkg_resources.resource_filename(__name__, "include"),
pkg_resources.resource_filename(__name__, "starry/starry"),
pkg_resources.resource_filename(__name__,
"starry/lib/eigen_3.3.3"),
pkg_resources.resource_filename(__name__,
"starry/lib/boost_1_66_0"),
]
def c_compile_args(self, compiler):
return get_compile_args(compiler)
| 26.487805
| 74
| 0.634438
|
from __future__ import division, print_function
__all__ = ["StarryBaseOp"]
import pkg_resources
from theano import gof
from ..build_utils import get_compile_args, get_cache_version
class StarryBaseOp(gof.COp):
__props__ = ()
func_file = None
func_name = None
def __init__(self):
super(StarryBaseOp, self).__init__(self.func_file, self.func_name)
def c_code_cache_version(self):
return get_cache_version()
def c_headers(self, compiler):
return ["theano_helpers.h"]
def c_header_dirs(self, compiler):
return [
pkg_resources.resource_filename(__name__, "include"),
pkg_resources.resource_filename(__name__, "starry/starry"),
pkg_resources.resource_filename(__name__,
"starry/lib/eigen_3.3.3"),
pkg_resources.resource_filename(__name__,
"starry/lib/boost_1_66_0"),
]
def c_compile_args(self, compiler):
return get_compile_args(compiler)
| true
| true
|
f70ae64b1876509c4ce63dc278cd4d9e00c288bd
| 293
|
py
|
Python
|
fileopener.py
|
PiSaucer/jumpcutter
|
3b5c723b3b70244471c26345c3bd686bf445b25b
|
[
"MIT"
] | null | null | null |
fileopener.py
|
PiSaucer/jumpcutter
|
3b5c723b3b70244471c26345c3bd686bf445b25b
|
[
"MIT"
] | null | null | null |
fileopener.py
|
PiSaucer/jumpcutter
|
3b5c723b3b70244471c26345c3bd686bf445b25b
|
[
"MIT"
] | null | null | null |
from tkinter import *
from tkinter.filedialog import askopenfilename
import time
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
print(filename)
time.sleep(1)
| 32.555556
| 97
| 0.774744
|
from tkinter import *
from tkinter.filedialog import askopenfilename
import time
Tk().withdraw()
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
print(filename)
time.sleep(1)
| true
| true
|
f70ae6e8124f0eeef44e323640c73e9d0141dc7c
| 4,098
|
py
|
Python
|
run_analyst.py
|
nevertheless-ui/TelegramData_Analyst
|
6c7b33560a2b8b26bce99c9a82efa6b4796d5828
|
[
"MIT"
] | null | null | null |
run_analyst.py
|
nevertheless-ui/TelegramData_Analyst
|
6c7b33560a2b8b26bce99c9a82efa6b4796d5828
|
[
"MIT"
] | null | null | null |
run_analyst.py
|
nevertheless-ui/TelegramData_Analyst
|
6c7b33560a2b8b26bce99c9a82efa6b4796d5828
|
[
"MIT"
] | null | null | null |
# Filename: analyst.py
"""Analyst is a tool to look up (and export selected) data and insights
from exported data from chats and channels in Telegram
using Python and PyQt5."""
import sys
import pandas as pd
from pathlib import Path
from PyQt5 import QtWidgets, QtCore
from PyQt5 import uic
from backend import (
converter,
handler,
)
__version__ = '0.1'
__author__ = 'Artyom Filippenko'
df = pd.DataFrame({'a': ['Mary', 'Jim', 'John'],
'b': [100, 200, 300],
'c': ['a', 'b', 'c']})
# VARS SECTION
# IMPORT LOCALE
IMPORT_WINDOW_TITLE = 'TelegramData Analyst - Import'
IMPORT_WINDOW_MSG = 'This software is designed for analysis of Telegram channels and chats.'
IMPORT_BROWSE_MSG = 'Open file'
IMPORT_PATHLINE_MSG = 'Please, add path to JSON file, exported from Telegram Application...'
IMPORT_BROWSE_BTN_NAME = 'Browse'
IMPORT_ANALYSE_BTN_NAME = 'Analyze'
IMPORT_PATH_MSG = 'File'
# ANALYST LOCALE
ANALYST_WINDOW_TITLE = 'TelegramData Analyst - Explorer'
ANALYST_STATUSBAR_PREFIX_MSG = 'Exploring data from json-file:'
ANALYST_WINDOW_MSG = 'Analyzing file'
ANALYST_RETURN_BTN_NAME = 'Return to import...'
ANALYST_EXPORT_BTN_NAME = 'Export results...'
# ANALYST LOCALE
#ALERT_WINDOW_TITLE = 'Alert!'
# UI path
IMPORT_UI_PATH = './frontend/import_data.ui'
MAIN_UI_PATH = './frontend/workspace.ui'
#ALERT_UI_PATH = './frontend/alert.ui'
class ImportWindow(QtWidgets.QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self._build()
self.ui.show()
def _build(self):
self.ui = uic.loadUi(IMPORT_UI_PATH)
# Locale
self.ui.setWindowTitle(IMPORT_WINDOW_TITLE)
self.ui.import_description_message.setText(IMPORT_WINDOW_MSG)
self.ui.browse_files_btn.setText(IMPORT_BROWSE_BTN_NAME)
self.ui.analyse_file_btn.setText(IMPORT_ANALYSE_BTN_NAME)
self.ui.import_file_pathline.setText(IMPORT_PATHLINE_MSG)
# Loading UI logic
self.ui.browse_files_btn.clicked.connect(self._browse_files)
self.ui.analyse_file_btn.clicked.connect(self._open_analyst)
def _browse_files(self):
import_file = QtWidgets.QFileDialog.getOpenFileName(self, IMPORT_BROWSE_MSG,
'./', "Json file (*.json)")
self.ui.import_file_pathline.setText(import_file[0])
def _open_analyst(self):
if self.ui.import_file_pathline.text() == IMPORT_PATHLINE_MSG:
json_file_path = ''
else:
json_file_path = Path(self.ui.import_file_pathline.text())
self.analyst = AnalysisWindow(self)
self.analyst.import_json_file(json_file_path)
self.analyst.update_table_view()
self.analyst.ui.statusbar.showMessage(ANALYST_STATUSBAR_PREFIX_MSG + ' ' + \
str(json_file_path))
self.ui.hide()
class AnalysisWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super().__init__(parent, QtCore.Qt.Window)
self._build()
self.ui.show()
#self.import_json_file()
#self.update_table_view()
def _build(self):
self.ui = uic.loadUi(MAIN_UI_PATH)
# Locale
self.ui.setWindowTitle(ANALYST_WINDOW_TITLE)
self.ui.return_btn.setText(ANALYST_RETURN_BTN_NAME)
self.ui.export_btn.setText(ANALYST_EXPORT_BTN_NAME)
# Loading UI logic
self.ui.return_btn.clicked.connect(self._return_to_import)
def _return_to_import(self):
self.ui.close()
self.parent().ui.show()
def import_json_file(self, json_file_path):
self._data = converter.convert_tg_json(json_file_path)
def update_table_view(self):
self.ui.test_msg.setText(str(df.columns))
self.model = handler.pandasModel(self._data)
self.ui.table_view.setModel(self.model)
self.ui.table_view.show()
def main():
app = QtWidgets.QApplication(sys.argv)
window = ImportWindow()
#window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 31.523077
| 92
| 0.678136
|
import sys
import pandas as pd
from pathlib import Path
from PyQt5 import QtWidgets, QtCore
from PyQt5 import uic
from backend import (
converter,
handler,
)
__version__ = '0.1'
__author__ = 'Artyom Filippenko'
df = pd.DataFrame({'a': ['Mary', 'Jim', 'John'],
'b': [100, 200, 300],
'c': ['a', 'b', 'c']})
IMPORT_WINDOW_TITLE = 'TelegramData Analyst - Import'
IMPORT_WINDOW_MSG = 'This software is designed for analysis of Telegram channels and chats.'
IMPORT_BROWSE_MSG = 'Open file'
IMPORT_PATHLINE_MSG = 'Please, add path to JSON file, exported from Telegram Application...'
IMPORT_BROWSE_BTN_NAME = 'Browse'
IMPORT_ANALYSE_BTN_NAME = 'Analyze'
IMPORT_PATH_MSG = 'File'
ANALYST_WINDOW_TITLE = 'TelegramData Analyst - Explorer'
ANALYST_STATUSBAR_PREFIX_MSG = 'Exploring data from json-file:'
ANALYST_WINDOW_MSG = 'Analyzing file'
ANALYST_RETURN_BTN_NAME = 'Return to import...'
ANALYST_EXPORT_BTN_NAME = 'Export results...'
IMPORT_UI_PATH = './frontend/import_data.ui'
MAIN_UI_PATH = './frontend/workspace.ui'
class ImportWindow(QtWidgets.QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self._build()
self.ui.show()
def _build(self):
self.ui = uic.loadUi(IMPORT_UI_PATH)
self.ui.setWindowTitle(IMPORT_WINDOW_TITLE)
self.ui.import_description_message.setText(IMPORT_WINDOW_MSG)
self.ui.browse_files_btn.setText(IMPORT_BROWSE_BTN_NAME)
self.ui.analyse_file_btn.setText(IMPORT_ANALYSE_BTN_NAME)
self.ui.import_file_pathline.setText(IMPORT_PATHLINE_MSG)
self.ui.browse_files_btn.clicked.connect(self._browse_files)
self.ui.analyse_file_btn.clicked.connect(self._open_analyst)
def _browse_files(self):
import_file = QtWidgets.QFileDialog.getOpenFileName(self, IMPORT_BROWSE_MSG,
'./', "Json file (*.json)")
self.ui.import_file_pathline.setText(import_file[0])
def _open_analyst(self):
if self.ui.import_file_pathline.text() == IMPORT_PATHLINE_MSG:
json_file_path = ''
else:
json_file_path = Path(self.ui.import_file_pathline.text())
self.analyst = AnalysisWindow(self)
self.analyst.import_json_file(json_file_path)
self.analyst.update_table_view()
self.analyst.ui.statusbar.showMessage(ANALYST_STATUSBAR_PREFIX_MSG + ' ' + \
str(json_file_path))
self.ui.hide()
class AnalysisWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super().__init__(parent, QtCore.Qt.Window)
self._build()
self.ui.show()
def _build(self):
self.ui = uic.loadUi(MAIN_UI_PATH)
self.ui.setWindowTitle(ANALYST_WINDOW_TITLE)
self.ui.return_btn.setText(ANALYST_RETURN_BTN_NAME)
self.ui.export_btn.setText(ANALYST_EXPORT_BTN_NAME)
self.ui.return_btn.clicked.connect(self._return_to_import)
def _return_to_import(self):
self.ui.close()
self.parent().ui.show()
def import_json_file(self, json_file_path):
self._data = converter.convert_tg_json(json_file_path)
def update_table_view(self):
self.ui.test_msg.setText(str(df.columns))
self.model = handler.pandasModel(self._data)
self.ui.table_view.setModel(self.model)
self.ui.table_view.show()
def main():
app = QtWidgets.QApplication(sys.argv)
window = ImportWindow()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| true
| true
|
f70ae90ab76967c88f0a8aa21711c21e46566272
| 2,516
|
py
|
Python
|
tests/1_local/test_ping.py
|
kpimparkar/cloudmesh-cloud
|
cb5ec6c2c8e5eb8c41a697cb67e72183808adb64
|
[
"Apache-2.0"
] | null | null | null |
tests/1_local/test_ping.py
|
kpimparkar/cloudmesh-cloud
|
cb5ec6c2c8e5eb8c41a697cb67e72183808adb64
|
[
"Apache-2.0"
] | null | null | null |
tests/1_local/test_ping.py
|
kpimparkar/cloudmesh-cloud
|
cb5ec6c2c8e5eb8c41a697cb67e72183808adb64
|
[
"Apache-2.0"
] | null | null | null |
###############################################################
# pytest -v --capture=no tests/1_local/test_name.py
# pytest -v tests/1_local/test_name.py
# pytest -v --capture=no tests/1_local/test_name.py:Test_name.<METHODNAME>
###############################################################
import pytest
from cloudmesh.common.StopWatch import StopWatch
from cloudmesh.common3.host import Host
from cloudmesh.common.Printer import Printer
from cloudmesh.common3.Benchmark import Benchmark
from cloudmesh.common.util import HEADING
Benchmark.debug()
# multiping only works if you have root, so we can not use it
# from multiping import MultiPing
hosts = ['127.0.0.1',
'localhost',
'www.indiana.edu',
'www.pbs.org',
'www.github.com',
'www.redhat.com',
'www.openstack.org',
'www.bbc.com',
'www.ec2instances.info',
'aws.amazon.com']
@pytest.mark.incremental
class TestPing:
def ping(self, processors=1):
StopWatch.start(f"total p={processors} c=1")
r = Host.ping(hosts, processors=processors, count=1)
StopWatch.stop(f"total p={processors} c=1")
return r
def test_internal_ping(self):
HEADING()
StopWatch.start("total _ping")
for host in hosts:
location = {
'ip': host,
'count': 1,
}
StopWatch.start(f"ping {host}")
result = Host._ping(location)
StopWatch.stop(f"ping {host}")
StopWatch.stop("total _ping")
assert result['success']
def test_ping_processor(self):
HEADING()
print()
for processors in range(1, len(hosts)):
print("Processors:", processors)
results = self.ping(processors=processors)
print(Printer.write(results,
order=['host',
'success',
'max',
'min',
'stddev']
))
for result in results:
assert result['success']
#
# only works if you have root, so not suitable
#
# def test_multi_ping(self):
# ping = MultiPing(hosts)
# responses, no_responses = ping(hosts, timeout=2, retry=1)
def test_benchmark(self):
HEADING()
StopWatch.benchmark(csv=True, sysinfo=False)
| 29.6
| 75
| 0.521463
| true
| true
|
|
f70ae99dd663fc32f1c74a2e029d50b8365dd95c
| 3,993
|
py
|
Python
|
tlib/networks/VGGnet_train.py
|
shallowyuan/cosegmentor-crf
|
c84a9418b70f3f3c7c6a7e998de5835182619f30
|
[
"BSD-2-Clause"
] | null | null | null |
tlib/networks/VGGnet_train.py
|
shallowyuan/cosegmentor-crf
|
c84a9418b70f3f3c7c6a7e998de5835182619f30
|
[
"BSD-2-Clause"
] | null | null | null |
tlib/networks/VGGnet_train.py
|
shallowyuan/cosegmentor-crf
|
c84a9418b70f3f3c7c6a7e998de5835182619f30
|
[
"BSD-2-Clause"
] | null | null | null |
import tensorflow as tf
from networks.network import Network
#define
n_classes = 21
_feat_stride = [16,]
anchor_scales = [8, 16, 32]
class VGGnet_train(Network):
def __init__(self, trainable=True):
self.inputs = []
self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3])
#self.im_info = tf.placeholder(tf.float32, shape=[None, 3])
#self.gt_boxes = tf.placeholder(tf.float32, shape=[None, 5])
self.keep_prob = tf.placeholder(tf.float32)
self.segmentation=tf.placeholder(tf.float32,shape=[None,900])
self.rois=tf.placeholder(tf.float32,shape=[None,5])
#self.mweights=tf.placeholder(tf.float32,shape=[None,2])
self.sweights=tf.placeholder(tf.bool,shape=[None])
self.labels=tf.placeholder(tf.int32,shape=[None])
self.layers = dict({'data':self.data, 'segmentation':self.segmentation, 'sweight':self.sweights, 'labels': self.labels, "rois": self.rois})
self.trainable = trainable
self.setup()
def setup(self):
(self.feed('data')
.conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)
.conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)
.max_pool(2, 2, 2, 2, padding='VALID', name='pool1')
.conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)
.conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)
.max_pool(2, 2, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 256, 1, 1, name='conv3_1')
.conv(3, 3, 256, 1, 1, name='conv3_2')
.conv(3, 3, 256, 1, 1, name='conv3_3')
.max_pool(2, 2, 2, 2, padding='VALID', name='pool3')
.conv(3, 3, 512, 1, 1, name='conv4_1')
.conv(3, 3, 512, 1, 1, name='conv4_2')
.conv(3, 3, 512, 1, 1, name='conv4_3'))
#=========ROIPOOLING=======
(self.feed('conv4_3','rois')
.roi_pool(7, 7, 1.0/16, name='pool_4')
.conv(3, 3, 512, 1, 1, name='conv5_1')
.conv(3, 3, 512, 1, 1, name='conv5_2')
.conv(3, 3, 512, 1, 1, name='conv5_3')
.max_pool(2, 2, 2, 2, padding='VALID', name='pool5'))
#========= RPN ============
# (self.feed('conv5_3')
# .conv(3,3,512,1,1,name='rpn_conv/3x3')
# .conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu = False, name='rpn_cls_score'))#
# (self.feed('rpn_cls_score','gt_boxes','im_info','data')
# .anchor_target_layer(_feat_stride, anchor_scales, name = 'rpn-data' ))#
# # Loss of rpn_cls & rpn_boxes
# (self.feed('rpn_conv/3x3')
# .conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu = False, name='rpn_bbox_pred'))
#========= RoI Proposal ============
# (self.feed('rpn_cls_score')
# .reshape_layer(2,name = 'rpn_cls_score_reshape')
# .softmax(name='rpn_cls_prob'))
#
# (self.feed('rpn_cls_prob')
# .reshape_layer(len(anchor_scales)*3*2,name = 'rpn_cls_prob_reshape'))
#
# (self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info')
# .proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name = 'rpn_rois'))
#
# (self.feed('rpn_rois','gt_boxes')
# .proposal_target_layer(n_classes,name = 'roi-data'))
#========= RCNN ============
(self.feed('pool5')
.fc(1024, name='fc6')
.dropout(0.5, name='drop6')
.fc(1024, name='fc7')
.dropout(0.5, name='drop7')
.fc(n_classes, relu=False, name='cls_score')
.softmax(name='cls_prob'))
# (self.feed('drop7')
# .fc(n_classes*4, relu=False, name='bbox_pred'))
#==========segment network===
(self.feed('conv5_3')
.conv(1,1,512,1 , 1, padding='VALID', name='conv5_4')
.fc(512, name='fc8')
.fc(900, relu=False, name='seg_score'))
| 40.744898
| 147
| 0.539194
|
import tensorflow as tf
from networks.network import Network
n_classes = 21
_feat_stride = [16,]
anchor_scales = [8, 16, 32]
class VGGnet_train(Network):
def __init__(self, trainable=True):
self.inputs = []
self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3])
self.keep_prob = tf.placeholder(tf.float32)
self.segmentation=tf.placeholder(tf.float32,shape=[None,900])
self.rois=tf.placeholder(tf.float32,shape=[None,5])
self.sweights=tf.placeholder(tf.bool,shape=[None])
self.labels=tf.placeholder(tf.int32,shape=[None])
self.layers = dict({'data':self.data, 'segmentation':self.segmentation, 'sweight':self.sweights, 'labels': self.labels, "rois": self.rois})
self.trainable = trainable
self.setup()
def setup(self):
(self.feed('data')
.conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)
.conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)
.max_pool(2, 2, 2, 2, padding='VALID', name='pool1')
.conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)
.conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)
.max_pool(2, 2, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 256, 1, 1, name='conv3_1')
.conv(3, 3, 256, 1, 1, name='conv3_2')
.conv(3, 3, 256, 1, 1, name='conv3_3')
.max_pool(2, 2, 2, 2, padding='VALID', name='pool3')
.conv(3, 3, 512, 1, 1, name='conv4_1')
.conv(3, 3, 512, 1, 1, name='conv4_2')
.conv(3, 3, 512, 1, 1, name='conv4_3'))
(self.feed('conv4_3','rois')
.roi_pool(7, 7, 1.0/16, name='pool_4')
.conv(3, 3, 512, 1, 1, name='conv5_1')
.conv(3, 3, 512, 1, 1, name='conv5_2')
.conv(3, 3, 512, 1, 1, name='conv5_3')
.max_pool(2, 2, 2, 2, padding='VALID', name='pool5'))
(self.feed('pool5')
.fc(1024, name='fc6')
.dropout(0.5, name='drop6')
.fc(1024, name='fc7')
.dropout(0.5, name='drop7')
.fc(n_classes, relu=False, name='cls_score')
.softmax(name='cls_prob'))
(self.feed('conv5_3')
.conv(1,1,512,1 , 1, padding='VALID', name='conv5_4')
.fc(512, name='fc8')
.fc(900, relu=False, name='seg_score'))
| true
| true
|
f70aea4b89b68eac3f7c8bada0d6ff77a9ea5c18
| 1,575
|
py
|
Python
|
algorithms/pattern_matching/kmp.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | 1
|
2019-04-18T03:29:02.000Z
|
2019-04-18T03:29:02.000Z
|
algorithms/pattern_matching/kmp.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | null | null | null |
algorithms/pattern_matching/kmp.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | null | null | null |
"""
KMP pattern matching algorithm.
Finds matching patterns in text in linear time.
Text: A longer string of length n. (n > m)
Pattern: Substring to be searched for of length m.
Works by precompiling the pattern string to create a LPS string array.
LPS: Longest Proper Prefix. Longest prefix string that is also a suffix
Time Complexity: O(n+m)
Space Complexity: O(m)
"""
def compute_lps(pattern: str, m: int) -> list:
"""
Algorithm to compute LPS for given pattern.
"""
lps = [0] * m
i, j = 1, 0 # j = length of previous longest prefix-suffix
while i < m:
if pattern[i] == pattern[j]:
j += 1
lps[i] = j
i += 1
else:
# backtrack j. It cannot suddenly reduce to 0 as we might have a
# suffix - prefix pair ending at j
if j > 0:
j = lps[j - 1]
else:
i += 1
return lps
def kmp(text: str, pattern: str) -> None:
n, m = len(text), len(pattern)
lps = compute_lps(pattern, m)
i, j = 0, 0
while i < n:
if text[i] == pattern[j]:
i += 1
j += 1
if j == m:
print("pattern", pattern, "found at location", i - j)
j = lps[j - 1]
elif i < n and pattern[j] != text[i]:
if j > 0:
j = lps[j - 1]
else:
i += 1
if __name__ == "__main__":
text = "ABABABCABABABCABABABCABABACABABAC"
pattern = "ABABAC"
kmp(text, pattern)
pattern = "AAACAAAAAC"
kmp(text, pattern)
| 25.403226
| 76
| 0.526349
|
def compute_lps(pattern: str, m: int) -> list:
lps = [0] * m
i, j = 1, 0
while i < m:
if pattern[i] == pattern[j]:
j += 1
lps[i] = j
i += 1
else:
if j > 0:
j = lps[j - 1]
else:
i += 1
return lps
def kmp(text: str, pattern: str) -> None:
n, m = len(text), len(pattern)
lps = compute_lps(pattern, m)
i, j = 0, 0
while i < n:
if text[i] == pattern[j]:
i += 1
j += 1
if j == m:
print("pattern", pattern, "found at location", i - j)
j = lps[j - 1]
elif i < n and pattern[j] != text[i]:
if j > 0:
j = lps[j - 1]
else:
i += 1
if __name__ == "__main__":
text = "ABABABCABABABCABABABCABABACABABAC"
pattern = "ABABAC"
kmp(text, pattern)
pattern = "AAACAAAAAC"
kmp(text, pattern)
| true
| true
|
f70aebfd8d72e36ce7a654aef6710e7290e9ca98
| 602
|
py
|
Python
|
pyapple/interface/exceptions.py
|
fxrcha/PyApple
|
6f1336c63583204d4b2b723dd1de8d1895e42430
|
[
"MIT"
] | 13
|
2021-02-21T04:16:40.000Z
|
2022-03-21T23:34:18.000Z
|
pyapple/interface/exceptions.py
|
fxrcha/PyApple
|
6f1336c63583204d4b2b723dd1de8d1895e42430
|
[
"MIT"
] | null | null | null |
pyapple/interface/exceptions.py
|
fxrcha/PyApple
|
6f1336c63583204d4b2b723dd1de8d1895e42430
|
[
"MIT"
] | 4
|
2021-02-21T04:16:42.000Z
|
2021-03-13T00:22:42.000Z
|
class HTTPException(Exception):
"""
Exception which happens when HTTP status code is not 200 (OK).
"""
def __init__(self, code, url) -> None:
self.error = f"While requesting to {url}, request returned status {code}."
def __str__(self) -> str:
return self.error
class NoCatalogResult(Exception):
"""
Exception which happens when there is no product with given product id.
"""
def __init__(self, product_id) -> None:
self.error = f"There is no catalog result with id {product_id}."
def __str__(self) -> str:
return self.error
| 26.173913
| 82
| 0.641196
|
class HTTPException(Exception):
def __init__(self, code, url) -> None:
self.error = f"While requesting to {url}, request returned status {code}."
def __str__(self) -> str:
return self.error
class NoCatalogResult(Exception):
def __init__(self, product_id) -> None:
self.error = f"There is no catalog result with id {product_id}."
def __str__(self) -> str:
return self.error
| true
| true
|
f70aec16628b9cde66e6d82f41b5f6d38354523d
| 1,451
|
py
|
Python
|
mongo_connector/constants.py
|
hannelita/mongo-connector
|
3df79c656b11bc8f540b42e0a4604bb71a1e2434
|
[
"Apache-2.0"
] | 15
|
2015-01-06T08:10:21.000Z
|
2017-03-12T23:06:43.000Z
|
mongo_connector/constants.py
|
hannelita/mongo-connector
|
3df79c656b11bc8f540b42e0a4604bb71a1e2434
|
[
"Apache-2.0"
] | 16
|
2015-03-11T09:28:33.000Z
|
2016-03-06T14:45:54.000Z
|
mongo_connector/constants.py
|
hannelita/mongo-connector
|
3df79c656b11bc8f540b42e0a4604bb71a1e2434
|
[
"Apache-2.0"
] | 13
|
2015-03-21T13:39:10.000Z
|
2022-03-14T11:50:24.000Z
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Maximum # of documents to process before recording timestamp
# default = -1 (no maximum)
DEFAULT_BATCH_SIZE = -1
# Interval in seconds between doc manager flushes (i.e. auto commit)
# default = None (never auto commit)
DEFAULT_COMMIT_INTERVAL = None
# Maximum # of documents to send in a single bulk request through a
# DocManager.
DEFAULT_MAX_BULK = 1000
# The default MongoDB field that will serve as the unique key for the
# target system.
DEFAULT_UNIQUE_KEY = "_id"
# Default host and facility for logging to the syslog.
DEFAULT_SYSLOG_HOST = "localhost:512"
DEFAULT_SYSLOG_FACILITY = "user"
# ROTATING LOGFILE
# The type of interval
# (seconds, minutes, hours... c.f. logging.handlers.TimedRotatingFileHandler)
DEFAULT_LOGFILE_WHEN = "midnight"
# The rollover interval
DEFAULT_LOGFILE_INTERVAL = 1
# Number of log files to keep
DEFAULT_LOGFILE_BACKUPCOUNT = 7
| 33.744186
| 77
| 0.772571
|
DEFAULT_COMMIT_INTERVAL = None
DEFAULT_SYSLOG_HOST = "localhost:512"
DEFAULT_SYSLOG_FACILITY = "user"
DEFAULT_LOGFILE_WHEN = "midnight"
DEFAULT_LOGFILE_INTERVAL = 1
DEFAULT_LOGFILE_BACKUPCOUNT = 7
| true
| true
|
f70aec762062680c5259cfb5eb77332eb404d8dd
| 4,433
|
py
|
Python
|
mcpython/common/state/ui/UIPartScrollBar.py
|
mcpython4-coding/core
|
8698efe93f5a25421bfa508d769d8fdc8e9ce24c
|
[
"CC0-1.0",
"MIT"
] | 2
|
2019-11-02T05:26:11.000Z
|
2019-11-03T08:52:18.000Z
|
mcpython/common/state/ui/UIPartScrollBar.py
|
mcpython4-coding/core
|
8698efe93f5a25421bfa508d769d8fdc8e9ce24c
|
[
"CC0-1.0",
"MIT"
] | 25
|
2019-11-02T05:24:29.000Z
|
2022-02-09T14:09:08.000Z
|
mcpython/common/state/ui/UIPartScrollBar.py
|
mcpython4-coding/core
|
8698efe93f5a25421bfa508d769d8fdc8e9ce24c
|
[
"CC0-1.0",
"MIT"
] | 5
|
2019-11-09T05:36:06.000Z
|
2021-11-28T13:07:08.000Z
|
"""
mcpython - a minecraft clone written in python licenced under the MIT-licence
(https://github.com/mcpython4-coding/core)
Contributors: uuk, xkcdjerry (inactive)
Based on the game of fogleman (https://github.com/fogleman/Minecraft), licenced under the MIT-licence
Original game "minecraft" by Mojang Studios (www.minecraft.net), licenced under the EULA
(https://account.mojang.com/documents/minecraft_eula)
Mod loader inspired by "Minecraft Forge" (https://github.com/MinecraftForge/MinecraftForge) and similar
This project is not official by mojang and does not relate to it.
"""
import asyncio
import mcpython.engine.ResourceLoader
import mcpython.util.texture
import PIL.Image
import pyglet
from mcpython.engine.rendering.RenderingLayerManager import MIDDLE_GROUND
from mcpython.util.annotation import onlyInClient
from pyglet.window import mouse
from .AbstractUIPart import AbstractUIPart
IMAGE = asyncio.get_event_loop().run_until_complete(
mcpython.engine.ResourceLoader.read_image(
"assets/minecraft/textures/gui/container/creative_inventory/tabs.png"
)
)
scroll_active = mcpython.util.texture.to_pyglet_image(
IMAGE.crop((233, 0, 243, 14)).resize((20, 28), PIL.Image.NEAREST)
)
scroll_inactive = mcpython.util.texture.to_pyglet_image(
IMAGE.crop((244, 0, 255, 14)).resize((20, 28), PIL.Image.NEAREST)
)
class UIScrollBar(AbstractUIPart):
"""
Class representing a scroll bar in a GUI state of the game.
The caller is expected to work with the values this bar reports through on_scroll.
"""
def __init__(self, position: tuple, scroll_distance: int, on_scroll=None):
super().__init__(position, (0, 0))
self.selected = False
self.bar_position = position
self.bar_sprite = pyglet.sprite.Sprite(scroll_active)
self.scroll_distance = scroll_distance
self.on_scroll = on_scroll
self.active = True
def move(self, delta: int):
x, y = self.bar_position
self.bar_position = x, max(
self.position[1], min(self.position[1] + self.scroll_distance, y + delta)
)
if self.on_scroll:
self.on_scroll(0, 0, 0, delta, 0, 0, self.get_status())
def bind_to_eventbus(self):
self.master[0].eventbus.subscribe("user:mouse:press", self.on_mouse_press)
self.master[0].eventbus.subscribe("user:mouse:release", self.on_mouse_release)
self.master[0].eventbus.subscribe("user:mouse:drag", self.on_mouse_drag)
self.master[0].eventbus.subscribe(
MIDDLE_GROUND.getRenderingEvent(), self.on_draw
)
def on_mouse_press(self, x, y, button, mod):
if not self.active:
return
if button != mouse.LEFT:
return
bx, by = self.bar_position
if 0 <= x - bx <= 20 and 0 <= y - by <= 28:
self.selected = True
def on_mouse_release(self, x, y, button, mod):
self.selected = False
def on_mouse_drag(self, x, y, dx, dy, button, mod):
if not self.active:
return
if button == mouse.LEFT and self.selected:
self.bar_position = (
self.position[0],
max(self.position[1], min(self.position[1] + self.scroll_distance, y)),
)
if self.on_scroll:
self.on_scroll(x, y, dx, dy, button, mod, self.get_status())
def on_draw(self):
if not self.active:
return
if self.bar_sprite.position != self.bar_position:
self.bar_sprite.position = self.bar_position
self.bar_sprite.draw()
def get_status(self) -> float:
"""
Will return the status as a float between 0 and 1, where 0 is the lower end and 1 the upper
"""
if not self.active:
return 0
return (self.bar_position[1] - self.position[1]) / self.scroll_distance
def set_status(self, status: float):
self.bar_position = (
self.bar_position[0],
self.position[1] + status * self.scroll_distance,
)
def set_size_respective(self, position: tuple, scroll_distance: int):
if not self.active:
return
status = self.get_status()
self.position = position
self.bar_position = (
self.position[0],
self.position[1] + status * scroll_distance,
)
self.scroll_distance = scroll_distance
| 34.364341
| 103
| 0.653508
|
import asyncio
import mcpython.engine.ResourceLoader
import mcpython.util.texture
import PIL.Image
import pyglet
from mcpython.engine.rendering.RenderingLayerManager import MIDDLE_GROUND
from mcpython.util.annotation import onlyInClient
from pyglet.window import mouse
from .AbstractUIPart import AbstractUIPart
IMAGE = asyncio.get_event_loop().run_until_complete(
mcpython.engine.ResourceLoader.read_image(
"assets/minecraft/textures/gui/container/creative_inventory/tabs.png"
)
)
scroll_active = mcpython.util.texture.to_pyglet_image(
IMAGE.crop((233, 0, 243, 14)).resize((20, 28), PIL.Image.NEAREST)
)
scroll_inactive = mcpython.util.texture.to_pyglet_image(
IMAGE.crop((244, 0, 255, 14)).resize((20, 28), PIL.Image.NEAREST)
)
class UIScrollBar(AbstractUIPart):
def __init__(self, position: tuple, scroll_distance: int, on_scroll=None):
super().__init__(position, (0, 0))
self.selected = False
self.bar_position = position
self.bar_sprite = pyglet.sprite.Sprite(scroll_active)
self.scroll_distance = scroll_distance
self.on_scroll = on_scroll
self.active = True
def move(self, delta: int):
x, y = self.bar_position
self.bar_position = x, max(
self.position[1], min(self.position[1] + self.scroll_distance, y + delta)
)
if self.on_scroll:
self.on_scroll(0, 0, 0, delta, 0, 0, self.get_status())
def bind_to_eventbus(self):
self.master[0].eventbus.subscribe("user:mouse:press", self.on_mouse_press)
self.master[0].eventbus.subscribe("user:mouse:release", self.on_mouse_release)
self.master[0].eventbus.subscribe("user:mouse:drag", self.on_mouse_drag)
self.master[0].eventbus.subscribe(
MIDDLE_GROUND.getRenderingEvent(), self.on_draw
)
def on_mouse_press(self, x, y, button, mod):
if not self.active:
return
if button != mouse.LEFT:
return
bx, by = self.bar_position
if 0 <= x - bx <= 20 and 0 <= y - by <= 28:
self.selected = True
def on_mouse_release(self, x, y, button, mod):
self.selected = False
def on_mouse_drag(self, x, y, dx, dy, button, mod):
if not self.active:
return
if button == mouse.LEFT and self.selected:
self.bar_position = (
self.position[0],
max(self.position[1], min(self.position[1] + self.scroll_distance, y)),
)
if self.on_scroll:
self.on_scroll(x, y, dx, dy, button, mod, self.get_status())
def on_draw(self):
if not self.active:
return
if self.bar_sprite.position != self.bar_position:
self.bar_sprite.position = self.bar_position
self.bar_sprite.draw()
def get_status(self) -> float:
if not self.active:
return 0
return (self.bar_position[1] - self.position[1]) / self.scroll_distance
def set_status(self, status: float):
self.bar_position = (
self.bar_position[0],
self.position[1] + status * self.scroll_distance,
)
def set_size_respective(self, position: tuple, scroll_distance: int):
if not self.active:
return
status = self.get_status()
self.position = position
self.bar_position = (
self.position[0],
self.position[1] + status * scroll_distance,
)
self.scroll_distance = scroll_distance
| true
| true
|
f70aed7ed6e1e8d9c85bf0f6c2447a7ce378f443
| 18,439
|
py
|
Python
|
inb/linkedin/settings.py
|
JoshiAyush/linkedin-bot
|
f333218678ab6bc468644dca50aec684b4e29bde
|
[
"MIT"
] | 2
|
2021-05-30T07:03:31.000Z
|
2021-06-03T03:00:31.000Z
|
inb/linkedin/settings.py
|
JoshiAyush/linkedin-bot
|
f333218678ab6bc468644dca50aec684b4e29bde
|
[
"MIT"
] | 3
|
2021-05-28T10:32:03.000Z
|
2021-06-18T09:45:21.000Z
|
inb/linkedin/settings.py
|
JoshiAyush/linkedin-bot
|
f333218678ab6bc468644dca50aec684b4e29bde
|
[
"MIT"
] | 1
|
2021-03-22T16:01:40.000Z
|
2021-03-22T16:01:40.000Z
|
# Copyright 2021, joshiayus Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of joshiayus Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import os
import sys
import click
import pathlib
import logging
import subprocess
from urllib import (request, parse)
try:
from gettext import gettext as _ # pylint: disable=unused-import
except ImportError:
_ = lambda msg: msg
CONNECTION_LIMIT_EXCEED_EXCEPTION_MESSAGE = """Invalid connection limit %d.
LinkedIn does not allow sending more than 80 invitations per day to a
non-premium account.
Please be patient and make sure that the connection limit is between (0, 80]
and that you are not running the bot more than once a day, otherwise LinkedIn
will block your IP."""
LOG_DIR_PATH = pathlib.Path(__file__).resolve().parent.parent.parent / 'logs'
# This variable's value decides whether logging to a stream is allowed in the
# entire project.
#
# Note: You must not update the value of this variable directly; call the
# `TurnOnLoggingToStream()` function instead, otherwise this particular module
# will not pick up the change.
LOGGING_TO_STREAM_ENABLED = False
# We want to create the log directory if it does not exist, otherwise the file
# handlers for loggers used in other modules will complain about its absence.
if not os.path.exists(LOG_DIR_PATH):
os.mkdir(LOG_DIR_PATH)
LOG_FORMAT_STR = '%(asctime)s:%(name)s:%(levelname)s:%(funcName)s\n%(message)s' # pylint: disable=line-too-long
INB_VERSION = '1.0.0'
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(LOG_DIR_PATH / __name__, mode='a')
file_handler.setFormatter(logging.Formatter(LOG_FORMAT_STR))
logger.addHandler(file_handler)
def TurnOnLoggingToStream() -> None:
global LOGGING_TO_STREAM_ENABLED
LOGGING_TO_STREAM_ENABLED = True
stream_handler = logging.StreamHandler(sys.stderr)
stream_handler.setFormatter(logging.Formatter(LOG_FORMAT_STR))
logger.addHandler(stream_handler)
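# --- Illustrative note (added for clarity; not part of the original module) ---
# Callers that want log records mirrored to the terminal are expected to call
# TurnOnLoggingToStream() once at startup, for example:
#   TurnOnLoggingToStream()
#   logger.debug('stream logging enabled')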
_CHROME_BINARY_NOT_FOUND_MSG = _('Google Chrome binary is not present in path %s.')
_CHROME_BINARIES_NOT_FOUND_MSG = _(
'Google Chrome binary is not present in the following paths\n'
'%s')
_CHROME_DRIVER_BINARY = 'chromedriver'
_CHROME_DRIVER_ZIP_FILE = None
# Chromedriver that comes with the repository is only compatible with the Google
# Chrome version _GOOGLE_CHROME_COMPATIBLE_VERSION_WITH_INSTALLED_CHROMEDRIVER.
#
# This version must be kept in sync with the 'chromedriver' version that
# comes installed with the repository.
_GOOGLE_CHROME_COMPATIBLE_VERSION_WITH_INSTALLED_CHROMEDRIVER = '96.0.4664.110'
def _ExtractChromeDriverZip(chromedriver_zip: str) -> None:
"""Utility routine to `unzip` the downloaded `chromedriver` archive present
at path `chromedriver_zip`.
This function will extract all the contents of `chromedriver` archive in the
same directory where the archive is installed.
Args:
chromedriver_zip: `Chromedriver` archive file path.
"""
import zipfile # pylint: disable=import-outside-toplevel
driver_dir = pathlib.PurePath(chromedriver_zip).parent
with zipfile.ZipFile(chromedriver_zip, 'r') as zip_f:
zip_f.extractall(driver_dir)
def _RetrieveChromeDriverZip(url: str, dest: str, verbose: bool = True) -> str:
"""Utility function to download `chromedriver` zip file at the specified URL.
Utility function to download and store `chromedriver` zip file in the
destination `dest`. This function also sets the value of
`_CHROME_DRIVER_ZIP_FILE` variable equals to the `chromedriver` zip file name
at the specified URL so to later use the archive file name to extract the
`chromedriver` executable from it.
Args:
url: URL to download the file from.
dest: Destination where to place the file after downloading.
verbose: If `True` shows the downloading status.
Returns:
Destination where the file is placed after installing.
"""
u = request.urlopen(url)
scheme, netloc, path, query, fragment = parse.urlsplit(url) # pylint: disable=unused-variable
filename = os.path.basename(path)
if not filename:
filename = 'downloaded'
global _CHROME_DRIVER_ZIP_FILE
_CHROME_DRIVER_ZIP_FILE = filename
if dest:
filename = os.path.join(dest, filename)
with open(filename, 'wb') as f:
if verbose:
meta = u.info()
if hasattr(meta, 'getheaders'):
meta_func = meta.getheaders
else:
meta_func = meta.get_all
meta_length = meta_func('Content-Length')
file_size = None
if meta_length:
file_size = int(meta_length[0])
click.echo(_('Downloading: %s Bytes: %s') % (url, file_size))
file_size_dl = 0
block_size = 8192
while True:
buffer = u.read(block_size)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
if verbose:
status = '{0:16}'.format(file_size_dl) # pylint: disable=consider-using-f-string
if file_size:
status += ' [{0:6.2f}%]'.format(file_size_dl * 100 / file_size) # pylint: disable=consider-using-f-string
status += chr(13)
click.echo(f'{status}\r', None, False)
if verbose:
click.echo('')
return filename
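# --- Illustrative note (added for clarity; not part of the original module) ---
# A hypothetical call, downloading the linux64 archive into a local 'driver'
# directory with progress output enabled; the URL mirrors the googleapis
# layout used further below:
#   _RetrieveChromeDriverZip(
#       'https://chromedriver.storage.googleapis.com/96.0.4664.45/chromedriver_linux64.zip',
#       'driver', verbose=True)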
def _GetGoogleChromeBinaryVersion() -> str:
"""Returns the `Google Chrome` version the user is using in its system.
This function returns the `Google Chrome` version independent of the platform
the user is running. This function creates a child process using `subprocess`
module to talk to the shell and retrieve the `Google Chrome` version present
in the system.
This function checks the following locations where the `Google Chrome`
executable could be present in user's system.
* `Linux`
  On the `linux` platform this function checks whether the `google-chrome` or
  `google-chrome-stable` binary is present; if so, it passes the `--version`
  flag to that binary in a child process to retrieve the version string.
The child process calls for `linux` platform looks something like the
following:
* If `google-chrome` is present.
```shell
google-chrome --version
```
* If `google-chrome` is not present.
```shell
google-chrome-stable --version
```
* `MacOS`
On `MacOs` platform this function will create a child process and will
provide `--version` flag to the `Google Chrome` executable present in the
path `/Applications/Google Chrome.app/Contents/MacOS/Google Chrome`.
  The child process call for the `MacOS` platform looks something like the
  following:
```shell
/Applications/Google Chrome.app/Contents/MacOS/Google Chrome --version
```
@TODO(joshiayush): Find alternative paths on `MacOS`.
* `Windows`
God forbid if you are on `Windows` because there is no tested version of
this function on `Windows` but so far what we've come up with is the
following:
This function will search for the `Google Chrome` executable in the
following paths:
```python
chrome_binary_path = (
'%ProgramFiles%\\Google\\Chrome\\Application\\chrome.exe',
'%ProgramFiles(x86)%\\Google\\Chrome\\Application\\chrome.exe',
'%LocalAppData%\\Google\\Chrome\\Application\\chrome.exe',
'C:\\Users\\USER\\AppData\\Local\\Google\\Chrome\\Application\\chrome.exe'
)
```
and will try to execute the following commands in its child process to
retrieve the `Google Chrome` version.
```shell
wmic datafile where name=${path} get Version /value
```
where path is the `element` of `chrome_binary_path` tuple on `Windows`.
Returns:
`Google Chrome` version.
"""
version_regex = r'[0-9]{2}.[0-9]{1}.[0-9]{4}.[0-9]{3}'
if sys.platform == 'linux':
chrome_binaries = ['google-chrome', 'google-chrome-stable']
chrome_binary_path = []
for binary in chrome_binaries:
try:
chrome_binary_path.append(
subprocess.check_output(['whereis', '-b',
binary]).decode('utf-8')[len(binary) +
1::].strip())
except subprocess.CalledProcessError as exc:
logger.error(('CalledProcessError: Exit code %d.'
'\n%s.'), exc.returncode, exc.output)
continue
    chrome_binary_path = [path for path in chrome_binary_path if path != '']
for path in chrome_binary_path:
try:
version = subprocess.check_output([path, '--version']).decode('utf-8')
except subprocess.CalledProcessError:
logger.error(_CHROME_BINARY_NOT_FOUND_MSG, path)
continue
else:
version = re.search(version_regex, version)
return version.group(0)
raise FileNotFoundError(_CHROME_BINARIES_NOT_FOUND_MSG %
(', '.join(chrome_binary_path)))
elif sys.platform == 'darwin':
    chrome_binary_path = (
        r'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',)
for path in chrome_binary_path:
try:
version = subprocess.check_output([path, '--version']).decode('utf-8')
except subprocess.CalledProcessError:
logger.error(_CHROME_BINARY_NOT_FOUND_MSG, path)
continue
else:
version = re.search(version_regex, version)
return version.group(0)
raise FileNotFoundError(_CHROME_BINARIES_NOT_FOUND_MSG %
(', '.join(chrome_binary_path)))
elif sys.platform in ('win32', 'cygwin'):
chrome_binary_path = (
r'%ProgramFiles%\Google\Chrome\Application\chrome.exe',
r'%ProgramFiles(x86)%\Google\Chrome\Application\chrome.exe',
r'%LocalAppData%\Google\Chrome\Application\chrome.exe',
r'C:\Users\USER\AppData\Local\Google\Chrome\Application\chrome.exe')
for path in chrome_binary_path:
try:
version = subprocess.check_output([
'wmic', 'datafile', 'where', f'name={path}', 'get', 'Version',
'/value'
]).decode('utf-8')
except subprocess.CalledProcessError:
logger.error(_CHROME_BINARY_NOT_FOUND_MSG, path)
continue
else:
version = re.search(version_regex, version)
return version.group(0)
raise FileNotFoundError(_CHROME_BINARIES_NOT_FOUND_MSG %
(', '.join(chrome_binary_path)))
def _CheckIfChromeDriverIsCompatibleWithGoogleChromeInstalled() -> bool:
"""Checks if the `chromedriver` that comes with the `inb` repository is
compatible with the `Google Chrome` version the user is using in its system.
This function checks if the `Google Chrome` version the user is using in its
system matches against the `Google Chrome` version supported by `chromedriver`
that comes with the `inb` repository which is
`_GOOGLE_CHROME_COMPATIBLE_VERSION_WITH_INSTALLED_CHROMEDRIVER`.
Returns:
True if the `chromedriver` is compatible with the `Google Chrome` installed.
"""
google_chrome_version = _GetGoogleChromeBinaryVersion()
if google_chrome_version == _GOOGLE_CHROME_COMPATIBLE_VERSION_WITH_INSTALLED_CHROMEDRIVER: # pylint: disable=line-too-long
return True
return False
def _GetPlatformSpecificChromeDriverUrlForGoogleChromeMajor(major: str) -> str:
"""Returns the platform specific `chromedriver` version that is compatible
with the `Google Chrome` major given as `major`.
This function only supports `Google Chrome` major that is present in the
following list of `chromedriver` releases:
```python
(
'95.0.4638.69',
'96.0.4664.45',
'97.0.4692.36',
)
```
`Google Chrome` version against a major that is not present in the above list
will not receive a compatible version of `chromedriver` through this function.
Args:
major: `Google Chrome` major.
Returns:
Platform specific `chromedriver` file URL that is compatible with
    `Google Chrome` with the given `major` as major.
"""
chromedriver_storage_googleapis = 'https://chromedriver.storage.googleapis.com' # pylint: disable=line-too-long
for release in (
'95.0.4638.69',
'96.0.4664.45',
'97.0.4692.36',
):
if release.startswith(major):
if sys.platform == 'linux':
return f'{chromedriver_storage_googleapis}/{release}/{_CHROME_DRIVER_BINARY}_linux64.zip' # pylint: disable=line-too-long
elif sys.platform == 'darwin':
return f'{chromedriver_storage_googleapis}/{release}/{_CHROME_DRIVER_BINARY}_mac64.zip' # pylint: disable=line-too-long
elif sys.platform in ('win32', 'cygwin'):
return f'{chromedriver_storage_googleapis}/{release}/{_CHROME_DRIVER_BINARY}_win32.zip' # pylint: disable=line-too-long
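# --- Illustrative note (added for clarity; not part of the original module) ---
# For example, a Google Chrome major of '96' maps to the pinned 96.0.4664.45
# release above, so on Linux the function returns:
#   https://chromedriver.storage.googleapis.com/96.0.4664.45/chromedriver_linux64.zip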
def _GetPlatformSpecificChromeDriverCompatibleVersionUrl(
google_chrome_version: str) -> str:
"""Returns the platform specific `chromedriver` version URL that is
compatible with the `Google Chrome` version given as `google_chrome_version`.
This function takes out the `major` version from the `google_chrome_version`
string and calls the function
`_GetPlatformSpecificChromeDriverUrlForGoogleChromeMajor()` with the major
that we just took out to receive a compatible `chromedriver` version URL.
Args:
google_chrome_version: `Google Chrome` version.
Returns:
`Chromedriver` version URL that is compatible with the `Google Chrome`
version given as `google_chrome_version`.
"""
major_regex = re.compile(r'^[0-9]{2}')
google_chrome_major = re.search(major_regex, google_chrome_version).group(0)
return _GetPlatformSpecificChromeDriverUrlForGoogleChromeMajor(
google_chrome_major)
def _InstallGoogleChromeCompatibleChromeDriver() -> None:
"""Installs `Google Chrome` compatible `chromedriver`.
This function installs a `Google Chrome` compatible `chromedriver` version.
Because user's can have different versions of `Google Chrome` installed in
their system so we need to handle the case where the `chromedriver` that
comes with the `inb` repository is not compatible with the `Google Chrome`
version they are using on their system.
To handle the above case we install the compatible version of `chromedriver`
from the `googleapis` by calling the function
`_GetPlatformSpecificChromeDriverCompatibleVersionUrl()` to return the URL
for `chromedriver` and then later using that URL with function
`_RetrieveChromeDriverZip()` to install `chromedriver` from `googleapis`.
  Once the `chromedriver` archive is downloaded we know that it is a zip file,
  so we extract it by calling the function `_ExtractChromeDriverZip()` with
  the zip file path.
"""
  _RetrieveChromeDriverZip(
      _GetPlatformSpecificChromeDriverCompatibleVersionUrl(
          _GetGoogleChromeBinaryVersion()),
      _GetInstalledChromeDriverDirectoryPath(),
      verbose=LOGGING_TO_STREAM_ENABLED)
_ExtractChromeDriverZip(
os.path.join(_GetInstalledChromeDriverDirectoryPath(),
_CHROME_DRIVER_ZIP_FILE))
def _GetInstalledChromeDriverDirectoryPath() -> str:
"""Returns the absolute filesystem path to the directory where `chromedriver`
that comes with the `inb` repository is installed.
Returns:
    Absolute filesystem path to the `chromedriver` directory.
"""
dir_path = os.path.dirname(os.path.abspath(__file__))
last_inb_indx = dir_path.rfind('inb')
return os.path.join(dir_path[:last_inb_indx:], 'driver')
def ChromeDriverAbsolutePath() -> str:
"""Returns the absolute filesystem path to the `chromedriver` installed inside
the `driver` directory.
This function checks if the `chromedriver` that comes with the `inb`
repository is compatible with the `Google Chrome` installed in the user's
system; if yes it returns the absolute filesystem path to the `chromedriver`
installed inside the `driver` directory.
  If the `chromedriver` is not compatible with the `Google Chrome` version the
  user is using on their system then this function tries to install a compatible
`chromedriver` inside the directory `driver` and if successful, it returns the
absolute filesystem path to the `chromedriver`.
Returns:
Absolute path to `chromedriver`.
"""
if _CheckIfChromeDriverIsCompatibleWithGoogleChromeInstalled():
return os.path.join(_GetInstalledChromeDriverDirectoryPath(),
_CHROME_DRIVER_BINARY)
_InstallGoogleChromeCompatibleChromeDriver()
return os.path.join(_GetInstalledChromeDriverDirectoryPath(),
_CHROME_DRIVER_BINARY)
def GetLinkedInUrl() -> str:
"""Returns URL to LinkedIn."""
return 'https://www.linkedin.com'
def GetLinkedInLoginPageUrl() -> str:
"""Returns URL to LinkedIn's login page."""
return GetLinkedInUrl() + '/login/'
def GetLinkedInMyNetworkPageUrl() -> str:
"""Returns URL to LinkedIn's `MyNetwork` page."""
return GetLinkedInUrl() + '/mynetwork/'
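# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The selenium wiring below is an assumption, not code from this file.
# driver_path = ChromeDriverAbsolutePath()
# login_url = GetLinkedInLoginPageUrl()   # 'https://www.linkedin.com/login/'
#   from selenium import webdriver
#   driver = webdriver.Chrome(driver_path)
#   driver.get(login_url)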
| 37.940329
| 130
| 0.719996
|
import re
import os
import sys
import click
import pathlib
import logging
import subprocess
from urllib import (request, parse)
try:
from gettext import gettext as _
except ImportError:
_ = lambda msg: msg
CONNECTION_LIMIT_EXCEED_EXCEPTION_MESSAGE = """Invalid connection limit %d.
LinkedIn does not allow sending more than 80 invitations per day to a
non-premium account.
Please be patient and make sure that the connection limit is between (0, 80]
and that you are not running the bot more than once a day, otherwise LinkedIn
will block your IP."""
LOG_DIR_PATH = pathlib.Path(__file__).resolve().parent.parent.parent / 'logs'
# project.
#
# Note: You must not update the value of this variable directly; call the
# `TurnOnLoggingToStream()` function instead, otherwise this particular module
# will not pick up the change.
LOGGING_TO_STREAM_ENABLED = False
# We want to create the log directory if it does not exist, otherwise the file
# handlers for loggers used in other modules will complain about its absence.
if not os.path.exists(LOG_DIR_PATH):
os.mkdir(LOG_DIR_PATH)
LOG_FORMAT_STR = '%(asctime)s:%(name)s:%(levelname)s:%(funcName)s\n%(message)s' # pylint: disable=line-too-long
INB_VERSION = '1.0.0'
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(LOG_DIR_PATH / __name__, mode='a')
file_handler.setFormatter(logging.Formatter(LOG_FORMAT_STR))
logger.addHandler(file_handler)
def TurnOnLoggingToStream() -> None:
global LOGGING_TO_STREAM_ENABLED
LOGGING_TO_STREAM_ENABLED = True
stream_handler = logging.StreamHandler(sys.stderr)
stream_handler.setFormatter(logging.Formatter(LOG_FORMAT_STR))
logger.addHandler(stream_handler)
_CHROME_BINARY_NOT_FOUND_MSG = _('Google Chrome binary is not present in path %s.')
_CHROME_BINARIES_NOT_FOUND_MSG = _(
'Google Chrome binary is not present in the following paths\n'
'%s')
_CHROME_DRIVER_BINARY = 'chromedriver'
_CHROME_DRIVER_ZIP_FILE = None
# Chromedriver that comes with the repository is only compatible with the Google
# Chrome version _GOOGLE_CHROME_COMPATIBLE_VERSION_WITH_INSTALLED_CHROMEDRIVER.
#
# This version must be kept in sync with the 'chromedriver' version that
# comes installed with the repository.
_GOOGLE_CHROME_COMPATIBLE_VERSION_WITH_INSTALLED_CHROMEDRIVER = '96.0.4664.110'
def _ExtractChromeDriverZip(chromedriver_zip: str) -> None:
import zipfile # pylint: disable=import-outside-toplevel
driver_dir = pathlib.PurePath(chromedriver_zip).parent
with zipfile.ZipFile(chromedriver_zip, 'r') as zip_f:
zip_f.extractall(driver_dir)
def _RetrieveChromeDriverZip(url: str, dest: str, verbose: bool = True) -> str:
u = request.urlopen(url)
scheme, netloc, path, query, fragment = parse.urlsplit(url) # pylint: disable=unused-variable
filename = os.path.basename(path)
if not filename:
filename = 'downloaded'
global _CHROME_DRIVER_ZIP_FILE
_CHROME_DRIVER_ZIP_FILE = filename
if dest:
filename = os.path.join(dest, filename)
with open(filename, 'wb') as f:
if verbose:
meta = u.info()
if hasattr(meta, 'getheaders'):
meta_func = meta.getheaders
else:
meta_func = meta.get_all
meta_length = meta_func('Content-Length')
file_size = None
if meta_length:
file_size = int(meta_length[0])
click.echo(_('Downloading: %s Bytes: %s') % (url, file_size))
file_size_dl = 0
block_size = 8192
while True:
buffer = u.read(block_size)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
if verbose:
status = '{0:16}'.format(file_size_dl) # pylint: disable=consider-using-f-string
if file_size:
status += ' [{0:6.2f}%]'.format(file_size_dl * 100 / file_size) # pylint: disable=consider-using-f-string
status += chr(13)
click.echo(f'{status}\r', None, False)
if verbose:
click.echo('')
return filename
def _GetGoogleChromeBinaryVersion() -> str:
version_regex = r'[0-9]{2}.[0-9]{1}.[0-9]{4}.[0-9]{3}'
if sys.platform == 'linux':
chrome_binaries = ['google-chrome', 'google-chrome-stable']
chrome_binary_path = []
for binary in chrome_binaries:
try:
chrome_binary_path.append(
subprocess.check_output(['whereis', '-b',
binary]).decode('utf-8')[len(binary) +
1::].strip())
except subprocess.CalledProcessError as exc:
logger.error(('CalledProcessError: Exit code %d.'
'\n%s.'), exc.returncode, exc.output)
continue
    chrome_binary_path = [path for path in chrome_binary_path if path != '']
for path in chrome_binary_path:
try:
version = subprocess.check_output([path, '--version']).decode('utf-8')
except subprocess.CalledProcessError:
logger.error(_CHROME_BINARY_NOT_FOUND_MSG, path)
continue
else:
version = re.search(version_regex, version)
return version.group(0)
raise FileNotFoundError(_CHROME_BINARIES_NOT_FOUND_MSG %
(', '.join(chrome_binary_path)))
elif sys.platform == 'darwin':
    chrome_binary_path = (
        r'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',)
for path in chrome_binary_path:
try:
version = subprocess.check_output([path, '--version']).decode('utf-8')
except subprocess.CalledProcessError:
logger.error(_CHROME_BINARY_NOT_FOUND_MSG, path)
continue
else:
version = re.search(version_regex, version)
return version.group(0)
raise FileNotFoundError(_CHROME_BINARIES_NOT_FOUND_MSG %
(', '.join(chrome_binary_path)))
elif sys.platform in ('win32', 'cygwin'):
chrome_binary_path = (
r'%ProgramFiles%\Google\Chrome\Application\chrome.exe',
r'%ProgramFiles(x86)%\Google\Chrome\Application\chrome.exe',
r'%LocalAppData%\Google\Chrome\Application\chrome.exe',
r'C:\Users\USER\AppData\Local\Google\Chrome\Application\chrome.exe')
for path in chrome_binary_path:
try:
version = subprocess.check_output([
'wmic', 'datafile', 'where', f'name={path}', 'get', 'Version',
'/value'
]).decode('utf-8')
except subprocess.CalledProcessError:
logger.error(_CHROME_BINARY_NOT_FOUND_MSG, path)
continue
else:
version = re.search(version_regex, version)
return version.group(0)
raise FileNotFoundError(_CHROME_BINARIES_NOT_FOUND_MSG %
(', '.join(chrome_binary_path)))
def _CheckIfChromeDriverIsCompatibleWithGoogleChromeInstalled() -> bool:
google_chrome_version = _GetGoogleChromeBinaryVersion()
if google_chrome_version == _GOOGLE_CHROME_COMPATIBLE_VERSION_WITH_INSTALLED_CHROMEDRIVER: # pylint: disable=line-too-long
return True
return False
def _GetPlatformSpecificChromeDriverUrlForGoogleChromeMajor(major: str) -> str:
chromedriver_storage_googleapis = 'https://chromedriver.storage.googleapis.com' # pylint: disable=line-too-long
for release in (
'95.0.4638.69',
'96.0.4664.45',
'97.0.4692.36',
):
if release.startswith(major):
if sys.platform == 'linux':
return f'{chromedriver_storage_googleapis}/{release}/{_CHROME_DRIVER_BINARY}_linux64.zip' # pylint: disable=line-too-long
elif sys.platform == 'darwin':
return f'{chromedriver_storage_googleapis}/{release}/{_CHROME_DRIVER_BINARY}_mac64.zip' # pylint: disable=line-too-long
elif sys.platform in ('win32', 'cygwin'):
return f'{chromedriver_storage_googleapis}/{release}/{_CHROME_DRIVER_BINARY}_win32.zip' # pylint: disable=line-too-long
def _GetPlatformSpecificChromeDriverCompatibleVersionUrl(
google_chrome_version: str) -> str:
major_regex = re.compile(r'^[0-9]{2}')
google_chrome_major = re.search(major_regex, google_chrome_version).group(0)
return _GetPlatformSpecificChromeDriverUrlForGoogleChromeMajor(
google_chrome_major)
def _InstallGoogleChromeCompatibleChromeDriver() -> None:
  _RetrieveChromeDriverZip(
      _GetPlatformSpecificChromeDriverCompatibleVersionUrl(
          _GetGoogleChromeBinaryVersion()),
      _GetInstalledChromeDriverDirectoryPath(),
      verbose=LOGGING_TO_STREAM_ENABLED)
_ExtractChromeDriverZip(
os.path.join(_GetInstalledChromeDriverDirectoryPath(),
_CHROME_DRIVER_ZIP_FILE))
def _GetInstalledChromeDriverDirectoryPath() -> str:
dir_path = os.path.dirname(os.path.abspath(__file__))
last_inb_indx = dir_path.rfind('inb')
return os.path.join(dir_path[:last_inb_indx:], 'driver')
def ChromeDriverAbsolutePath() -> str:
if _CheckIfChromeDriverIsCompatibleWithGoogleChromeInstalled():
return os.path.join(_GetInstalledChromeDriverDirectoryPath(),
_CHROME_DRIVER_BINARY)
_InstallGoogleChromeCompatibleChromeDriver()
return os.path.join(_GetInstalledChromeDriverDirectoryPath(),
_CHROME_DRIVER_BINARY)
def GetLinkedInUrl() -> str:
return 'https://www.linkedin.com'
def GetLinkedInLoginPageUrl() -> str:
return GetLinkedInUrl() + '/login/'
def GetLinkedInMyNetworkPageUrl() -> str:
return GetLinkedInUrl() + '/mynetwork/'
| true
| true
|
f70aee199c5545672cba5de1ccc8222cc8715ead
| 6,272
|
py
|
Python
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/types/feed_item_service.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/types/feed_item_service.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/types/feed_item_service.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v7.enums.types import response_content_type as gage_response_content_type
from google.ads.googleads.v7.resources.types import feed_item as gagr_feed_item
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.services',
marshal='google.ads.googleads.v7',
manifest={
'GetFeedItemRequest',
'MutateFeedItemsRequest',
'FeedItemOperation',
'MutateFeedItemsResponse',
'MutateFeedItemResult',
},
)
class GetFeedItemRequest(proto.Message):
r"""Request message for
[FeedItemService.GetFeedItem][google.ads.googleads.v7.services.FeedItemService.GetFeedItem].
Attributes:
resource_name (str):
Required. The resource name of the feed item
to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
class MutateFeedItemsRequest(proto.Message):
r"""Request message for
[FeedItemService.MutateFeedItems][google.ads.googleads.v7.services.FeedItemService.MutateFeedItems].
Attributes:
customer_id (str):
Required. The ID of the customer whose feed
items are being modified.
operations (Sequence[google.ads.googleads.v7.services.types.FeedItemOperation]):
Required. The list of operations to perform
on individual feed items.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v7.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(
proto.STRING,
number=1,
)
operations = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='FeedItemOperation',
)
partial_failure = proto.Field(
proto.BOOL,
number=3,
)
validate_only = proto.Field(
proto.BOOL,
number=4,
)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class FeedItemOperation(proto.Message):
r"""A single operation (create, update, remove) on an feed item.
Attributes:
update_mask (google.protobuf.field_mask_pb2.FieldMask):
FieldMask that determines which resource
fields are modified in an update.
create (google.ads.googleads.v7.resources.types.FeedItem):
Create operation: No resource name is
expected for the new feed item.
update (google.ads.googleads.v7.resources.types.FeedItem):
Update operation: The feed item is expected
to have a valid resource name.
remove (str):
Remove operation: A resource name for the removed feed item
is expected, in this format:
``customers/{customer_id}/feedItems/{feed_id}~{feed_item_id}``
"""
update_mask = proto.Field(
proto.MESSAGE,
number=4,
message=field_mask_pb2.FieldMask,
)
create = proto.Field(
proto.MESSAGE,
number=1,
oneof='operation',
message=gagr_feed_item.FeedItem,
)
update = proto.Field(
proto.MESSAGE,
number=2,
oneof='operation',
message=gagr_feed_item.FeedItem,
)
remove = proto.Field(
proto.STRING,
number=3,
oneof='operation',
)
class MutateFeedItemsResponse(proto.Message):
r"""Response message for an feed item mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v7.services.types.MutateFeedItemResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE,
number=3,
message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='MutateFeedItemResult',
)
class MutateFeedItemResult(proto.Message):
r"""The result for the feed item mutate.
Attributes:
resource_name (str):
Returned for successful operations.
feed_item (google.ads.googleads.v7.resources.types.FeedItem):
The mutated feed item with only mutable fields after mutate.
The field will only be returned when response_content_type
is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
feed_item = proto.Field(
proto.MESSAGE,
number=2,
message=gagr_feed_item.FeedItem,
)
__all__ = tuple(sorted(__protobuf__.manifest))
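# --- Illustrative usage sketch (added for clarity; not part of the generated
# module). The customer/feed/feed-item IDs below are placeholders.
def _example_remove_request():
    """Builds a remove operation wrapped in a mutate request (illustrative)."""
    operation = FeedItemOperation(
        remove='customers/1234567890/feedItems/111~222')
    return MutateFeedItemsRequest(
        customer_id='1234567890',
        operations=[operation],
        partial_failure=True)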
| 31.837563
| 112
| 0.656888
|
import proto
from google.ads.googleads.v7.enums.types import response_content_type as gage_response_content_type
from google.ads.googleads.v7.resources.types import feed_item as gagr_feed_item
from google.protobuf import field_mask_pb2
from google.rpc import status_pb2
__protobuf__ = proto.module(
package='google.ads.googleads.v7.services',
marshal='google.ads.googleads.v7',
manifest={
'GetFeedItemRequest',
'MutateFeedItemsRequest',
'FeedItemOperation',
'MutateFeedItemsResponse',
'MutateFeedItemResult',
},
)
class GetFeedItemRequest(proto.Message):
resource_name = proto.Field(
proto.STRING,
number=1,
)
class MutateFeedItemsRequest(proto.Message):
customer_id = proto.Field(
proto.STRING,
number=1,
)
operations = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='FeedItemOperation',
)
partial_failure = proto.Field(
proto.BOOL,
number=3,
)
validate_only = proto.Field(
proto.BOOL,
number=4,
)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class FeedItemOperation(proto.Message):
update_mask = proto.Field(
proto.MESSAGE,
number=4,
message=field_mask_pb2.FieldMask,
)
create = proto.Field(
proto.MESSAGE,
number=1,
oneof='operation',
message=gagr_feed_item.FeedItem,
)
update = proto.Field(
proto.MESSAGE,
number=2,
oneof='operation',
message=gagr_feed_item.FeedItem,
)
remove = proto.Field(
proto.STRING,
number=3,
oneof='operation',
)
class MutateFeedItemsResponse(proto.Message):
partial_failure_error = proto.Field(
proto.MESSAGE,
number=3,
message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='MutateFeedItemResult',
)
class MutateFeedItemResult(proto.Message):
resource_name = proto.Field(
proto.STRING,
number=1,
)
feed_item = proto.Field(
proto.MESSAGE,
number=2,
message=gagr_feed_item.FeedItem,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| true
| true
|
f70aeeecd3129be0699bbccd47f8bc33eb31ff00
| 13,131
|
py
|
Python
|
Thesis@3.9.1/Lib/site-packages/setuptools/_distutils/command/config.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | null | null | null |
Thesis@3.9.1/Lib/site-packages/setuptools/_distutils/command/config.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | null | null | null |
Thesis@3.9.1/Lib/site-packages/setuptools/_distutils/command/config.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | null | null | null |
"""distutils.command.config
Implements the Distutils 'config' command, a (mostly) empty command class
that exists mainly to be sub-classed by specific module distributions and
applications. The idea is that while every "config" command is different,
at least they're all named the same, and users always see "config" in the
list of standard commands. Also, this is a good place to put common
configure-like tasks: "try to compile this C code", or "figure out where
this header file lives".
"""
import os, re
from distutils.core import Command
from distutils.errors import DistutilsExecError
from distutils.sysconfig import customize_compiler
from distutils import log
LANG_EXT = {"c": ".c", "c++": ".cxx"}
class config(Command):
description = "prepare to build"
user_options = [
("compiler=", None, "specify the compiler type"),
("cc=", None, "specify the compiler executable"),
("include-dirs=", "I", "list of directories to search for header files"),
("define=", "D", "C preprocessor macros to define"),
("undef=", "U", "C preprocessor macros to undefine"),
("libraries=", "l", "external C libraries to link with"),
("library-dirs=", "L", "directories to search for external C libraries"),
("noisy", None, "show every action (compile, link, run, ...) taken"),
(
"dump-source",
None,
"dump generated source files before attempting to compile them",
),
]
# The three standard command methods: since the "config" command
# does nothing by default, these are empty.
def initialize_options(self):
self.compiler = None
self.cc = None
self.include_dirs = None
self.libraries = None
self.library_dirs = None
# maximal output for now
self.noisy = 1
self.dump_source = 1
# list of temporary files generated along-the-way that we have
# to clean at some point
self.temp_files = []
def finalize_options(self):
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
elif isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
if self.libraries is None:
self.libraries = []
elif isinstance(self.libraries, str):
self.libraries = [self.libraries]
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, str):
self.library_dirs = self.library_dirs.split(os.pathsep)
def run(self):
pass
# Utility methods for actual "config" commands. The interfaces are
# loosely based on Autoconf macros of similar names. Sub-classes
# may use these freely.
def _check_compiler(self):
"""Check that 'self.compiler' really is a CCompiler object;
if not, make it one.
"""
# We do this late, and only on-demand, because this is an expensive
# import.
from distutils.ccompiler import CCompiler, new_compiler
if not isinstance(self.compiler, CCompiler):
self.compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=1
)
customize_compiler(self.compiler)
if self.include_dirs:
self.compiler.set_include_dirs(self.include_dirs)
if self.libraries:
self.compiler.set_libraries(self.libraries)
if self.library_dirs:
self.compiler.set_library_dirs(self.library_dirs)
def _gen_temp_sourcefile(self, body, headers, lang):
filename = "_configtest" + LANG_EXT[lang]
with open(filename, "w") as file:
if headers:
for header in headers:
file.write("#include <%s>\n" % header)
file.write("\n")
file.write(body)
if body[-1] != "\n":
file.write("\n")
return filename
def _preprocess(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
out = "_configtest.i"
self.temp_files.extend([src, out])
self.compiler.preprocess(src, out, include_dirs=include_dirs)
return (src, out)
def _compile(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
if self.dump_source:
dump_file(src, "compiling '%s':" % src)
(obj,) = self.compiler.object_filenames([src])
self.temp_files.extend([src, obj])
self.compiler.compile([src], include_dirs=include_dirs)
return (src, obj)
def _link(self, body, headers, include_dirs, libraries, library_dirs, lang):
(src, obj) = self._compile(body, headers, include_dirs, lang)
prog = os.path.splitext(os.path.basename(src))[0]
self.compiler.link_executable(
[obj],
prog,
libraries=libraries,
library_dirs=library_dirs,
target_lang=lang,
)
if self.compiler.exe_extension is not None:
prog = prog + self.compiler.exe_extension
self.temp_files.append(prog)
return (src, obj, prog)
def _clean(self, *filenames):
if not filenames:
filenames = self.temp_files
self.temp_files = []
log.info("removing: %s", " ".join(filenames))
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
# XXX these ignore the dry-run flag: what to do, what to do? even if
# you want a dry-run build, you still need some sort of configuration
# info. My inclination is to make it up to the real config command to
# consult 'dry_run', and assume a default (minimal) configuration if
# true. The problem with trying to do it here is that you'd have to
# return either true or false from all the 'try' methods, neither of
# which is correct.
# XXX need access to the header search path and maybe default macros.
def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
"""Construct a source file from 'body' (a string containing lines
of C/C++ code) and 'headers' (a list of header files to include)
and run it through the preprocessor. Return true if the
preprocessor succeeded, false if there were any errors.
('body' probably isn't of much use, but what the heck.)
"""
from distutils.ccompiler import CompileError
self._check_compiler()
ok = True
try:
self._preprocess(body, headers, include_dirs, lang)
except CompileError:
ok = False
self._clean()
return ok
def search_cpp(self, pattern, body=None, headers=None, include_dirs=None, lang="c"):
"""Construct a source file (just like 'try_cpp()'), run it through
the preprocessor, and return true if any line of the output matches
'pattern'. 'pattern' should either be a compiled regex object or a
string containing a regex. If both 'body' and 'headers' are None,
preprocesses an empty file -- which can be useful to determine the
symbols the preprocessor and compiler set by default.
"""
self._check_compiler()
src, out = self._preprocess(body, headers, include_dirs, lang)
if isinstance(pattern, str):
pattern = re.compile(pattern)
with open(out) as file:
match = False
while True:
line = file.readline()
if line == "":
break
if pattern.search(line):
match = True
break
self._clean()
return match
def try_compile(self, body, headers=None, include_dirs=None, lang="c"):
"""Try to compile a source file built from 'body' and 'headers'.
Return true on success, false otherwise.
"""
from distutils.ccompiler import CompileError
self._check_compiler()
try:
self._compile(body, headers, include_dirs, lang)
ok = True
except CompileError:
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_link(
self,
body,
headers=None,
include_dirs=None,
libraries=None,
library_dirs=None,
lang="c",
):
"""Try to compile and link a source file, built from 'body' and
'headers', to executable form. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
self._link(body, headers, include_dirs, libraries, library_dirs, lang)
ok = True
except (CompileError, LinkError):
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_run(
self,
body,
headers=None,
include_dirs=None,
libraries=None,
library_dirs=None,
lang="c",
):
"""Try to compile, link to an executable, and run a program
built from 'body' and 'headers'. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
src, obj, exe = self._link(
body, headers, include_dirs, libraries, library_dirs, lang
)
self.spawn([exe])
ok = True
except (CompileError, LinkError, DistutilsExecError):
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
# -- High-level methods --------------------------------------------
# (these are the ones that are actually likely to be useful
# when implementing a real-world config command!)
def check_func(
self,
func,
headers=None,
include_dirs=None,
libraries=None,
library_dirs=None,
decl=0,
call=0,
):
"""Determine if function 'func' is available by constructing a
source file that refers to 'func', and compiles and links it.
If everything succeeds, returns true; otherwise returns false.
The constructed source file starts out by including the header
files listed in 'headers'. If 'decl' is true, it then declares
'func' (as "int func()"); you probably shouldn't supply 'headers'
and set 'decl' true in the same call, or you might get errors about
        conflicting declarations for 'func'. Finally, the constructed
'main()' function either references 'func' or (if 'call' is true)
calls it. 'libraries' and 'library_dirs' are used when
linking.
"""
self._check_compiler()
body = []
if decl:
body.append("int %s ();" % func)
body.append("int main () {")
if call:
body.append(" %s();" % func)
else:
body.append(" %s;" % func)
body.append("}")
body = "\n".join(body) + "\n"
return self.try_link(body, headers, include_dirs, libraries, library_dirs)
def check_lib(
self,
library,
library_dirs=None,
headers=None,
include_dirs=None,
other_libraries=[],
):
"""Determine if 'library' is available to be linked against,
without actually checking that any particular symbols are provided
by it. 'headers' will be used in constructing the source file to
be compiled, but the only effect of this is to check if all the
header files listed are available. Any libraries listed in
'other_libraries' will be included in the link, in case 'library'
has symbols that depend on other libraries.
"""
self._check_compiler()
return self.try_link(
"int main (void) { }",
headers,
include_dirs,
[library] + other_libraries,
library_dirs,
)
def check_header(self, header, include_dirs=None, library_dirs=None, lang="c"):
"""Determine if the system header file named by 'header_file'
exists and can be found by the preprocessor; return true if so,
false otherwise.
"""
return self.try_cpp(
body="/* No body */", headers=[header], include_dirs=include_dirs
)
def dump_file(filename, head=None):
"""Dumps a file content into log.info.
If head is not None, will be dumped before the file content.
"""
if head is None:
log.info("%s", filename)
else:
log.info(head)
file = open(filename)
try:
log.info(file.read())
finally:
file.close()
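# --- Illustrative usage sketch (added for clarity; not part of the original
# module). A minimal, hypothetical subclass showing how the high-level checks
# are meant to be combined in a project-specific "config" command; it would
# typically be registered via setup(cmdclass={'config': example_config}).
class example_config(config):
    def run(self):
        if not self.check_header('zlib.h'):
            log.warn("zlib.h not found; the build may lack compression support")
        if self.check_func('getline', headers=['stdio.h'], call=1):
            log.info("getline() is available")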
| 34.830239
| 88
| 0.596527
|
import os, re
from distutils.core import Command
from distutils.errors import DistutilsExecError
from distutils.sysconfig import customize_compiler
from distutils import log
LANG_EXT = {"c": ".c", "c++": ".cxx"}
class config(Command):
description = "prepare to build"
user_options = [
("compiler=", None, "specify the compiler type"),
("cc=", None, "specify the compiler executable"),
("include-dirs=", "I", "list of directories to search for header files"),
("define=", "D", "C preprocessor macros to define"),
("undef=", "U", "C preprocessor macros to undefine"),
("libraries=", "l", "external C libraries to link with"),
("library-dirs=", "L", "directories to search for external C libraries"),
("noisy", None, "show every action (compile, link, run, ...) taken"),
(
"dump-source",
None,
"dump generated source files before attempting to compile them",
),
]
def initialize_options(self):
self.compiler = None
self.cc = None
self.include_dirs = None
self.libraries = None
self.library_dirs = None
self.noisy = 1
self.dump_source = 1
self.temp_files = []
def finalize_options(self):
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
elif isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
if self.libraries is None:
self.libraries = []
elif isinstance(self.libraries, str):
self.libraries = [self.libraries]
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, str):
self.library_dirs = self.library_dirs.split(os.pathsep)
def run(self):
pass
def _check_compiler(self):
from distutils.ccompiler import CCompiler, new_compiler
if not isinstance(self.compiler, CCompiler):
self.compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=1
)
customize_compiler(self.compiler)
if self.include_dirs:
self.compiler.set_include_dirs(self.include_dirs)
if self.libraries:
self.compiler.set_libraries(self.libraries)
if self.library_dirs:
self.compiler.set_library_dirs(self.library_dirs)
def _gen_temp_sourcefile(self, body, headers, lang):
filename = "_configtest" + LANG_EXT[lang]
with open(filename, "w") as file:
if headers:
for header in headers:
file.write("#include <%s>\n" % header)
file.write("\n")
file.write(body)
if body[-1] != "\n":
file.write("\n")
return filename
def _preprocess(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
out = "_configtest.i"
self.temp_files.extend([src, out])
self.compiler.preprocess(src, out, include_dirs=include_dirs)
return (src, out)
def _compile(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
if self.dump_source:
dump_file(src, "compiling '%s':" % src)
(obj,) = self.compiler.object_filenames([src])
self.temp_files.extend([src, obj])
self.compiler.compile([src], include_dirs=include_dirs)
return (src, obj)
def _link(self, body, headers, include_dirs, libraries, library_dirs, lang):
(src, obj) = self._compile(body, headers, include_dirs, lang)
prog = os.path.splitext(os.path.basename(src))[0]
self.compiler.link_executable(
[obj],
prog,
libraries=libraries,
library_dirs=library_dirs,
target_lang=lang,
)
if self.compiler.exe_extension is not None:
prog = prog + self.compiler.exe_extension
self.temp_files.append(prog)
return (src, obj, prog)
def _clean(self, *filenames):
if not filenames:
filenames = self.temp_files
self.temp_files = []
log.info("removing: %s", " ".join(filenames))
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
# return either true or false from all the 'try' methods, neither of
# which is correct.
# XXX need access to the header search path and maybe default macros.
def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
from distutils.ccompiler import CompileError
self._check_compiler()
ok = True
try:
self._preprocess(body, headers, include_dirs, lang)
except CompileError:
ok = False
self._clean()
return ok
def search_cpp(self, pattern, body=None, headers=None, include_dirs=None, lang="c"):
self._check_compiler()
src, out = self._preprocess(body, headers, include_dirs, lang)
if isinstance(pattern, str):
pattern = re.compile(pattern)
with open(out) as file:
match = False
while True:
line = file.readline()
if line == "":
break
if pattern.search(line):
match = True
break
self._clean()
return match
def try_compile(self, body, headers=None, include_dirs=None, lang="c"):
from distutils.ccompiler import CompileError
self._check_compiler()
try:
self._compile(body, headers, include_dirs, lang)
ok = True
except CompileError:
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_link(
self,
body,
headers=None,
include_dirs=None,
libraries=None,
library_dirs=None,
lang="c",
):
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
self._link(body, headers, include_dirs, libraries, library_dirs, lang)
ok = True
except (CompileError, LinkError):
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_run(
self,
body,
headers=None,
include_dirs=None,
libraries=None,
library_dirs=None,
lang="c",
):
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
src, obj, exe = self._link(
body, headers, include_dirs, libraries, library_dirs, lang
)
self.spawn([exe])
ok = True
except (CompileError, LinkError, DistutilsExecError):
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
# -- High-level methods --------------------------------------------
# (these are the ones that are actually likely to be useful
# when implementing a real-world config command!)
def check_func(
self,
func,
headers=None,
include_dirs=None,
libraries=None,
library_dirs=None,
decl=0,
call=0,
):
self._check_compiler()
body = []
if decl:
body.append("int %s ();" % func)
body.append("int main () {")
if call:
body.append(" %s();" % func)
else:
body.append(" %s;" % func)
body.append("}")
body = "\n".join(body) + "\n"
return self.try_link(body, headers, include_dirs, libraries, library_dirs)
def check_lib(
self,
library,
library_dirs=None,
headers=None,
include_dirs=None,
other_libraries=[],
):
self._check_compiler()
return self.try_link(
"int main (void) { }",
headers,
include_dirs,
[library] + other_libraries,
library_dirs,
)
def check_header(self, header, include_dirs=None, library_dirs=None, lang="c"):
return self.try_cpp(
body="/* No body */", headers=[header], include_dirs=include_dirs
)
def dump_file(filename, head=None):
if head is None:
log.info("%s", filename)
else:
log.info(head)
file = open(filename)
try:
log.info(file.read())
finally:
file.close()
| true
| true
|
f70aef653d2ef3b2e8701681f111cc6df59eb702
| 407
|
py
|
Python
|
packages/python/plotly/plotly/validators/splom/_hoverinfosrc.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/splom/_hoverinfosrc.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/splom/_hoverinfosrc.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class HoverinfosrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="hoverinfosrc", parent_name="splom", **kwargs):
super(HoverinfosrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
| 33.916667
| 82
| 0.68059
|
import _plotly_utils.basevalidators
class HoverinfosrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="hoverinfosrc", parent_name="splom", **kwargs):
super(HoverinfosrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
| true
| true
|
f70af01d82997fdd5c275f0119e776bc23bc6b61
| 1,730
|
py
|
Python
|
local_image.py
|
EnterpriseWS/visitor_badge
|
98593be190af299148068598b7074c4105a7d20e
|
[
"MIT"
] | null | null | null |
local_image.py
|
EnterpriseWS/visitor_badge
|
98593be190af299148068598b7074c4105a7d20e
|
[
"MIT"
] | null | null | null |
local_image.py
|
EnterpriseWS/visitor_badge
|
98593be190af299148068598b7074c4105a7d20e
|
[
"MIT"
] | null | null | null |
from PIL import Image
from datetime import datetime
import sys
import base64
from io import BytesIO
import platform
import urllib.parse
IMG_FOLDER = ''
if platform.system() == 'Linux':
IMG_FOLDER = 'images/'
elif platform.system() == 'Windows':
IMG_FOLDER = '.\\images\\'
def get_base64_image(filename: str = '.\\images\\face_dither.png') -> str:
try:
encoded_image = b''
image_format = ''
with Image.open(filename) as image:
image_format = image.format
# print(f'Format is: {image_format}')
# print(f'Mode is: {image.mode}')
buffer = BytesIO()
image.save(buffer, image.format)
image_bytes = buffer.getvalue()
encoded_image = base64.b64encode(image_bytes)
# ****** Below is simply for testing if the image ******
# data stored in the file is correct or not.
# ------------------------------------------------------
# image_buffer = BytesIO(base64.b64decode(encoded_image))
# with Image.open(image_buffer) as fil_image:
# new_filename = 'Robert' + datetime.now().strftime('_%Y%m%d_%H%M%S') \
# + '.' + image_format.lower()
# fil_image.save(IMG_FOLDER + new_filename, image_format)
# ------------------------------------------------------
print(f'The Base64 image = {urllib.parse.quote(encoded_image.decode())}')
return encoded_image.decode()
except Exception as ex:
print(f'No image found: {ex}')
if __name__ == '__main__':
if len(sys.argv) == 2:
# print(f'The param = {sys.argv[1]}')
get_base64_image(sys.argv[1])
else:
get_base64_image()
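# ---------------------------------------------------------------------------
# Hedged round-trip sketch (not part of the original script): decoding the
# string returned by get_base64_image() back into a Pillow image, mirroring
# the commented-out test block above. The example filename is an assumption.
# ---------------------------------------------------------------------------
def _decode_base64_image(encoded: str) -> Image.Image:
    """Return a PIL image reconstructed from a Base64-encoded string."""
    return Image.open(BytesIO(base64.b64decode(encoded)))


# Example (not executed automatically):
#     img = _decode_base64_image(get_base64_image(IMG_FOLDER + 'face_dither.png'))
#     img.save(IMG_FOLDER + 'roundtrip.' + img.format.lower(), img.format)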
| 35.306122
| 83
| 0.554913
|
from PIL import Image
from datetime import datetime
import sys
import base64
from io import BytesIO
import platform
import urllib.parse
IMG_FOLDER = ''
if platform.system() == 'Linux':
IMG_FOLDER = 'images/'
elif platform.system() == 'Windows':
IMG_FOLDER = '.\\images\\'
def get_base64_image(filename: str = '.\\images\\face_dither.png') -> str:
try:
encoded_image = b''
image_format = ''
with Image.open(filename) as image:
image_format = image.format
buffer = BytesIO()
image.save(buffer, image.format)
image_bytes = buffer.getvalue()
encoded_image = base64.b64encode(image_bytes)
print(f'The Base64 image = {urllib.parse.quote(encoded_image.decode())}')
return encoded_image.decode()
except Exception as ex:
print(f'No image found: {ex}')
if __name__ == '__main__':
if len(sys.argv) == 2:
get_base64_image(sys.argv[1])
else:
get_base64_image()
| true
| true
|
f70af03c0b36276c0b4f3b78e7ca52a6c4bd0075
| 3,242
|
py
|
Python
|
br_record.py
|
purplewish07/pybrother
|
fe94d95ae90c677a72a82e2a3c7a602f3f16803f
|
[
"Unlicense"
] | null | null | null |
br_record.py
|
purplewish07/pybrother
|
fe94d95ae90c677a72a82e2a3c7a602f3f16803f
|
[
"Unlicense"
] | null | null | null |
br_record.py
|
purplewish07/pybrother
|
fe94d95ae90c677a72a82e2a3c7a602f3f16803f
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------
# Date: 2021/01/06
# Author: Shaun
# File description:
# Automatically polls the machine status and updates the database; works with a bash script registered in the system scheduler via `crontab -e`.
# -------------------------------------------------------------------
import socket
# import pymysql
import os
import time
from datetime import datetime
# cnc_config = [('cnc27', "192.168.3.27"), ('cnc28', "192.168.3.28"), ('cnc29', "192.168.3.29"), ('cnc43', "192.168.3.43"),
# ('cnc44', "192.168.3.44"), ('cnc45', "192.168.3.45"), ('cnc46', "192.168.3.46")]
# cnc_config = [('cnc27', "192.168.3.27"), ('cnc28', "192.168.3.28"), ('cnc29', "192.168.3.29"), ('cnc46', "192.168.3.46")]
cnc_config = [('cnc46', "192.168.3.46")]
def get_from_brother(ip='127.0.0.1', port=10000):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.settimeout(10)
try:
client.connect((ip, port))
        # Get the workpiece count
# instruct = '%CLOD WKCNTR ' + os.linesep + '00%'
instruct = '%CLOD WKCNTR 00\r\n%'
# instruct = '%CLOD PRD3 00\r\n%'
# instruct = '%CIOCREF GRN 00\r\n%'
client.send(instruct.encode())
# lines = client.recv(3096).decode().split(os.linesep)
lines = client.recv(1500).decode()
# arr= [line.strip() for line in lines]
# n=0
# for e in arr:
# v1=e.split(',')
# if n>1:
# # v1[1]=datetime.fromtimestamp(int(v1[1]) / 1e3)
# v1[1]=datetime.fromtimestamp(int(v1[1]))
# # date=v1[1]
# n+=1
# print(v1)
# print(lines)
# lines = client.recv(1024).decode()
print(lines)
lines = lines.split(os.linesep)
        lines = [line for line in lines if line.startswith('A01')]  # keep only lines starting with 'A01'
        fields = lines[0].split(',')  # split into fields; the third field is the target [workpiece count]
parts = int(fields[2].strip())
        print('Part count:',int(fields[2].strip()),'\n')
        # Get the machine status
# instruct = '%CLOD WKCNTR 00\r\n%'
instruct = '%CLOD PRD3 00\r\n%'
client.sendall(instruct.encode())
flag = True
data=''
while flag:
lines = client.recv(1500).decode()
# print('len:',len(lines),lines)
data+=lines
if lines[-1]=='%':
flag = False
log=data.split('\n')
# print(data,'len:',len(data))
for i in range(10):
print(log[i])
return parts
except Exception as e:
print(ip, e)
return -1
finally:
client.close()
# def save_db(name='J44', qty=-1):
# try:
# conn = pymysql.Connect(user='root', password='1234', database='dademes', charset='utf8')
# cus = conn.cursor()
# if qty == -1:
#             cus.execute('update kbequipment set running=%s where name=%s', ('off', name))
#         else:
#             cus.execute('update kbequipment set running=%s, status=%s where name=%s', ('normal', qty, name))
# conn.commit()
# cus.close()
# conn.close()
# except Exception as e:
#         print('machine no.=%s error while saving data, %s' % (name, e))
if __name__ == '__main__':
try:
for cnc_name, ip in cnc_config:
            print('Reading machine no.=%s, ip=%s' % (cnc_name, ip))
qty = get_from_brother(ip=ip)
print(qty)
# save_db(qty=qty, name=cnc_name)
except Exception as e:
print('__main__', e)
finally:
        print('CNC data read complete... reading again in 30 seconds...')
# time.sleep(10)
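# ---------------------------------------------------------------------------
# Hedged parsing sketch (not part of the original script): extracting the
# workpiece count from a controller reply with the same steps used in
# get_from_brother() above. The sample reply in the comment is made up for
# illustration; real replies may contain more lines and fields.
# ---------------------------------------------------------------------------
def _parse_part_count(reply: str) -> int:
    """Return the third comma-separated field of the first line starting with 'A01'."""
    a01_lines = [line for line in reply.split(os.linesep) if line.startswith('A01')]
    return int(a01_lines[0].split(',')[2].strip())


# Example (not executed automatically):
#     _parse_part_count('A01,20210106,128,OK' + os.linesep + '%')  # -> 128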
| 31.784314
| 124
| 0.546885
|
import socket
import os
import time
from datetime import datetime
cnc_config = [('cnc46', "192.168.3.46")]
def get_from_brother(ip='127.0.0.1', port=10000):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.settimeout(10)
try:
client.connect((ip, port))
instruct = '%CLOD WKCNTR 00\r\n%'
client.send(instruct.encode())
lines = client.recv(1500).decode()
        print(lines)
        lines = lines.split(os.linesep)
lines = [line for line in lines if line.startswith('A01')]
fields = lines[0].split(',')
parts = int(fields[2].strip())
        print('Part count:',int(fields[2].strip()),'\n')
instruct = '%CLOD PRD3 00\r\n%'
client.sendall(instruct.encode())
flag = True
data=''
while flag:
lines = client.recv(1500).decode()
data+=lines
if lines[-1]=='%':
flag = False
log=data.split('\n')
for i in range(10):
print(log[i])
return parts
except Exception as e:
print(ip, e)
return -1
finally:
client.close()
if __name__ == '__main__':
try:
for cnc_name, ip in cnc_config:
            print('Reading machine no.=%s, ip=%s' % (cnc_name, ip))
qty = get_from_brother(ip=ip)
print(qty)
except Exception as e:
print('__main__', e)
finally:
        print('CNC data read complete... reading again in 30 seconds...')
| true
| true
|
f70af04d64e25ed6b095196a829ad2d2f12abde2
| 35,753
|
py
|
Python
|
mermaid/forward_models.py
|
HastingsGreer/mermaid
|
bd13c5fc427eb8cd9054973a8eaaeb302078182d
|
[
"Apache-2.0"
] | 120
|
2019-10-29T23:53:02.000Z
|
2022-03-30T02:59:58.000Z
|
mermaid/forward_models.py
|
HastingsGreer/mermaid
|
bd13c5fc427eb8cd9054973a8eaaeb302078182d
|
[
"Apache-2.0"
] | 10
|
2019-11-05T09:28:35.000Z
|
2022-01-09T19:12:51.000Z
|
mermaid/forward_models.py
|
HastingsGreer/mermaid
|
bd13c5fc427eb8cd9054973a8eaaeb302078182d
|
[
"Apache-2.0"
] | 19
|
2019-11-10T13:34:39.000Z
|
2022-03-13T20:30:10.000Z
|
"""
Package defining various dynamic forward models as well as convenience methods to generate the
right hand sides (RHS) of the related partial differential equations.
Currently, the following forward models are implemented:
#. An advection equation for images
#. An advection equation for maps
#. The EPDiff-equation parameterized using the vector-valued momentum for images
#. The EPDiff-equation parameterized using the vector-valued momentum for maps
#. The EPDiff-equation parameterized using the scalar-valued momentum for images
#. The EPDiff-equation parameterized using the scalar-valued momentum for maps
The images are expected to be tensors of dimension: BxCxXxYxZ (or BxCxX in 1D and BxCxXxY in 2D),
where B is the batch-size, C the number of channels, and X, Y, and Z are the spatial coordinate indices.
Futhermore the following (RHSs) are provided
#. Image advection
#. Map advection
#. Scalar conservation law
#. EPDiff
"""
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from builtins import object
from abc import ABCMeta, abstractmethod
import numpy as np
from . import finite_differences_multi_channel as fdm
from . import utils
from .data_wrapper import MyTensor
from future.utils import with_metaclass
import torch.nn as nn
import torch
class RHSLibrary(object):
"""
Convenience class to quickly generate various right hand sides (RHSs) of popular partial differential
equations. In this way new forward models can be written with minimal code duplication.
"""
def __init__(self, spacing, use_neumann_BC_for_map=False):
"""
Constructor
:param spacing: Spacing for the images. This will be an array with 1, 2, or 3 entries in 1D, 2D, and 3D respectively.
"""
self.spacing = spacing
"""spatial spacing"""
self.spacing_min = np.min(spacing)
""" min of the spacing"""
self.spacing_ratio = spacing/self.spacing_min
self.fdt_ne = fdm.FD_torch_multi_channel(spacing,mode='neumann_zero')
"""torch finite differencing support neumann zero"""
self.fdt_le = fdm.FD_torch_multi_channel( spacing, mode='linear')
"""torch finite differencing support linear extrapolation"""
self.fdt_di = fdm.FD_torch_multi_channel(spacing, mode='dirichlet_zero')
"""torch finite differencing support dirichlet zero"""
self.dim = len(self.spacing)
"""spatial dimension"""
self.use_neumann_BC_for_map = use_neumann_BC_for_map
"""If True uses zero Neumann boundary conditions also for evolutions of the map, if False uses linear extrapolation"""
def rhs_advect_image_multiNC(self,I,v):
'''
Advects a batch of images which can be multi-channel. Expected image format here, is
BxCxXxYxZ, where B is the number of images (batch size), C, the number of channels
per image and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
:math:`-\\nabla I^Tv`
:param I: Image batch BxCIxXxYxZ
:param v: Velocity fields (this will be one velocity field per image) BxCxXxYxZ
:return: Returns the RHS of the advection equations involved BxCxXxYxZ
'''
rhs_ret= self._rhs_advect_image_multiN(I, v )
return rhs_ret
def _rhs_advect_image_multiN(self,I,v):
"""
:param I: One-channel input image: Bx1xXxYxZ
:param v: velocity field BxCxXxYxZ
:return: Returns the RHS of the advection equation for one channel BxXxYxZ
"""
if self.dim == 1:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1]
elif self.dim == 2:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1] -self.fdt_ne.dYc(I)*v[:,1:2]
elif self.dim == 3:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1] -self.fdt_ne.dYc(I)*v[:,1:2]-self.fdt_ne.dZc(I)*v[:,2:3]
else:
raise ValueError('Only supported up to dimension 3')
return rhs_ret
def rhs_scalar_conservation_multiNC(self, I, v):
"""
Scalar conservation law for a batch of images which can be multi-channel. Expected image format here, is
BxCxXxYxZ, where B is the number of images (batch size), C, the number of channels
per image and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
:math:`-div(Iv)`
:param I: Image batch BxCIxXxYxZ
:param v: Velocity fields (this will be one velocity field per image) BxCxXxYxZ
:return: Returns the RHS of the scalar conservation law equations involved BxCxXxYxZ
"""
rhs_ret=self._rhs_scalar_conservation_multiN(I, v)
return rhs_ret
def _rhs_scalar_conservation_multiN(self, I, v):
"""
:param I: One-channel input image: Bx1xXxYxZ
:param v: velocity field BxCxXxYxZ
:return: Returns the RHS of the scalar-conservation law equation for one channel BxXxYxZ
"""
if self.dim==1:
rhs_ret = -self.fdt_ne.dXc(I*v[:,0:1])
elif self.dim==2:
rhs_ret = -self.fdt_ne.dXc(I*v[:,0:1]) -self.fdt_ne.dYc(I*v[:,1:2])
elif self.dim==3:
rhs_ret = -self.fdt_ne.dXc(I* v[:,0:1]) -self.fdt_ne.dYc(I*v[:,1:2])-self.fdt_ne.dZc(I*v[:,2:3])
else:
raise ValueError('Only supported up to dimension 3')
return rhs_ret
def rhs_lagrangian_evolve_map_multiNC(self, phi, v):
"""
Evolves a set of N maps (for N images). Expected format here, is
BxCxXxYxZ, where B is the number of images/maps (batch size), C, the number of channels
per (here the spatial dimension for the map coordinate functions),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D).
        This is used to evolve the map going from the source to the target image. It requires interpolation,
        so it should, if at all possible, not be used as part of an optimization.
        The inverse map can be computed this way because the map is defined in the source space:
        it records where each point moves to (whereas a map in the target space records where a point comes from).
        In this situation we only need to sample the velocity at the current location and accumulate it over the time steps;
        since the advection function moves the image (or the map-based image) by one step of v, v is shared across coordinates,
        so it is safe to compute the inverse map this way.
:math:`v\circ\phi`
:param phi: map batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ
:return: Returns the RHS of the evolution equations involved BxCxXxYxZ
:param phi:
:param v:
:return:
"""
rhs_ret = utils.compute_warped_image_multiNC(v, phi, spacing=self.spacing, spline_order=1,zero_boundary=False)
return rhs_ret
def rhs_advect_map_multiNC(self, phi, v):
'''
Advects a set of N maps (for N images). Expected format here, is
BxCxXxYxZ, where B is the number of images/maps (batch size), C, the number of channels
per (here the spatial dimension for the map coordinate functions),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
:math:`-D\\phi v`
:param phi: map batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ
:return: Returns the RHS of the advection equations involved BxCxXxYxZ
'''
sz = phi.size()
rhs_ret = self._rhs_advect_map_call(phi, v)
return rhs_ret
def _rhs_advect_map_call(self,phi,v):
"""
:param phi: map batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ
:return rhsphi: Returns the RHS of the advection equations involved BxCxXxYxZ
"""
        fdc = self.fdt_le  # use linear-extrapolation boundary conditions (interpolation)
if self.dim==1:
dxc_phi = -fdc.dXc(phi)
rhsphi = v[:, 0:1] * dxc_phi
elif self.dim==2:
dxc_phi = -fdc.dXc(phi)
dyc_phi = -fdc.dYc(phi)
rhsphi = v[:, 0:1] * dxc_phi + v[:, 1:2] * dyc_phi
elif self.dim==3:
dxc_phi = -fdc.dXc(phi)
dyc_phi = -fdc.dYc(phi)
dzc_phi = -fdc.dZc(phi)
rhsphi = v[:,0:1]*dxc_phi + v[:,1:2]*dyc_phi + v[:,2:3]*dzc_phi
else:
raise ValueError('Only supported up to dimension 3')
return rhsphi
def rhs_epdiff_multiNC(self, m, v):
'''
Computes the right hand side of the EPDiff equation for of N momenta (for N images).
Expected format here, is BxCxXxYxZ, where B is the number of momenta (batch size), C,
the number of channels per (here the spatial dimension for the momenta),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
a new version, where batch is no longer calculated separately
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:param m: momenta batch BxCXxYxZ
:param v: Velocity fields (this will be one velocity field per momentum) BxCXxYxZ
:return: Returns the RHS of the EPDiff equations involved BxCXxYxZ
'''
sz = m.size()
rhs_ret = MyTensor(sz).zero_()
rhs_ret = self._rhs_epdiff_call(m, v, rhs_ret)
return rhs_ret
def _rhs_epdiff_call(self, m, v,rhsm):
"""
:param m: momenta batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ
:return rhsm: Returns the RHS of the EPDiff equations involved BxCxXxYxZ
"""
# if self.use_neumann_BC_for_map:
# fdc = self.fdt_ne # use zero Neumann boundary conditions
# else:
# fdc = self.fdt_le # do linear extrapolation
fdc = self.fdt_ne
#fdc = self.fdt_le
if self.dim == 1:
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dxc_v = -fdc.dXc(v)
dxc_v_multi_m = dxc_v * m
rhsm[:]= dxc_mv0 + dxc_v_multi_m
elif self.dim == 2:
# (m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm (EPDiff equation)
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dyc_mv1 = -fdc.dYc(m*v[:,1:2])
dc_mv_sum = dxc_mv0 + dyc_mv1
dxc_v = -fdc.dXc(v)
dyc_v = -fdc.dYc(v)
dxc_v_multi_m = dxc_v * m
dyc_v_multi_m = dyc_v * m
dxc_v_multi_m_sum = torch.sum(dxc_v_multi_m, 1)
dyc_v_multi_m_sum = torch.sum(dyc_v_multi_m, 1)
rhsm[:,0, :, :] = dc_mv_sum[:,0] + dxc_v_multi_m_sum
rhsm[:,1, :, :] = dc_mv_sum[:,1] + dyc_v_multi_m_sum
elif self.dim == 3:
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dyc_mv1 = -fdc.dYc(m*v[:,1:2])
dzc_mv2 = -fdc.dZc(m*v[:,2:3])
dc_mv_sum = dxc_mv0 + dyc_mv1 + dzc_mv2
dxc_v = -fdc.dXc(v)
dyc_v = -fdc.dYc(v)
dzc_v = -fdc.dZc(v)
dxc_v_multi_m = dxc_v*m
dyc_v_multi_m = dyc_v*m
dzc_v_multi_m = dzc_v*m
dxc_v_multi_m_sum = torch.sum(dxc_v_multi_m,1)
dyc_v_multi_m_sum = torch.sum(dyc_v_multi_m,1)
dzc_v_multi_m_sum = torch.sum(dzc_v_multi_m,1)
rhsm[:, 0] = dc_mv_sum[:,0] + dxc_v_multi_m_sum
rhsm[:, 1] = dc_mv_sum[:,1] + dyc_v_multi_m_sum
rhsm[:, 2] = dc_mv_sum[:,2] + dzc_v_multi_m_sum
else:
            raise ValueError('Only supported up to dimension 3')
return rhsm
def rhs_adapt_epdiff_wkw_multiNC(self, m, v,w, sm_wm,smoother):
'''
Computes the right hand side of the EPDiff equation for of N momenta (for N images).
Expected format here, is BxCxXxYxZ, where B is the number of momenta (batch size), C,
the number of channels per (here the spatial dimension for the momenta),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
a new version, where batch is no longer calculated separately
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:param m: momenta batch BxCXxYxZ
:param v: Velocity fields (this will be one velocity field per momentum) BxCXxYxZ
:return: Returns the RHS of the EPDiff equations involved BxCXxYxZ
'''
sz = m.size()
rhs_ret = MyTensor(sz).zero_()
rhs_ret = self._rhs_adapt_epdiff_wkw_call(m, v,w,sm_wm,smoother, rhs_ret)
return rhs_ret
def _rhs_adapt_epdiff_wkw_call(self, m, v,w,sm_wm, smoother, rhsm):
"""
:param m: momenta batch BxCxXxYxZ
:param sm_wm: smoothed(wm) batch x K x dim x X x Y x ...
:param w: smoothed(wm) batch x K x X x Y x ...
:param v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ
:return rhsm: Returns the RHS of the EPDiff equations involved BxCxXxYxZ
"""
# if self.use_neumann_BC_for_map:
# fdc = self.fdt_ne # use zero Neumann boundary conditions
# else:
# fdc = self.fdt_le # do linear extrapolation
fdc = self.fdt_ne
rhs = self._rhs_epdiff_call(m,v,rhsm)
ret_var = torch.empty_like(rhs)
# ret_var, rhs should batch x dim x X x Yx ..
dim = m.shape[1]
sz = [m.shape[0]]+[1]+list(m.shape[1:]) # batchx1xdimx X x Y
m = m.view(*sz)
m_sm_wm = m* sm_wm
m_sm_wm = m_sm_wm.sum(dim=2)
sm_m_sm_wm = smoother.smooth(m_sm_wm) # batchx K x X xY...
dxc_w = fdc.dXc(w)
dc_w_list = [dxc_w]
if dim == 2 or dim == 3:
dyc_w = fdc.dYc(w)
dc_w_list.append(dyc_w)
if dim == 3:
dzc_w = fdc.dZc(w) # batch x K x X xY ...
dc_w_list.append(dzc_w)
for i in range(dim):
ret_var[:, i] = rhs[:, i] + (sm_m_sm_wm* dc_w_list[i]).sum(1)
return ret_var
class ForwardModel(with_metaclass(ABCMeta, object)):
"""
Abstract forward model class. Should never be instantiated.
Derived classes require the definition of f(self,t,x,u,pars) and u(self,t,pars).
These functions will be used for integration: x'(t) = f(t,x(t),u(t))
"""
def __init__(self, sz, spacing, params=None):
'''
Constructor of abstract forward model class
:param sz: size of images
:param spacing: numpy array for spacing in x,y,z directions
'''
self.dim = spacing.size # spatial dimension of the problem
"""spatial dimension"""
self.spacing = spacing
"""spatial spacing"""
self.sz = sz
"""image size (BxCxXxYxZ)"""
self.params = params
"""ParameterDict instance holding parameters"""
self.rhs = RHSLibrary(self.spacing)
"""rhs library support"""
if self.dim>3 or self.dim<1:
raise ValueError('Forward models are currently only supported in dimensions 1 to 3')
self.debug_mode_on =False
@abstractmethod
def f(self,t,x,u,pars,variables_from_optimizer=None):
"""
Function to be integrated
:param t: time
:param x: state
:param u: input
:param pars: optional parameters
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: the function value, should return a list (to support easy concatenations of states)
"""
pass
def u(self,t,pars,variables_from_optimizer=None):
"""
External input
:param t: time
:param pars: parameters
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: the external input
"""
return []
class AdvectMap(ForwardModel):
"""
Forward model to advect an n-D map using a transport equation: :math:`\\Phi_t + D\\Phi v = 0`.
v is treated as an external argument and \Phi is the state
"""
def __init__(self, sz, spacing, params=None,compute_inverse_map=False):
super(AdvectMap,self).__init__(sz,spacing,params)
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
def u(self,t, pars, variables_from_optimizer=None):
"""
External input, to hold the velocity field
:param t: time (ignored; not time-dependent)
:param pars: assumes an n-D velocity field is passed as the only input argument
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: Simply returns this velocity field
"""
return pars['v']
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of transport equation:
:math:`-D\\phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the map, \Phi, itself (assumes 3D-5D array; [nrI,0,:,:] x-coors; [nrI,1,:,:] y-coors; ...
:param u: external input, will be the velocity field here
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [phi]
"""
if self.compute_inverse_map:
return [self.rhs.rhs_advect_map_multiNC(x[0], u),self.rhs.rhs_lagrangian_evolve_map_multiNC(x[1], u)]
else:
return [self.rhs.rhs_advect_map_multiNC(x[0],u)]
class AdvectImage(ForwardModel):
"""
Forward model to advect an image using a transport equation: :math:`I_t + \\nabla I^Tv = 0`.
v is treated as an external argument and I is the state
"""
def __init__(self, sz, spacing, params=None):
super(AdvectImage, self).__init__(sz, spacing,params)
def u(self,t, pars, variables_from_optimizer=None):
"""
External input, to hold the velocity field
:param t: time (ignored; not time-dependent)
:param pars: assumes an n-D velocity field is passed as the only input argument
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: Simply returns this velocity field
"""
return pars['v']
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of transport equation: :math:`-\\nabla I^T v`
:param t: time (ignored; not time-dependent)
:param x: state, here the image, I, itself (supports multiple images and channels)
:param u: external input, will be the velocity field here
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [I]
"""
return [self.rhs.rhs_advect_image_multiNC(x[0],u)]
class EPDiffImage(ForwardModel):
"""
Forward model for the EPdiff equation. State is the momentum, m, and the image I:
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`I_t+\\nabla I^Tv=0`
"""
def __init__(self, sz, spacing, smoother, params=None):
super(EPDiffImage, self).__init__(sz, spacing,params)
self.smoother = smoother
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-\\nabla I^Tv`
:param t: time (ignored; not time-dependent)
:param x: state, here the vector momentum, m, and the image, I
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [m,I]
"""
# assume x[0] is m and x[1] is I for the state
m = x[0]
I = x[1]
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I': I}),variables_from_optimizer)
# print('max(|v|) = ' + str( v.abs().max() ))
return [self.rhs.rhs_epdiff_multiNC(m,v), self.rhs.rhs_advect_image_multiNC(I,v)]
class EPDiffMap(ForwardModel):
"""
Forward model for the EPDiff equation. State is the momentum, m, and the transform, :math:`\\phi`
(mapping the source image to the target image).
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`\\phi_t+D\\phi v=0`
"""
def __init__(self, sz, spacing, smoother, params=None,compute_inverse_map=False):
super(EPDiffMap, self).__init__(sz,spacing,params)
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
self.smoother = smoother
self.use_net = True if self.params['smoother']['type'] == 'adaptiveNet' else False
def debugging(self,input,t):
x = utils.checkNan(input)
if np.sum(x):
print("find nan at {} step".format(t))
print("flag m: {}, ".format(x[0]))
print("flag v: {},".format(x[1]))
print("flag phi: {},".format(x[2]))
print("flag new_m: {},".format(x[3]))
print("flag new_phi: {},".format(x[4]))
raise ValueError("nan error")
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
        :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-D\\phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the image, vector momentum, m, and the map, :math:`\\phi`
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [m,phi]
"""
# assume x[0] is m and x[1] is phi for the state
m = x[0]
m = m.clamp(max=1., min=-1.)
phi = x[1]
if self.compute_inverse_map:
phi_inv = x[2]
if not self.use_net:
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'phi':phi}),variables_from_optimizer)
else:
v = self.smoother.adaptive_smooth(m, phi, using_map=True)
# print('max(|v|) = ' + str( v.abs().max() ))
if self.compute_inverse_map:
ret_val= [self.rhs.rhs_epdiff_multiNC(m,v),
self.rhs.rhs_advect_map_multiNC(phi,v),
self.rhs.rhs_lagrangian_evolve_map_multiNC(phi_inv,v)]
else:
new_m = self.rhs.rhs_epdiff_multiNC(m,v)
new_phi = self.rhs.rhs_advect_map_multiNC(phi,v)
ret_val= [new_m, new_phi]
return ret_val
class EPDiffAdaptMap(ForwardModel):
"""
Forward model for the EPDiff equation. State is the momentum, m, and the transform, :math:`\\phi`
(mapping the source image to the target image).
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`\\phi_t+D\\phi v=0`
"""
def __init__(self, sz, spacing, smoother, params=None, compute_inverse_map=False, update_sm_by_advect= True, update_sm_with_interpolation=True,compute_on_initial_map=True):
super(EPDiffAdaptMap, self).__init__(sz, spacing, params)
from . import module_parameters as pars
from . import smoother_factory as sf
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
self.smoother = smoother
self.update_sm_by_advect = update_sm_by_advect
self.use_the_first_step_penalty = True
self.update_sm_with_interpolation = update_sm_with_interpolation
self.compute_on_initial_map=compute_on_initial_map
self.update_sm_weight=None
self.velocity_mask = None
self.debug_mode_on = False
s_m_params = pars.ParameterDict()
s_m_params['smoother']['type'] = 'gaussian'
s_m_params['smoother']['gaussian_std'] =self.params['smoother']['deep_smoother']['deep_network_local_weight_smoothing']
self.embedded_smoother = sf.SmootherFactory(sz[2:], spacing).create_smoother(
s_m_params)
""" if only take the first step penalty as the total penalty, otherwise accumluate the penalty"""
def debug_nan(self, input, t,name=''):
x = utils.checkNan([input])
if np.sum(x):
# print(input[0])
print("find nan at {} step, {} with number {}".format(t,name,x[0]))
raise ValueError("nan error")
def init_zero_sm_weight(self,sm_weight):
self.update_sm_weight = torch.zeros_like(sm_weight).detach()
def init_velocity_mask(self,velocity_mask):
self.velocity_mask = velocity_mask
def debug_distrib(self,var,name):
var = var.detach().cpu().numpy()
density,_= np.histogram(var,[-100,-10,-1,0,1,10,100],density=True)
print("{} distri:{}".format(name,density))
def f(self, t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
        :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-D\\phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the image, vector momentum, m, and the map, :math:`\\phi`
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [m,phi]
"""
# assume x[0] is m and x[1] is phi for the state
m = x[0]
m=m.clamp(max=1., min=-1.)
phi = x[1]
return_val_name = []
sm_weight = None
if self.update_sm_by_advect:
if not self.update_sm_with_interpolation:
sm_weight_pre = x[2]
sm_weight = self.embedded_smoother.smooth(sm_weight_pre)
v, extra_ret = self.smoother.smooth(m, None, {'w':sm_weight},multi_output=True)
if self.velocity_mask is not None:
v = v* self.velocity_mask
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_weight_pre = self.rhs.rhs_advect_map_multiNC(sm_weight_pre, v)
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m, v, new_sm_weight_pre, extra_ret,
self.embedded_smoother)
ret_val = [new_m, new_phi,new_sm_weight_pre]
return_val_name =['new_m','new_phi','new_sm_weight']
else:
if self.compute_on_initial_map:
sm_weight = x[2]
sm_phi = x[3]
new_sm_weight = utils.compute_warped_image_multiNC(sm_weight, sm_phi, self.spacing, 1,
zero_boundary=False)
pre_weight = sm_weight
new_sm_weight = self.embedded_smoother.smooth(new_sm_weight)
#print('t{},m min, mean,max {} {} {}'.format(t,m.min().item(),m.mean().item(),m.max().item()))
v,extra_ret = self.smoother.smooth(m,None,{'w': new_sm_weight},multi_output=True)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m,v,pre_weight,extra_ret,self.embedded_smoother)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_phi = self.rhs.rhs_advect_map_multiNC(sm_phi, v)
new_sm_weight = self.update_sm_weight.detach()
ret_val = [new_m, new_phi,new_sm_weight,new_sm_phi]
return_val_name = ['new_m', 'new_phi', 'new_sm_weight','new_sm_phi']
                else:  # TODO: note, this is the branch we currently use
sm_weight = x[2]
new_sm_weight = utils.compute_warped_image_multiNC(sm_weight, phi, self.spacing, 1,
zero_boundary=False)
pre_weight = sm_weight
new_sm_weight = self.embedded_smoother.smooth(new_sm_weight)
v, extra_ret = self.smoother.smooth(m, None,{'w':new_sm_weight}, multi_output=True)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m,v,pre_weight,extra_ret,self.embedded_smoother)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_weight = self.update_sm_weight.detach()
ret_val = [new_m, new_phi, new_sm_weight]
return_val_name = ['new_m', 'new_phi', 'new_sm_weight']
else:
if not t==0:
if self.use_the_first_step_penalty:
self.smoother.disable_penalty_computation()
else:
self.smoother.enable_accumulated_penalty()
I = utils.compute_warped_image_multiNC(pars['I0'], phi, self.spacing, 1,zero_boundary=True)
pars['I'] = I.detach() # TODO check whether I should be detached here
v = self.smoother.smooth(m, None, pars, variables_from_optimizer)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_epdiff_multiNC(m, v)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
ret_val = [new_m, new_phi]
return_val_name =['new_m','new_phi']
if self.debug_mode_on:
toshows = [m, v,phi]+ret_val if sm_weight is None else [m, v,phi]+ret_val +[sm_weight]
name = ['m', 'v','phi']+return_val_name if sm_weight is None else ['m', 'v','phi']+return_val_name +['sm_weight']
for i, toshow in enumerate(toshows):
print('t{},{} min, mean,max {} {} {}'.format(t, name[i], toshow.min().item(), toshow.mean().item(),
toshow.max().item()))
self.debug_distrib(toshow, name[i])
self.debug_nan(toshow,t,name[i])
return ret_val
# print('max(|v|) = ' + str( v.abs().max() ))
class EPDiffScalarMomentum(ForwardModel):
"""
Base class for scalar momentum EPDiff solutions. Defines a smoother that can be commonly used.
"""
def __init__(self, sz, spacing, smoother, params):
super(EPDiffScalarMomentum,self).__init__(sz,spacing,params)
self.smoother = smoother
class EPDiffScalarMomentumImage(EPDiffScalarMomentum):
"""
Forward model for the scalar momentum EPdiff equation. State is the scalar momentum, lam, and the image I
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
    :math:`m=\\lambda\\nabla I`
:math:`I_t+\\nabla I^Tv=0`
:math:`\\lambda_t + div(\\lambda v)=0`
"""
def __init__(self, sz, spacing, smoother, params=None):
super(EPDiffScalarMomentumImage, self).__init__(sz, spacing, smoother, params)
def f(self, t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-\\nabla I^Tv`
        :math:`-div(\\lambda v)`
:param t: time (ignored; not time-dependent)
:param x: state, here the scalar momentum, lam, and the image, I, itself
:param u: no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [lam,I]
"""
# assume x[0] is \lambda and x[1] is I for the state
lam = x[0]
I = x[1]
# now compute the momentum
m = utils.compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, self.sz, self.spacing)
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I':I}),variables_from_optimizer)
# advection for I, scalar-conservation law for lam
return [self.rhs.rhs_scalar_conservation_multiNC(lam, v), self.rhs.rhs_advect_image_multiNC(I, v)]
class EPDiffScalarMomentumMap(EPDiffScalarMomentum):
"""
Forward model for the scalar momentum EPDiff equation. State is the scalar momentum, lam, the image, I, and the transform, phi.
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`m=\\lambda\\nabla I`
:math:`I_t+\\nabla I^Tv=0`
:math:`\\lambda_t + div(\\lambda v)=0`
:math:`\\Phi_t+D\\Phi v=0`
"""
def __init__(self, sz, spacing, smoother, params=None, compute_inverse_map=False):
super(EPDiffScalarMomentumMap, self).__init__(sz,spacing, smoother, params)
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-\\nabla I^Tv`
:math:`-div(\\lambda v)`
:math:`-D\\Phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the scalar momentum, lam, the image, I, and the transform, :math:`\\phi`
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [lam,I,phi]
"""
# assume x[0] is lam and x[1] is I and x[2] is phi for the state
lam = x[0]
I = x[1]
phi = x[2]
if self.compute_inverse_map:
phi_inv = x[3]
# now compute the momentum
m = utils.compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, self.sz, self.spacing)
# todo: replace this by phi again
#v = self.smoother.smooth(m,None,[phi,True],variables_from_optimizer)
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I':I}),variables_from_optimizer)
if self.compute_inverse_map:
ret_val = [self.rhs.rhs_scalar_conservation_multiNC(lam,v),
self.rhs.rhs_advect_image_multiNC(I,v),
self.rhs.rhs_advect_map_multiNC(phi,v),
self.rhs.rhs_lagrangian_evolve_map_multiNC(phi_inv,v)]
else:
ret_val = [self.rhs.rhs_scalar_conservation_multiNC(lam,v),
self.rhs.rhs_advect_image_multiNC(I,v),
self.rhs.rhs_advect_map_multiNC(phi,v)]
return ret_val
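# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): driving RHSLibrary
# directly for a single 2D advection evaluation. The spacing and tensor sizes
# below are arbitrary assumptions chosen only to illustrate the BxCxXxY
# conventions documented above; it runs only when the module is executed
# directly.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    example_spacing = np.array([0.02, 0.02])
    rhs_library = RHSLibrary(example_spacing)
    I_example = torch.rand(1, 1, 32, 32)    # one single-channel 32x32 image
    v_example = torch.zeros(1, 2, 32, 32)   # one 2D velocity field for that image
    dIdt_example = rhs_library.rhs_advect_image_multiNC(I_example, v_example)
    # With a zero velocity field the advection RHS should itself be zero.
    print(dIdt_example.shape, float(dIdt_example.abs().max()))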
| 40.17191
| 176
| 0.609795
|
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from builtins import object
from abc import ABCMeta, abstractmethod
import numpy as np
from . import finite_differences_multi_channel as fdm
from . import utils
from .data_wrapper import MyTensor
from future.utils import with_metaclass
import torch.nn as nn
import torch
class RHSLibrary(object):
def __init__(self, spacing, use_neumann_BC_for_map=False):
self.spacing = spacing
self.spacing_min = np.min(spacing)
self.spacing_ratio = spacing/self.spacing_min
self.fdt_ne = fdm.FD_torch_multi_channel(spacing,mode='neumann_zero')
self.fdt_le = fdm.FD_torch_multi_channel( spacing, mode='linear')
self.fdt_di = fdm.FD_torch_multi_channel(spacing, mode='dirichlet_zero')
self.dim = len(self.spacing)
self.use_neumann_BC_for_map = use_neumann_BC_for_map
def rhs_advect_image_multiNC(self,I,v):
rhs_ret= self._rhs_advect_image_multiN(I, v )
return rhs_ret
def _rhs_advect_image_multiN(self,I,v):
if self.dim == 1:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1]
elif self.dim == 2:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1] -self.fdt_ne.dYc(I)*v[:,1:2]
elif self.dim == 3:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1] -self.fdt_ne.dYc(I)*v[:,1:2]-self.fdt_ne.dZc(I)*v[:,2:3]
else:
raise ValueError('Only supported up to dimension 3')
return rhs_ret
def rhs_scalar_conservation_multiNC(self, I, v):
rhs_ret=self._rhs_scalar_conservation_multiN(I, v)
return rhs_ret
def _rhs_scalar_conservation_multiN(self, I, v):
if self.dim==1:
rhs_ret = -self.fdt_ne.dXc(I*v[:,0:1])
elif self.dim==2:
rhs_ret = -self.fdt_ne.dXc(I*v[:,0:1]) -self.fdt_ne.dYc(I*v[:,1:2])
elif self.dim==3:
rhs_ret = -self.fdt_ne.dXc(I* v[:,0:1]) -self.fdt_ne.dYc(I*v[:,1:2])-self.fdt_ne.dZc(I*v[:,2:3])
else:
raise ValueError('Only supported up to dimension 3')
return rhs_ret
def rhs_lagrangian_evolve_map_multiNC(self, phi, v):
rhs_ret = utils.compute_warped_image_multiNC(v, phi, spacing=self.spacing, spline_order=1,zero_boundary=False)
return rhs_ret
def rhs_advect_map_multiNC(self, phi, v):
sz = phi.size()
rhs_ret = self._rhs_advect_map_call(phi, v)
return rhs_ret
def _rhs_advect_map_call(self,phi,v):
fdc = self.fdt_le
if self.dim==1:
dxc_phi = -fdc.dXc(phi)
rhsphi = v[:, 0:1] * dxc_phi
elif self.dim==2:
dxc_phi = -fdc.dXc(phi)
dyc_phi = -fdc.dYc(phi)
rhsphi = v[:, 0:1] * dxc_phi + v[:, 1:2] * dyc_phi
elif self.dim==3:
dxc_phi = -fdc.dXc(phi)
dyc_phi = -fdc.dYc(phi)
dzc_phi = -fdc.dZc(phi)
rhsphi = v[:,0:1]*dxc_phi + v[:,1:2]*dyc_phi + v[:,2:3]*dzc_phi
else:
raise ValueError('Only supported up to dimension 3')
return rhsphi
def rhs_epdiff_multiNC(self, m, v):
sz = m.size()
rhs_ret = MyTensor(sz).zero_()
rhs_ret = self._rhs_epdiff_call(m, v, rhs_ret)
return rhs_ret
def _rhs_epdiff_call(self, m, v,rhsm):
        fdc = self.fdt_ne
        if self.dim == 1:
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dxc_v = -fdc.dXc(v)
dxc_v_multi_m = dxc_v * m
rhsm[:]= dxc_mv0 + dxc_v_multi_m
elif self.dim == 2:
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dyc_mv1 = -fdc.dYc(m*v[:,1:2])
dc_mv_sum = dxc_mv0 + dyc_mv1
dxc_v = -fdc.dXc(v)
dyc_v = -fdc.dYc(v)
dxc_v_multi_m = dxc_v * m
dyc_v_multi_m = dyc_v * m
dxc_v_multi_m_sum = torch.sum(dxc_v_multi_m, 1)
dyc_v_multi_m_sum = torch.sum(dyc_v_multi_m, 1)
rhsm[:,0, :, :] = dc_mv_sum[:,0] + dxc_v_multi_m_sum
rhsm[:,1, :, :] = dc_mv_sum[:,1] + dyc_v_multi_m_sum
elif self.dim == 3:
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dyc_mv1 = -fdc.dYc(m*v[:,1:2])
dzc_mv2 = -fdc.dZc(m*v[:,2:3])
dc_mv_sum = dxc_mv0 + dyc_mv1 + dzc_mv2
dxc_v = -fdc.dXc(v)
dyc_v = -fdc.dYc(v)
dzc_v = -fdc.dZc(v)
dxc_v_multi_m = dxc_v*m
dyc_v_multi_m = dyc_v*m
dzc_v_multi_m = dzc_v*m
dxc_v_multi_m_sum = torch.sum(dxc_v_multi_m,1)
dyc_v_multi_m_sum = torch.sum(dyc_v_multi_m,1)
dzc_v_multi_m_sum = torch.sum(dzc_v_multi_m,1)
rhsm[:, 0] = dc_mv_sum[:,0] + dxc_v_multi_m_sum
rhsm[:, 1] = dc_mv_sum[:,1] + dyc_v_multi_m_sum
rhsm[:, 2] = dc_mv_sum[:,2] + dzc_v_multi_m_sum
else:
            raise ValueError('Only supported up to dimension 3')
return rhsm
def rhs_adapt_epdiff_wkw_multiNC(self, m, v,w, sm_wm,smoother):
sz = m.size()
rhs_ret = MyTensor(sz).zero_()
rhs_ret = self._rhs_adapt_epdiff_wkw_call(m, v,w,sm_wm,smoother, rhs_ret)
return rhs_ret
def _rhs_adapt_epdiff_wkw_call(self, m, v,w,sm_wm, smoother, rhsm):
        fdc = self.fdt_ne
        rhs = self._rhs_epdiff_call(m,v,rhsm)
ret_var = torch.empty_like(rhs)
dim = m.shape[1]
sz = [m.shape[0]]+[1]+list(m.shape[1:])
m = m.view(*sz)
m_sm_wm = m* sm_wm
m_sm_wm = m_sm_wm.sum(dim=2)
sm_m_sm_wm = smoother.smooth(m_sm_wm)
dxc_w = fdc.dXc(w)
dc_w_list = [dxc_w]
if dim == 2 or dim == 3:
dyc_w = fdc.dYc(w)
dc_w_list.append(dyc_w)
if dim == 3:
dzc_w = fdc.dZc(w)
dc_w_list.append(dzc_w)
for i in range(dim):
ret_var[:, i] = rhs[:, i] + (sm_m_sm_wm* dc_w_list[i]).sum(1)
return ret_var
class ForwardModel(with_metaclass(ABCMeta, object)):
def __init__(self, sz, spacing, params=None):
self.dim = spacing.size
self.spacing = spacing
self.sz = sz
self.params = params
self.rhs = RHSLibrary(self.spacing)
if self.dim>3 or self.dim<1:
raise ValueError('Forward models are currently only supported in dimensions 1 to 3')
self.debug_mode_on =False
@abstractmethod
def f(self,t,x,u,pars,variables_from_optimizer=None):
pass
def u(self,t,pars,variables_from_optimizer=None):
return []
class AdvectMap(ForwardModel):
def __init__(self, sz, spacing, params=None,compute_inverse_map=False):
super(AdvectMap,self).__init__(sz,spacing,params)
self.compute_inverse_map = compute_inverse_map
def u(self,t, pars, variables_from_optimizer=None):
return pars['v']
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
if self.compute_inverse_map:
return [self.rhs.rhs_advect_map_multiNC(x[0], u),self.rhs.rhs_lagrangian_evolve_map_multiNC(x[1], u)]
else:
return [self.rhs.rhs_advect_map_multiNC(x[0],u)]
class AdvectImage(ForwardModel):
def __init__(self, sz, spacing, params=None):
super(AdvectImage, self).__init__(sz, spacing,params)
def u(self,t, pars, variables_from_optimizer=None):
return pars['v']
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
return [self.rhs.rhs_advect_image_multiNC(x[0],u)]
class EPDiffImage(ForwardModel):
def __init__(self, sz, spacing, smoother, params=None):
super(EPDiffImage, self).__init__(sz, spacing,params)
self.smoother = smoother
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
m = x[0]
I = x[1]
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I': I}),variables_from_optimizer)
return [self.rhs.rhs_epdiff_multiNC(m,v), self.rhs.rhs_advect_image_multiNC(I,v)]
class EPDiffMap(ForwardModel):
def __init__(self, sz, spacing, smoother, params=None,compute_inverse_map=False):
super(EPDiffMap, self).__init__(sz,spacing,params)
self.compute_inverse_map = compute_inverse_map
self.smoother = smoother
self.use_net = True if self.params['smoother']['type'] == 'adaptiveNet' else False
def debugging(self,input,t):
x = utils.checkNan(input)
if np.sum(x):
print("find nan at {} step".format(t))
print("flag m: {}, ".format(x[0]))
print("flag v: {},".format(x[1]))
print("flag phi: {},".format(x[2]))
print("flag new_m: {},".format(x[3]))
print("flag new_phi: {},".format(x[4]))
raise ValueError("nan error")
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
m = x[0]
m = m.clamp(max=1., min=-1.)
phi = x[1]
if self.compute_inverse_map:
phi_inv = x[2]
if not self.use_net:
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'phi':phi}),variables_from_optimizer)
else:
v = self.smoother.adaptive_smooth(m, phi, using_map=True)
if self.compute_inverse_map:
ret_val= [self.rhs.rhs_epdiff_multiNC(m,v),
self.rhs.rhs_advect_map_multiNC(phi,v),
self.rhs.rhs_lagrangian_evolve_map_multiNC(phi_inv,v)]
else:
new_m = self.rhs.rhs_epdiff_multiNC(m,v)
new_phi = self.rhs.rhs_advect_map_multiNC(phi,v)
ret_val= [new_m, new_phi]
return ret_val
class EPDiffAdaptMap(ForwardModel):
def __init__(self, sz, spacing, smoother, params=None, compute_inverse_map=False, update_sm_by_advect= True, update_sm_with_interpolation=True,compute_on_initial_map=True):
super(EPDiffAdaptMap, self).__init__(sz, spacing, params)
from . import module_parameters as pars
from . import smoother_factory as sf
self.compute_inverse_map = compute_inverse_map
self.smoother = smoother
self.update_sm_by_advect = update_sm_by_advect
self.use_the_first_step_penalty = True
self.update_sm_with_interpolation = update_sm_with_interpolation
self.compute_on_initial_map=compute_on_initial_map
self.update_sm_weight=None
self.velocity_mask = None
self.debug_mode_on = False
s_m_params = pars.ParameterDict()
s_m_params['smoother']['type'] = 'gaussian'
s_m_params['smoother']['gaussian_std'] =self.params['smoother']['deep_smoother']['deep_network_local_weight_smoothing']
self.embedded_smoother = sf.SmootherFactory(sz[2:], spacing).create_smoother(
s_m_params)
def debug_nan(self, input, t,name=''):
x = utils.checkNan([input])
if np.sum(x):
print("find nan at {} step, {} with number {}".format(t,name,x[0]))
raise ValueError("nan error")
def init_zero_sm_weight(self,sm_weight):
self.update_sm_weight = torch.zeros_like(sm_weight).detach()
def init_velocity_mask(self,velocity_mask):
self.velocity_mask = velocity_mask
def debug_distrib(self,var,name):
var = var.detach().cpu().numpy()
density,_= np.histogram(var,[-100,-10,-1,0,1,10,100],density=True)
print("{} distri:{}".format(name,density))
def f(self, t, x, u, pars=None, variables_from_optimizer=None):
m = x[0]
m=m.clamp(max=1., min=-1.)
phi = x[1]
return_val_name = []
sm_weight = None
if self.update_sm_by_advect:
if not self.update_sm_with_interpolation:
sm_weight_pre = x[2]
sm_weight = self.embedded_smoother.smooth(sm_weight_pre)
v, extra_ret = self.smoother.smooth(m, None, {'w':sm_weight},multi_output=True)
if self.velocity_mask is not None:
v = v* self.velocity_mask
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_weight_pre = self.rhs.rhs_advect_map_multiNC(sm_weight_pre, v)
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m, v, new_sm_weight_pre, extra_ret,
self.embedded_smoother)
ret_val = [new_m, new_phi,new_sm_weight_pre]
return_val_name =['new_m','new_phi','new_sm_weight']
else:
if self.compute_on_initial_map:
sm_weight = x[2]
sm_phi = x[3]
new_sm_weight = utils.compute_warped_image_multiNC(sm_weight, sm_phi, self.spacing, 1,
zero_boundary=False)
pre_weight = sm_weight
new_sm_weight = self.embedded_smoother.smooth(new_sm_weight)
v,extra_ret = self.smoother.smooth(m,None,{'w': new_sm_weight},multi_output=True)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m,v,pre_weight,extra_ret,self.embedded_smoother)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_phi = self.rhs.rhs_advect_map_multiNC(sm_phi, v)
new_sm_weight = self.update_sm_weight.detach()
ret_val = [new_m, new_phi,new_sm_weight,new_sm_phi]
return_val_name = ['new_m', 'new_phi', 'new_sm_weight','new_sm_phi']
else:
sm_weight = x[2]
new_sm_weight = utils.compute_warped_image_multiNC(sm_weight, phi, self.spacing, 1,
zero_boundary=False)
pre_weight = sm_weight
new_sm_weight = self.embedded_smoother.smooth(new_sm_weight)
v, extra_ret = self.smoother.smooth(m, None,{'w':new_sm_weight}, multi_output=True)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m,v,pre_weight,extra_ret,self.embedded_smoother)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_weight = self.update_sm_weight.detach()
ret_val = [new_m, new_phi, new_sm_weight]
return_val_name = ['new_m', 'new_phi', 'new_sm_weight']
else:
if not t==0:
if self.use_the_first_step_penalty:
self.smoother.disable_penalty_computation()
else:
self.smoother.enable_accumulated_penalty()
I = utils.compute_warped_image_multiNC(pars['I0'], phi, self.spacing, 1,zero_boundary=True)
pars['I'] = I.detach()
v = self.smoother.smooth(m, None, pars, variables_from_optimizer)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_epdiff_multiNC(m, v)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
ret_val = [new_m, new_phi]
return_val_name =['new_m','new_phi']
if self.debug_mode_on:
toshows = [m, v,phi]+ret_val if sm_weight is None else [m, v,phi]+ret_val +[sm_weight]
name = ['m', 'v','phi']+return_val_name if sm_weight is None else ['m', 'v','phi']+return_val_name +['sm_weight']
for i, toshow in enumerate(toshows):
print('t{},{} min, mean,max {} {} {}'.format(t, name[i], toshow.min().item(), toshow.mean().item(),
toshow.max().item()))
self.debug_distrib(toshow, name[i])
self.debug_nan(toshow,t,name[i])
return ret_val
class EPDiffScalarMomentum(ForwardModel):
def __init__(self, sz, spacing, smoother, params):
super(EPDiffScalarMomentum,self).__init__(sz,spacing,params)
self.smoother = smoother
class EPDiffScalarMomentumImage(EPDiffScalarMomentum):
def __init__(self, sz, spacing, smoother, params=None):
super(EPDiffScalarMomentumImage, self).__init__(sz, spacing, smoother, params)
def f(self, t, x, u, pars=None, variables_from_optimizer=None):
lam = x[0]
I = x[1]
m = utils.compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, self.sz, self.spacing)
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I':I}),variables_from_optimizer)
return [self.rhs.rhs_scalar_conservation_multiNC(lam, v), self.rhs.rhs_advect_image_multiNC(I, v)]
class EPDiffScalarMomentumMap(EPDiffScalarMomentum):
def __init__(self, sz, spacing, smoother, params=None, compute_inverse_map=False):
super(EPDiffScalarMomentumMap, self).__init__(sz,spacing, smoother, params)
self.compute_inverse_map = compute_inverse_map
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
lam = x[0]
I = x[1]
phi = x[2]
if self.compute_inverse_map:
phi_inv = x[3]
m = utils.compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, self.sz, self.spacing)
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I':I}),variables_from_optimizer)
if self.compute_inverse_map:
ret_val = [self.rhs.rhs_scalar_conservation_multiNC(lam,v),
self.rhs.rhs_advect_image_multiNC(I,v),
self.rhs.rhs_advect_map_multiNC(phi,v),
self.rhs.rhs_lagrangian_evolve_map_multiNC(phi_inv,v)]
else:
ret_val = [self.rhs.rhs_scalar_conservation_multiNC(lam,v),
self.rhs.rhs_advect_image_multiNC(I,v),
self.rhs.rhs_advect_map_multiNC(phi,v)]
return ret_val
| true
| true
|
f70af051aa8623d4b8e4b7eaf375f8307bb9bfdb
| 3,988
|
py
|
Python
|
stackdriver_log_formatter/formatter.py
|
tmshn/python-stackdriver-formatter
|
7cb424283cae47a56a2e4f0c98cb654e6c819bf6
|
[
"MIT"
] | 2
|
2020-03-13T06:07:35.000Z
|
2020-07-02T13:24:44.000Z
|
stackdriver_log_formatter/formatter.py
|
tmshn/python-stackdriver-formatter
|
7cb424283cae47a56a2e4f0c98cb654e6c819bf6
|
[
"MIT"
] | 9
|
2019-08-13T10:00:07.000Z
|
2019-08-13T10:11:24.000Z
|
stackdriver_log_formatter/formatter.py
|
tmshn/python-stackdriver-formatter
|
7cb424283cae47a56a2e4f0c98cb654e6c819bf6
|
[
"MIT"
] | null | null | null |
from collections.abc import Mapping
from datetime import datetime
import logging
from typing import Optional
from stackdriver_log_formatter.serializer import DefaultFunc, dumps
class StackdriverLogFormatter(logging.Formatter):
"""Log formatter suitable for Stackdriver Logging.
This formatter print log as a single-line json with appropriate fields.
    For detailed information about each field, refer to Stackdriver's API documentation [1]_
and fluent-plugin-google-cloud source [2]_.
References
----------
.. [1]: https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry
.. [2]: https://github.com/GoogleCloudPlatform/fluent-plugin-google-cloud
Example
-------
>>> # setup
>>> logging.basicConfig(level=logging.INFO, stream=sys.stdout)
>>> logging.root.handlers[0].setFormatter(StackdriverLogFormatter())
>>> # logging
>>> logger = logging.getLogger(__name__)
>>> logger.info('Hello world')
>>> # With custom fields (shown in 'jsonPayload' in Stackdriver)
    >>> logger.info('bla bla bla', {'customField': 123})
    >>> logger.info('bla bla bla: %(customField)s', {'customField': 123})
>>> # With exception
>>> try:
... 1 / 0
... except Exception:
    ...     logger.exception('Oops, an error occurred!')
"""
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
def __init__(self, *, default: DefaultFunc=None):
"""Initialize formatter.
Keyword Arguments
-----------------
default: function or None, optional
A function called to serialize non-standard objects.
It should return a json serializable version of the object or raise a TypeError.
"""
self.default = default
def formatTime(self, record: logging.LogRecord, datefmt: Optional[str]=None) -> str:
"""Return the creation time of the specified LogRecord as formatted text.
The format is always ISO8601 in UTC ('Z'-suffixed), so `datefmt` argument is ignored.
We use `datetime.datetime` rather than `time.time` to print subseconds.
"""
return datetime.utcfromtimestamp(record.created).strftime(self.DATE_FORMAT)
def usesTime(self) -> bool:
"""Check if the format uses the creation time of the record.
This is always true.
"""
return True
def format(self, record: logging.LogRecord) -> str:
"""Format the specified record as text.
This will be a single-line json with appropriate fields.
"""
record.message = record.getMessage()
record.asctime = self.formatTime(record)
if record.exc_info and not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
log_obj = {
'severity': record.levelname,
'time': record.asctime,
'message': record.message,
'logger': record.name,
'module': record.module,
'logging.googleapis.com/sourceLocation': {
'file': record.pathname,
'line': record.lineno,
'function': record.funcName,
},
'process': {
'name': record.processName,
'id': record.process,
},
'thread': {
'name': record.threadName,
'id': record.thread,
},
}
if record.exc_info:
log_obj['exceptionType'] = type(record.exc_info[1]).__name__
if record.exc_text:
log_obj['stackTrace'] = record.exc_text
if record.stack_info:
log_obj['stackInfo'] = self.formatStack(record.stack_info)
if isinstance(record.args, Mapping):
for k, v in record.args.items():
if k in log_obj or k in ('exceptionType', 'stackTrace', 'stackInfo'):
continue
log_obj.setdefault(k, v)
return dumps(log_obj, default=self.default)
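# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): minimal wiring of the
# formatter onto the root logger, mirroring the docstring example above. The
# custom field name is an assumption; it runs only when the module is executed
# directly.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import sys

    logging.basicConfig(level=logging.INFO, stream=sys.stdout)
    logging.root.handlers[0].setFormatter(StackdriverLogFormatter())
    logging.getLogger(__name__).info('hello from StackdriverLogFormatter', {'customField': 123})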
| 34.982456
| 93
| 0.604313
|
from collections.abc import Mapping
from datetime import datetime
import logging
from typing import Optional
from stackdriver_log_formatter.serializer import DefaultFunc, dumps
class StackdriverLogFormatter(logging.Formatter):
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
def __init__(self, *, default: DefaultFunc=None):
self.default = default
def formatTime(self, record: logging.LogRecord, datefmt: Optional[str]=None) -> str:
return datetime.utcfromtimestamp(record.created).strftime(self.DATE_FORMAT)
def usesTime(self) -> bool:
return True
def format(self, record: logging.LogRecord) -> str:
record.message = record.getMessage()
record.asctime = self.formatTime(record)
if record.exc_info and not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
log_obj = {
'severity': record.levelname,
'time': record.asctime,
'message': record.message,
'logger': record.name,
'module': record.module,
'logging.googleapis.com/sourceLocation': {
'file': record.pathname,
'line': record.lineno,
'function': record.funcName,
},
'process': {
'name': record.processName,
'id': record.process,
},
'thread': {
'name': record.threadName,
'id': record.thread,
},
}
if record.exc_info:
log_obj['exceptionType'] = type(record.exc_info[1]).__name__
if record.exc_text:
log_obj['stackTrace'] = record.exc_text
if record.stack_info:
log_obj['stackInfo'] = self.formatStack(record.stack_info)
if isinstance(record.args, Mapping):
for k, v in record.args.items():
if k in log_obj or k in ('exceptionType', 'stackTrace', 'stackInfo'):
continue
log_obj.setdefault(k, v)
return dumps(log_obj, default=self.default)
| true
| true
|
f70af54400dc4d7f00f926ece3a5f21098a359e1
| 3,841
|
py
|
Python
|
Redis/owlbot.py
|
chingor13/google-cloud-php
|
b110b4b6d354d2a74674ce3a63d619f3f14e84a2
|
[
"Apache-2.0"
] | 411
|
2016-09-02T15:39:15.000Z
|
2018-09-20T15:15:20.000Z
|
Redis/owlbot.py
|
chingor13/google-cloud-php
|
b110b4b6d354d2a74674ce3a63d619f3f14e84a2
|
[
"Apache-2.0"
] | 786
|
2016-08-23T01:22:16.000Z
|
2018-09-20T19:26:41.000Z
|
Redis/owlbot.py
|
chingor13/google-cloud-php
|
b110b4b6d354d2a74674ce3a63d619f3f14e84a2
|
[
"Apache-2.0"
] | 182
|
2016-08-23T13:29:37.000Z
|
2018-09-20T17:27:06.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import logging
from pathlib import Path
import subprocess
import synthtool as s
from synthtool.languages import php
from synthtool import _tracked_paths
logging.basicConfig(level=logging.DEBUG)
src = Path(f"../{php.STAGING_DIR}/Redis").resolve()
dest = Path().resolve()
# Added so that we can pass copy_excludes in the owlbot_main() call
_tracked_paths.add(src)
php.owlbot_main(
src=src,
dest=dest,
copy_excludes=[
src / "*/src/V1/CloudRedisClient.php",
src / "*/src/V1beta1/CloudRedisClient.php"
]
)
# document and utilize apiEndpoint instead of serviceAddress
s.replace(
"**/Gapic/*GapicClient.php",
r"'serviceAddress' =>",
r"'apiEndpoint' =>")
s.replace(
"**/Gapic/*GapicClient.php",
r"@type string \$serviceAddress\n\s+\*\s+The address",
r"""@type string $serviceAddress
* **Deprecated**. This option will be removed in a future major release. Please
* utilize the `$apiEndpoint` option instead.
* @type string $apiEndpoint
* The address""")
s.replace(
"**/Gapic/*GapicClient.php",
r"\$transportConfig, and any \$serviceAddress",
r"$transportConfig, and any `$apiEndpoint`")
# V1 is GA, so remove @experimental tags
s.replace(
'src/V1/**/*Client.php',
r'^(\s+\*\n)?\s+\*\s@experimental\n',
'')
# Change the wording for the deprecation warning.
s.replace(
'src/*/*_*.php',
r'will be removed in the next major release',
'will be removed in a future release')
# Fix class references in gapic samples
for version in ['V1', 'V1beta1']:
pathExpr = 'src/' + version + '/Gapic/CloudRedisGapicClient.php'
types = {
'new CloudRedisClient': r'new Google\\Cloud\\Redis\\'+ version + r'\\CloudRedisClient',
'new Instance': r'new Google\\Cloud\\Redis\\' + version + r'\\Instance',
'= Tier::': r'= Google\\Cloud\\Redis\\' + version + r'\\Instance\\Tier::',
'new FieldMask': r'new Google\\Protobuf\\FieldMask',
'new InputConfig': r'new Google\\Cloud\\Redis\\' + version + r'\\InputConfig',
'new OutputConfig': r'new Google\\Cloud\\Redis\\' + version + r'\\OutputConfig',
'= DataProtectionMode': r'= Google\\Cloud\\Redis\\' + version + r'\\FailoverInstanceRequest\\DataProtectionMode::'
}
for search, replace in types.items():
s.replace(
pathExpr,
search,
replace
)
### [START] protoc backwards compatibility fixes
# roll back to private properties.
s.replace(
"src/**/V*/**/*.php",
r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
# prevent proto messages from being marked final
s.replace(
"src/**/V*/**/*.php",
r"final class",
r"class")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
"src/**/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
### [END] protoc backwards compatibility fixes
# fix relative cloud.google.com links
s.replace(
"src/**/V*/**/*.php",
r"(.{0,})\]\((/.{0,})\)",
r"\1](https://cloud.google.com\2)"
)
| 30.484127
| 122
| 0.643582
|
import logging
from pathlib import Path
import subprocess
import synthtool as s
from synthtool.languages import php
from synthtool import _tracked_paths
logging.basicConfig(level=logging.DEBUG)
src = Path(f"../{php.STAGING_DIR}/Redis").resolve()
dest = Path().resolve()
_tracked_paths.add(src)
php.owlbot_main(
src=src,
dest=dest,
copy_excludes=[
src / "*/src/V1/CloudRedisClient.php",
src / "*/src/V1beta1/CloudRedisClient.php"
]
)
s.replace(
"**/Gapic/*GapicClient.php",
r"'serviceAddress' =>",
r"'apiEndpoint' =>")
s.replace(
"**/Gapic/*GapicClient.php",
r"@type string \$serviceAddress\n\s+\*\s+The address",
r"""@type string $serviceAddress
* **Deprecated**. This option will be removed in a future major release. Please
* utilize the `$apiEndpoint` option instead.
* @type string $apiEndpoint
* The address""")
s.replace(
"**/Gapic/*GapicClient.php",
r"\$transportConfig, and any \$serviceAddress",
r"$transportConfig, and any `$apiEndpoint`")
s.replace(
'src/V1/**/*Client.php',
r'^(\s+\*\n)?\s+\*\s@experimental\n',
'')
s.replace(
'src/*/*_*.php',
r'will be removed in the next major release',
'will be removed in a future release')
for version in ['V1', 'V1beta1']:
pathExpr = 'src/' + version + '/Gapic/CloudRedisGapicClient.php'
types = {
'new CloudRedisClient': r'new Google\\Cloud\\Redis\\'+ version + r'\\CloudRedisClient',
'new Instance': r'new Google\\Cloud\\Redis\\' + version + r'\\Instance',
'= Tier::': r'= Google\\Cloud\\Redis\\' + version + r'\\Instance\\Tier::',
'new FieldMask': r'new Google\\Protobuf\\FieldMask',
'new InputConfig': r'new Google\\Cloud\\Redis\\' + version + r'\\InputConfig',
'new OutputConfig': r'new Google\\Cloud\\Redis\\' + version + r'\\OutputConfig',
'= DataProtectionMode': r'= Google\\Cloud\\Redis\\' + version + r'\\FailoverInstanceRequest\\DataProtectionMode::'
}
for search, replace in types.items():
s.replace(
pathExpr,
search,
replace
)
s.replace(
    "src/**/V*/**/*.php",
    r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
s.replace(
"src/**/V*/**/*.php",
r"final class",
r"class")
s.replace(
"src/**/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
s.replace(
    "src/**/V*/**/*.php",
    r"(.{0,})\]\((/.{0,})\)",
    r"\1](https://cloud.google.com\2)"
)
| true
| true
|
f70af59cb50cd0fd6732b83ac04eb6a2a19b41d6
| 902
|
py
|
Python
|
car_detection.py
|
jitendrasb24/Car-Detection-OpenCV
|
92a68158bde3ae6168d09b38a6301af4362425ec
|
[
"MIT"
] | 1
|
2021-07-30T21:58:26.000Z
|
2021-07-30T21:58:26.000Z
|
car_detection.py
|
jitendrasb24/Car-Detection-OpenCV
|
92a68158bde3ae6168d09b38a6301af4362425ec
|
[
"MIT"
] | null | null | null |
car_detection.py
|
jitendrasb24/Car-Detection-OpenCV
|
92a68158bde3ae6168d09b38a6301af4362425ec
|
[
"MIT"
] | null | null | null |
#import libraries of python opencv
import cv2
# capture video/ video path
cap = cv2.VideoCapture('cars.mp4')
#use trained cars XML classifiers
car_cascade = cv2.CascadeClassifier('haarcascade_cars.xml')
#read until video is completed
while True:
#capture frame by frame
ret, frame = cap.read()
#convert video into gray scale of each frames
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#detect cars in the video
cars = car_cascade.detectMultiScale(gray, 1.1, 3)
#cv2.im_write(cars)
    #draw a rectangle around each detected car
for (x,y,w,h) in cars:
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
cv2.imshow('video', frame)
crop_img = frame[y:y+h,x:x+w]
#press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
#release the video-capture object
cap.release()
#close all the frames
cv2.destroyAllWindows()
| 23.736842
| 59
| 0.672949
|
import cv2
cap = cv2.VideoCapture('cars.mp4')
car_cascade = cv2.CascadeClassifier('haarcascade_cars.xml')
while True:
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cars = car_cascade.detectMultiScale(gray, 1.1, 3)
for (x,y,w,h) in cars:
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
cv2.imshow('video', frame)
crop_img = frame[y:y+h,x:x+w]
if cv2.waitKey(25) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| true
| true
|
f70af5dc5a014d27ac2dd0ca237b6c9fbe2e74e4
| 573
|
py
|
Python
|
asgi_websub/hub.py
|
Kludex/fastapi-websub
|
1e109545e9ae26f9e36f10252ed321c41053224e
|
[
"MIT"
] | 1
|
2021-02-10T13:01:17.000Z
|
2021-02-10T13:01:17.000Z
|
asgi_websub/hub.py
|
Kludex/fastapi-websub
|
1e109545e9ae26f9e36f10252ed321c41053224e
|
[
"MIT"
] | null | null | null |
asgi_websub/hub.py
|
Kludex/fastapi-websub
|
1e109545e9ae26f9e36f10252ed321c41053224e
|
[
"MIT"
] | null | null | null |
"""
A WebSub Hub is an implementation that handles subscription requests and distributes
the content to subscribers when the corresponding topic URL has been updated. Hubs
MUST support subscription requests with a secret and deliver
[authenticated requests](https://www.w3.org/TR/websub/#authenticated-content-distribution)
when requested. Hubs MUST deliver the full contents of the topic URL in the request, and
MAY reduce the payload to a diff if the content type supports it. The conformance
criteria are described in Conformance Classes above.
"""
class Hub:
...
| 40.928571
| 90
| 0.799302
|
class Hub:
...
| true
| true
|
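The Hub stub above only carries the spec text about authenticated content distribution; below is a hedged sketch of the HMAC signature a hub attaches (the X-Hub-Signature header described by the W3C WebSub recommendation), with the secret and body values purely illustrative.

import hashlib
import hmac

def hub_signature(secret: str, body: bytes) -> str:
    # Value a hub would place in the X-Hub-Signature header for this delivery,
    # computed with the secret the subscriber supplied at subscription time.
    digest = hmac.new(secret.encode("utf-8"), body, hashlib.sha256).hexdigest()
    return "sha256=" + digest

# Subscribers recompute the same HMAC over the received body and compare values.
print(hub_signature("s3cret", b"<feed update>"))  # sha256=<hex digest>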
f70af7721a9f5b84af3224a3f8ec3e5b86a4b268
| 3,268
|
py
|
Python
|
tests/unit_tests/test_notice.py
|
i8enn/aiovertica
|
508c5a6a7b05e618c290271f404dee5e41c1d9a7
|
[
"Apache-2.0"
] | 1
|
2021-11-29T10:23:42.000Z
|
2021-11-29T10:23:42.000Z
|
tests/unit_tests/test_notice.py
|
i8enn/aiovertica
|
508c5a6a7b05e618c290271f404dee5e41c1d9a7
|
[
"Apache-2.0"
] | null | null | null |
tests/unit_tests/test_notice.py
|
i8enn/aiovertica
|
508c5a6a7b05e618c290271f404dee5e41c1d9a7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019-2021 Micro Focus or one of its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import mock
from .base import VerticaPythonUnitTestCase
from aiovertica.messages import NoticeResponse
from aiovertica.errors import QueryError
class NoticeTestCase(VerticaPythonUnitTestCase):
SAMPLE_DATA = {b'S': 'FATAL',
b'H': 'This is a test hint',
b'L': '9999',
b'M': 'Failure is on purpose'}
@mock.patch.object(NoticeResponse, '_unpack_data')
def test_error_message(self, mock_unpack_data):
mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA
notice = NoticeResponse(b'ignored-due-to-mock')
self.assertEqual(
notice.error_message(),
'Severity: FATAL, Message: Failure is on purpose, Hint: This is a test hint, Line: 9999'
)
@mock.patch.object(NoticeResponse, '_unpack_data')
def test_attribute_properties(self, mock_unpack_data):
mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA
notice = NoticeResponse(b'ignored-due-to-mock')
self.assertEqual(notice.severity, 'FATAL')
self.assertEqual(notice.hint, 'This is a test hint')
# yes, line is still a string.
self.assertEqual(notice.line, '9999')
self.assertEqual(notice.message, 'Failure is on purpose')
self.assertIsNone(notice.detail)
self.assertIsNone(notice.sqlstate)
@mock.patch.object(NoticeResponse, '_unpack_data')
def test_labeled_values(self, mock_unpack_data):
mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA
notice = NoticeResponse(b'ignored-due-to-mock')
self.assertEqual(notice.values, {
'Severity': 'FATAL',
'Hint': 'This is a test hint',
'Line': '9999',
'Message': 'Failure is on purpose'})
@mock.patch.object(NoticeResponse, '_unpack_data')
def test_query_error(self, mock_unpack_data):
mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA
notice = NoticeResponse(b'ignored-due-to-mock')
query_error = QueryError(notice, 'Select Fake();')
self.assertEqual(query_error.severity, 'FATAL')
self.assertEqual(query_error.hint, 'This is a test hint')
self.assertEqual(query_error.line, '9999')
self.assertEqual(query_error.message, 'Failure is on purpose')
self.assertIsNone(query_error.detail)
self.assertIsNone(query_error.sqlstate)
self.assertEqual(
str(query_error),
'Severity: FATAL, Message: Failure is on purpose, Hint: This is a test hint, Line: 9999, SQL: \'Select Fake();\'')
| 40.345679
| 126
| 0.687576
|
from __future__ import print_function, division, absolute_import
import mock
from .base import VerticaPythonUnitTestCase
from aiovertica.messages import NoticeResponse
from aiovertica.errors import QueryError
class NoticeTestCase(VerticaPythonUnitTestCase):
SAMPLE_DATA = {b'S': 'FATAL',
b'H': 'This is a test hint',
b'L': '9999',
b'M': 'Failure is on purpose'}
@mock.patch.object(NoticeResponse, '_unpack_data')
def test_error_message(self, mock_unpack_data):
mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA
notice = NoticeResponse(b'ignored-due-to-mock')
self.assertEqual(
notice.error_message(),
'Severity: FATAL, Message: Failure is on purpose, Hint: This is a test hint, Line: 9999'
)
@mock.patch.object(NoticeResponse, '_unpack_data')
def test_attribute_properties(self, mock_unpack_data):
mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA
notice = NoticeResponse(b'ignored-due-to-mock')
self.assertEqual(notice.severity, 'FATAL')
self.assertEqual(notice.hint, 'This is a test hint')
self.assertEqual(notice.line, '9999')
self.assertEqual(notice.message, 'Failure is on purpose')
self.assertIsNone(notice.detail)
self.assertIsNone(notice.sqlstate)
@mock.patch.object(NoticeResponse, '_unpack_data')
def test_labeled_values(self, mock_unpack_data):
mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA
notice = NoticeResponse(b'ignored-due-to-mock')
self.assertEqual(notice.values, {
'Severity': 'FATAL',
'Hint': 'This is a test hint',
'Line': '9999',
'Message': 'Failure is on purpose'})
@mock.patch.object(NoticeResponse, '_unpack_data')
def test_query_error(self, mock_unpack_data):
mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA
notice = NoticeResponse(b'ignored-due-to-mock')
query_error = QueryError(notice, 'Select Fake();')
self.assertEqual(query_error.severity, 'FATAL')
self.assertEqual(query_error.hint, 'This is a test hint')
self.assertEqual(query_error.line, '9999')
self.assertEqual(query_error.message, 'Failure is on purpose')
self.assertIsNone(query_error.detail)
self.assertIsNone(query_error.sqlstate)
self.assertEqual(
str(query_error),
'Severity: FATAL, Message: Failure is on purpose, Hint: This is a test hint, Line: 9999, SQL: \'Select Fake();\'')
| true
| true
|
f70af7796f844e524d5d7ecb7ec6b3b1df6ca720
| 4,888
|
py
|
Python
|
rl_games/common/segment_tree.py
|
NikitaRdn/rl_games
|
50d9a460f8ba41de5dbac4abed04f8de9b849f4f
|
[
"MIT"
] | 193
|
2019-05-28T01:48:56.000Z
|
2022-03-31T07:56:37.000Z
|
rl_games/common/segment_tree.py
|
NikitaRdn/rl_games
|
50d9a460f8ba41de5dbac4abed04f8de9b849f4f
|
[
"MIT"
] | 35
|
2020-01-28T22:15:51.000Z
|
2022-03-28T22:10:54.000Z
|
rl_games/common/segment_tree.py
|
NikitaRdn/rl_games
|
50d9a460f8ba41de5dbac4abed04f8de9b849f4f
|
[
"MIT"
] | 37
|
2019-06-28T01:09:53.000Z
|
2022-03-26T09:14:06.000Z
|
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
        Parameters
        ----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
            an operation for combining elements (e.g. sum, max)
must form a mathematical group together with the set of
possible values for array elements (i.e. be associative)
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
            end of the subsequence
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
        sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows to sample indexes according to the discrete
probability efficiently.
Parameters
----------
        prefixsum: float
upperbound on the sum of array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
| 36.207407
| 109
| 0.557488
|
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity:
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
return super(MinSegmentTree, self).reduce(start, end)
| true
| true
|
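A short usage sketch for the segment trees above (capacity must stay a power of two); the weights are illustrative and the sampling call mirrors how a prioritized replay buffer typically uses find_prefixsum_idx.

sum_tree = SumSegmentTree(capacity=4)   # classes from the record above
min_tree = MinSegmentTree(capacity=4)
for i, weight in enumerate([0.1, 0.4, 0.3, 0.2]):
    sum_tree[i] = weight
    min_tree[i] = weight

print(sum_tree.sum())                    # 1.0, the total priority mass
print(min_tree.min())                    # 0.1, the smallest stored priority
# 0.45 falls inside the cumulative range [0.1, 0.5) owned by index 1,
# so sampling with that prefix sum returns index 1.
print(sum_tree.find_prefixsum_idx(0.45)) # 1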
f70af7f247a2d22961c36fe039a604db4f490235
| 10,146
|
py
|
Python
|
tests/integration/verify/v2/service/rate_limit/test_bucket.py
|
BrimmingDev/twilio-python
|
3226b5fed92b3c2ce64f03e6b19fc4792ef7647f
|
[
"MIT"
] | 1,362
|
2015-01-04T10:25:18.000Z
|
2022-03-24T10:07:08.000Z
|
tests/integration/verify/v2/service/rate_limit/test_bucket.py
|
BrimmingDev/twilio-python
|
3226b5fed92b3c2ce64f03e6b19fc4792ef7647f
|
[
"MIT"
] | 299
|
2015-01-30T09:52:39.000Z
|
2022-03-31T23:03:02.000Z
|
tests/integration/verify/v2/service/rate_limit/test_bucket.py
|
BrimmingDev/twilio-python
|
3226b5fed92b3c2ce64f03e6b19fc4792ef7647f
|
[
"MIT"
] | 622
|
2015-01-03T04:43:09.000Z
|
2022-03-29T14:11:00.000Z
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class BucketTestCase(IntegrationTestCase):
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.create(max=1, interval=1)
values = {'Max': 1, 'Interval': 1, }
self.holodeck.assert_has_request(Request(
'post',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets',
data=values,
))
def test_create_bucket_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.create(max=1, interval=1)
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets/BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_update_bucket_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets/BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_bucket_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.list()
self.holodeck.assert_has_request(Request(
'get',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets',
))
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"buckets": [],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"next_page_url": null,
"key": "buckets"
}
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"buckets": [
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"next_page_url": null,
"key": "buckets"
}
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.list()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets/BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
| 44.113043
| 194
| 0.583974
|
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class BucketTestCase(IntegrationTestCase):
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.create(max=1, interval=1)
values = {'Max': 1, 'Interval': 1, }
self.holodeck.assert_has_request(Request(
'post',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets',
data=values,
))
def test_create_bucket_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.create(max=1, interval=1)
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets/BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_update_bucket_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets/BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_bucket_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.list()
self.holodeck.assert_has_request(Request(
'get',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets',
))
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"buckets": [],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"next_page_url": null,
"key": "buckets"
}
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"buckets": [
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"next_page_url": null,
"key": "buckets"
}
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.list()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets/BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
| true
| true
|
f70af9e0821e905487a6177fe9cec05be0014885
| 701
|
py
|
Python
|
com/puzzlesolver/cross_over.py
|
bekirduran/AI_Puzzle_Solver
|
7e8c007802d1e4596dd09edd97bafeb7a4ff7f61
|
[
"MIT"
] | null | null | null |
com/puzzlesolver/cross_over.py
|
bekirduran/AI_Puzzle_Solver
|
7e8c007802d1e4596dd09edd97bafeb7a4ff7f61
|
[
"MIT"
] | null | null | null |
com/puzzlesolver/cross_over.py
|
bekirduran/AI_Puzzle_Solver
|
7e8c007802d1e4596dd09edd97bafeb7a4ff7f61
|
[
"MIT"
] | null | null | null |
import numpy as np
# This class generates new list items by combining the top rows of one parent with the bottom rows of the next
class Crossover:
@staticmethod
def crossover(best):
row_begin_index = 0
row_half = 2
cross_list = []
for i in range(len(best) - 1):
first_part1 = best[i][row_begin_index:row_half, :]
first_part2 = best[i + 1][row_half:, :]
cross_list.append(np.concatenate((first_part1, first_part2)))
second_part1 = best[i][row_half:, :]
second_part2 = best[i + 1][row_begin_index:row_half, :]
cross_list.append(np.concatenate((second_part2, second_part1)))
return cross_list
| 29.208333
| 94
| 0.621969
|
import numpy as np
class Crossover:
@staticmethod
def crossover(best):
row_begin_index = 0
row_half = 2
cross_list = []
for i in range(len(best) - 1):
first_part1 = best[i][row_begin_index:row_half, :]
first_part2 = best[i + 1][row_half:, :]
cross_list.append(np.concatenate((first_part1, first_part2)))
second_part1 = best[i][row_half:, :]
second_part2 = best[i + 1][row_begin_index:row_half, :]
cross_list.append(np.concatenate((second_part2, second_part1)))
return cross_list
| true
| true
|
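A small hedged example of the Crossover helper above; the 4x3 parent arrays are invented for illustration, since the class only needs 2-D arrays tall enough to split after row_half rows.

import numpy as np

parent_a = np.arange(12).reshape(4, 3)
parent_b = np.arange(12, 24).reshape(4, 3)

children = Crossover.crossover([parent_a, parent_b])  # class from the record above
# Each adjacent parent pair yields two children: the top half of one parent
# stacked on the bottom half of the other, preserving the original shape.
print(len(children))       # 2
print(children[0].shape)   # (4, 3)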
f70af9fd28bd05e69268c8b7a15da6da05539c50
| 2,299
|
py
|
Python
|
handlers/commandInfo.py
|
secondfry/school21-randomcoffee
|
261b8d562d02b5a79b12603e0b74c90289523408
|
[
"MIT"
] | 3
|
2021-02-28T12:00:26.000Z
|
2021-03-14T03:00:42.000Z
|
handlers/commandInfo.py
|
secondfry/school21-randomcoffee
|
261b8d562d02b5a79b12603e0b74c90289523408
|
[
"MIT"
] | null | null | null |
handlers/commandInfo.py
|
secondfry/school21-randomcoffee
|
261b8d562d02b5a79b12603e0b74c90289523408
|
[
"MIT"
] | null | null | null |
from typing import Dict, Any
from telegram import Update, ParseMode
from telegram.ext import CallbackContext
from config.constants import (
USER_DATA_V1_SETTINGS_CAMPUS,
USER_DATA_V1_SETTINGS_ONLINE,
USER_DATA_V1_INTRA_LOGIN,
USER_DATA_V1_INTRA_CAMPUS,
USER_DATA_V1_SETTINGS_ACTIVE,
USER_DATA_V1_AUTHORIZED,
USER_DATA_V1_TELEGRAM_USERNAME,
USER_DATA_V1_MATCH_WITH,
)
from config.env import ADMIN_IDS
from utils.lang import COMMAND_DENIED_NOT_AUTHORIZED
def info(data: Dict[str, Any], is_admin_request: bool = False) -> str:
fields = [
USER_DATA_V1_INTRA_LOGIN,
USER_DATA_V1_INTRA_CAMPUS,
USER_DATA_V1_SETTINGS_CAMPUS,
USER_DATA_V1_SETTINGS_ONLINE,
USER_DATA_V1_SETTINGS_ACTIVE,
USER_DATA_V1_TELEGRAM_USERNAME,
]
if is_admin_request:
fields.append(USER_DATA_V1_MATCH_WITH)
return '\n'.join(['{}: {}'.format(x, data.get(x, '???')) for x in fields])
def info_other(upd: Update, ctx: CallbackContext) -> None:
param = ctx.args[0]
user = None
for uid, udata in ctx.dispatcher.user_data.items():
if USER_DATA_V1_INTRA_LOGIN not in udata:
continue
if udata[USER_DATA_V1_INTRA_LOGIN] == param:
user = udata
break
if str(uid) == param:
user = udata
break
if not user:
ctx.bot.send_message(upd.effective_user.id, text='{} not found'.format(param))
return
message = info(user, is_admin_request=True)
ctx.bot.send_message(
upd.effective_user.id,
text='```\ntelegram.id: {}\n{}\n```'.format(
uid,
message
),
parse_mode=ParseMode.MARKDOWN
)
def info_self(upd: Update, ctx: CallbackContext) -> None:
message = info(ctx.user_data)
ctx.bot.send_message(upd.effective_user.id, text='```\n{}\n```'.format(message), parse_mode=ParseMode.MARKDOWN)
def handler_command_info(upd: Update, ctx: CallbackContext) -> None:
if not ctx.user_data.get(USER_DATA_V1_AUTHORIZED, False):
ctx.bot.send_message(upd.effective_user.id, text=COMMAND_DENIED_NOT_AUTHORIZED)
return
if ctx.args and upd.effective_user.id in ADMIN_IDS:
return info_other(upd, ctx)
return info_self(upd, ctx)
| 28.382716
| 115
| 0.676816
|
from typing import Dict, Any
from telegram import Update, ParseMode
from telegram.ext import CallbackContext
from config.constants import (
USER_DATA_V1_SETTINGS_CAMPUS,
USER_DATA_V1_SETTINGS_ONLINE,
USER_DATA_V1_INTRA_LOGIN,
USER_DATA_V1_INTRA_CAMPUS,
USER_DATA_V1_SETTINGS_ACTIVE,
USER_DATA_V1_AUTHORIZED,
USER_DATA_V1_TELEGRAM_USERNAME,
USER_DATA_V1_MATCH_WITH,
)
from config.env import ADMIN_IDS
from utils.lang import COMMAND_DENIED_NOT_AUTHORIZED
def info(data: Dict[str, Any], is_admin_request: bool = False) -> str:
fields = [
USER_DATA_V1_INTRA_LOGIN,
USER_DATA_V1_INTRA_CAMPUS,
USER_DATA_V1_SETTINGS_CAMPUS,
USER_DATA_V1_SETTINGS_ONLINE,
USER_DATA_V1_SETTINGS_ACTIVE,
USER_DATA_V1_TELEGRAM_USERNAME,
]
if is_admin_request:
fields.append(USER_DATA_V1_MATCH_WITH)
return '\n'.join(['{}: {}'.format(x, data.get(x, '???')) for x in fields])
def info_other(upd: Update, ctx: CallbackContext) -> None:
param = ctx.args[0]
user = None
for uid, udata in ctx.dispatcher.user_data.items():
if USER_DATA_V1_INTRA_LOGIN not in udata:
continue
if udata[USER_DATA_V1_INTRA_LOGIN] == param:
user = udata
break
if str(uid) == param:
user = udata
break
if not user:
ctx.bot.send_message(upd.effective_user.id, text='{} not found'.format(param))
return
message = info(user, is_admin_request=True)
ctx.bot.send_message(
upd.effective_user.id,
text='```\ntelegram.id: {}\n{}\n```'.format(
uid,
message
),
parse_mode=ParseMode.MARKDOWN
)
def info_self(upd: Update, ctx: CallbackContext) -> None:
message = info(ctx.user_data)
ctx.bot.send_message(upd.effective_user.id, text='```\n{}\n```'.format(message), parse_mode=ParseMode.MARKDOWN)
def handler_command_info(upd: Update, ctx: CallbackContext) -> None:
if not ctx.user_data.get(USER_DATA_V1_AUTHORIZED, False):
ctx.bot.send_message(upd.effective_user.id, text=COMMAND_DENIED_NOT_AUTHORIZED)
return
if ctx.args and upd.effective_user.id in ADMIN_IDS:
return info_other(upd, ctx)
return info_self(upd, ctx)
| true
| true
|
f70afa56a030c0bd6f0835494dee2ed74f7dff35
| 3,337
|
py
|
Python
|
src/gui.py
|
ksern94/six-percent
|
a3eb637d72d47f396945a4488222d63ae93df53d
|
[
"MIT"
] | 1
|
2020-10-17T08:56:41.000Z
|
2020-10-17T08:56:41.000Z
|
src/gui.py
|
ksern94/six-percent
|
a3eb637d72d47f396945a4488222d63ae93df53d
|
[
"MIT"
] | null | null | null |
src/gui.py
|
ksern94/six-percent
|
a3eb637d72d47f396945a4488222d63ae93df53d
|
[
"MIT"
] | null | null | null |
import logging
import os
import re
import sys
from typing import Any, Dict
import PySimpleGUI as sg # type: ignore
from PySimpleGUI.PySimpleGUI import Column # type: ignore
from .utils.encryption import encrypt_password, generate_key
logger = logging.getLogger(__name__)
def login_gui() -> Dict[str, Any]:
sg.theme('DarkTeal12')
def collapse(layout: list, key: str, visible: bool) -> Column:
"""
Helper function to hide and un-hide layouts
"""
return sg.pin(sg.Column(layout, key=key, visible=visible))
def main() -> Dict[str, Any]:
"""
Main GUI function
"""
new_user_section = [
[sg.Text('Username'), sg.Input(key='_USERNAME_', tooltip='What is your myASNB account username?')],
[sg.Text('Password'), sg.Input(key='_PASSWORD_', password_char="*", tooltip='What is your myASNB account password?')],
[sg.Text('Investment Amount (RM)'), sg.Input(key='_INVESTMENT_AMOUNT_', tooltip='How much do you want to invest?', change_submits=True, do_not_clear=True)],
]
layout = [
[sg.Text('myASNB Unit Holder Login', font='Helvetica 20', justification='center')],
[sg.Checkbox('Login as new user', enable_events=True, key='_CHECKBOX_KEY_', tooltip='Tick to login.')],
[collapse(new_user_section, '_SECTION_KEY_', False)],
[sg.OK('Start', tooltip='Start the bot (Press: ENTER)', size=(10, 1), bind_return_key=True, focus=True), sg.Cancel('Quit', tooltip='Goodbye.', size=(5, 1))],
]
window = sg.Window(
'Six Percent',
layout,
auto_size_text=False,
default_element_size=(25, 1),
text_justification='l',
return_keyboard_events=True,
grab_anywhere=False,
)
user_credentials_template = dict(username='', password='', investment_amount='')
user_credentials = user_credentials_template.copy()
section_toggle = False
while True:
event, values = window.read()
if event == '_CHECKBOX_KEY_':
section_toggle = not section_toggle
window['_SECTION_KEY_'].update(visible=section_toggle)
elif event == '_INVESTMENT_AMOUNT_':
window.FindElement(event).Update(re.sub("[^0-9]", "", values[event]))
user_credentials = {
**user_credentials,
'username': values['_USERNAME_'],
'password': values['_PASSWORD_'],
'investment_amount': values['_INVESTMENT_AMOUNT_'],
}
if event in (sg.WIN_CLOSED, 'Quit'):
logger.info('Exiting program gracefully')
window.close()
sys.exit()
elif event == 'Start':
break
window.close()
if not os.path.isfile('secret.key'):
generate_key()
# Encrypts user password before storing it
if user_credentials['password']:
user_credentials['password'] = encrypt_password(user_credentials['password'])
return dict() if user_credentials == user_credentials_template else user_credentials
user_info = main()
return user_info
if __name__ == '__main__':
logger.info(login_gui())
| 34.05102
| 169
| 0.596943
|
import logging
import os
import re
import sys
from typing import Any, Dict
import PySimpleGUI as sg
from PySimpleGUI.PySimpleGUI import Column
from .utils.encryption import encrypt_password, generate_key
logger = logging.getLogger(__name__)
def login_gui() -> Dict[str, Any]:
sg.theme('DarkTeal12')
def collapse(layout: list, key: str, visible: bool) -> Column:
return sg.pin(sg.Column(layout, key=key, visible=visible))
def main() -> Dict[str, Any]:
new_user_section = [
[sg.Text('Username'), sg.Input(key='_USERNAME_', tooltip='What is your myASNB account username?')],
[sg.Text('Password'), sg.Input(key='_PASSWORD_', password_char="*", tooltip='What is your myASNB account password?')],
[sg.Text('Investment Amount (RM)'), sg.Input(key='_INVESTMENT_AMOUNT_', tooltip='How much do you want to invest?', change_submits=True, do_not_clear=True)],
]
layout = [
[sg.Text('myASNB Unit Holder Login', font='Helvetica 20', justification='center')],
[sg.Checkbox('Login as new user', enable_events=True, key='_CHECKBOX_KEY_', tooltip='Tick to login.')],
[collapse(new_user_section, '_SECTION_KEY_', False)],
[sg.OK('Start', tooltip='Start the bot (Press: ENTER)', size=(10, 1), bind_return_key=True, focus=True), sg.Cancel('Quit', tooltip='Goodbye.', size=(5, 1))],
]
window = sg.Window(
'Six Percent',
layout,
auto_size_text=False,
default_element_size=(25, 1),
text_justification='l',
return_keyboard_events=True,
grab_anywhere=False,
)
user_credentials_template = dict(username='', password='', investment_amount='')
user_credentials = user_credentials_template.copy()
section_toggle = False
while True:
event, values = window.read()
if event == '_CHECKBOX_KEY_':
section_toggle = not section_toggle
window['_SECTION_KEY_'].update(visible=section_toggle)
elif event == '_INVESTMENT_AMOUNT_':
window.FindElement(event).Update(re.sub("[^0-9]", "", values[event]))
user_credentials = {
**user_credentials,
'username': values['_USERNAME_'],
'password': values['_PASSWORD_'],
'investment_amount': values['_INVESTMENT_AMOUNT_'],
}
if event in (sg.WIN_CLOSED, 'Quit'):
logger.info('Exiting program gracefully')
window.close()
sys.exit()
elif event == 'Start':
break
window.close()
if not os.path.isfile('secret.key'):
generate_key()
if user_credentials['password']:
user_credentials['password'] = encrypt_password(user_credentials['password'])
return dict() if user_credentials == user_credentials_template else user_credentials
user_info = main()
return user_info
if __name__ == '__main__':
logger.info(login_gui())
| true
| true
|
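A hedged call sketch for the GUI record above: login_gui() blocks on the PySimpleGUI window and returns either an empty dict (nothing entered) or the captured fields, with the password already run through encrypt_password().

credentials = login_gui()  # function from the record above
if credentials:
    print(credentials['username'], credentials['investment_amount'])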
f70afac02a6128129ed13868fbdec50f32f336fa
| 1,077
|
py
|
Python
|
src/cards.py
|
tylernickr/cribbage
|
04d594c2c9fcc2faf96f17bfa3d75b76b9ee36f8
|
[
"MIT"
] | null | null | null |
src/cards.py
|
tylernickr/cribbage
|
04d594c2c9fcc2faf96f17bfa3d75b76b9ee36f8
|
[
"MIT"
] | null | null | null |
src/cards.py
|
tylernickr/cribbage
|
04d594c2c9fcc2faf96f17bfa3d75b76b9ee36f8
|
[
"MIT"
] | null | null | null |
from random import shuffle
class Deck(object):
CARD_VALUES = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']
CARD_SUITS = ['H', 'D', 'S', 'C']
@staticmethod
def get_shuffled_deck():
deck = Deck()
deck.shuffle()
return deck
def __init__(self):
self.cards = []
for cardSuit in self.__class__.CARD_SUITS:
for cardValue in self.__class__.CARD_VALUES:
self.cards.append(Card(cardValue, cardSuit))
def shuffle(self):
shuffle(self.cards)
def draw(self):
return self.cards.pop()
class Card(object):
def __init__(self, value, suit):
self.value = value
self.suit = suit
def get_value(self):
return self.value
def get_suit(self):
return self.suit
def __str__(self):
return self.value + self.suit
def __eq__(self, other):
try:
return self.get_value() + self.get_suit() == other.get_value() + other.get_suit()
except AttributeError:
return False
| 22.914894
| 93
| 0.559889
|
from random import shuffle
class Deck(object):
CARD_VALUES = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']
CARD_SUITS = ['H', 'D', 'S', 'C']
@staticmethod
def get_shuffled_deck():
deck = Deck()
deck.shuffle()
return deck
def __init__(self):
self.cards = []
for cardSuit in self.__class__.CARD_SUITS:
for cardValue in self.__class__.CARD_VALUES:
self.cards.append(Card(cardValue, cardSuit))
def shuffle(self):
shuffle(self.cards)
def draw(self):
return self.cards.pop()
class Card(object):
def __init__(self, value, suit):
self.value = value
self.suit = suit
def get_value(self):
return self.value
def get_suit(self):
return self.suit
def __str__(self):
return self.value + self.suit
def __eq__(self, other):
try:
return self.get_value() + self.get_suit() == other.get_value() + other.get_suit()
except AttributeError:
return False
| true
| true
|
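A brief usage sketch for the Deck and Card classes above; the dealt hand shown in the comment is only an example, since the deck is shuffled.

deck = Deck.get_shuffled_deck()            # classes from the record above
hand = [deck.draw() for _ in range(6)]     # a cribbage-sized deal
print([str(card) for card in hand])        # e.g. ['7H', 'KC', 'AD', '2S', '9H', 'QD']
print(Card('5', 'H') == Card('5', 'H'))    # True: equality compares value plus suit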
f70afaf5df169b83dcb2c1b6ed171ffa8616d273
| 5,747
|
py
|
Python
|
tensorflow/python/compiler/tensorrt/test/trt_mode_test.py
|
huonw/tensorflow
|
85f47254af7cc230a4a031998dffe770b7edbb9d
|
[
"Apache-2.0"
] | 1
|
2020-10-01T16:52:51.000Z
|
2020-10-01T16:52:51.000Z
|
tensorflow/python/compiler/tensorrt/test/trt_mode_test.py
|
huonw/tensorflow
|
85f47254af7cc230a4a031998dffe770b7edbb9d
|
[
"Apache-2.0"
] | 1
|
2022-02-10T01:08:48.000Z
|
2022-02-10T01:08:48.000Z
|
tensorflow/python/compiler/tensorrt/test/trt_mode_test.py
|
huonw/tensorflow
|
85f47254af7cc230a4a031998dffe770b7edbb9d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from unittest import SkipTest # pylint: disable=g-importing-member
from tensorflow.compiler.tf2tensorrt.wrap_py_utils import get_linked_tensorrt_version
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class TrtModeTestBase(trt_test.TfTrtIntegrationTestBase):
"""Test squeeze on batch dim and some unary operations in TF-TRT."""
def GraphFn(self, x1):
q = math_ops.abs(x1)
q = q + 1.0
q = q * 3.0
q = array_ops.squeeze(q, 0)
q = math_ops.abs(q)
q = q + 5.0
return array_ops.identity(q, name="output_0")
def GetParams(self):
"""The input has 1 as a first dimension, which is removed by the squeeze.
op in the graph.
In explicit batch mode, TensorRT can convert the whole graph. In this mode
it is possible to manipulate the batch dimension using the squeeze op.
In implicit batch mode TensorRT cannot convert the whole graph. We are not
allowed to manipulate (squeeze) the first dimension in implicit batch mode.
Therefore the graph will be converted using multiple segments.
"""
return self.BuildParams(self.GraphFn, dtypes.float32, [[1, 12, 5]],
[[12, 5]])
def GetConversionParams(self, run_params, implicit_batch=False):
"""Return a TrtConversionParams for test."""
conversion_params = super(TrtModeTestBase,
self).GetConversionParams(run_params)
rewriter_config = self.GetTrtRewriterConfig(
run_params=run_params,
conversion_params=conversion_params,
use_implicit_batch=implicit_batch)
return conversion_params._replace(rewriter_config_template=rewriter_config)
@classmethod
def setUpClass(cls):
if cls is TrtModeTestBase:
raise SkipTest("TrtModeTestBase defines base class for other test.")
super(TrtModeTestBase, cls).setUpClass()
class ImplicitBatchTest(TrtModeTestBase):
def GetConversionParams(self, run_params):
"""Return a TrtConversionParams for test using implicit batch mdoe."""
return super(ImplicitBatchTest, self).GetConversionParams(run_params, True)
def ExpectedEnginesToBuild(self, run_params):
"""Check that the expected engine is built.
Args:
run_params: the run parameters.
Returns:
the expected engines to build.
The squeeze op is not converted by TensorRT in implicit batch mode.
Because of this we have two TRTEngineOp in the graphs: one for the
subgraph before 'squeeze(q,0)', and another one for the rest of the ops
after the 'squeeze(q,0)'.
"""
return ["TRTEngineOp_0", "TRTEngineOp_1"]
class ExplicitBatchTest(TrtModeTestBase):
def GetParams(self):
"""We specify input/output masks with static (known) shapes."""
return self.BuildParamsWithMask(
self.GraphFn,
dtypes.float32, [[1, 12, 5]], [[12, 5]],
input_mask=[[True, True, True]],
output_mask=[[True, True]])
def GetConversionParams(self, run_params):
"""Return a TrtConversionParams for test that enables explicit batch."""
return super(ExplicitBatchTest, self).GetConversionParams(run_params, False)
def ExpectedEnginesToBuild(self, run_params):
"""Check that the expected engine is built.
Args:
run_params: the run parameters.
Returns:
the expected engines to build.
In explicit batch mode the whole graph is converted using a single engine.
"""
return ["TRTEngineOp_0"]
def ShouldRunTest(self, run_params):
# Only run for TRT 6 and above.
ver = get_linked_tensorrt_version()
return ver[0] >= 6 and (not run_params.use_calibration)
class DynamicShapesTest(TrtModeTestBase):
"""Test with dynamic input shapes.
DynamicShapesTest is different from ExplicitBatchTest in that it uses input
and output masks to change the input and output shapes to unknown shapes.
"""
def GetParams(self):
"""We specify input/output mask with dynamic (unknown) shapes."""
return self.BuildParamsWithMask(
self.GraphFn,
dtypes.float32, [[1, 12, 5]], [[12, 5]],
input_mask=[[False, False, False]],
output_mask=[[False, False]])
def GetConversionParams(self, run_params):
"""Return a TrtConversionParams for test that enables explicit batch."""
return super(DynamicShapesTest, self).GetConversionParams(run_params, False)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
def ShouldRunTest(self, run_params):
# Only run for TRT 6 and above.
ver = get_linked_tensorrt_version()
return ver[0] >= 6 and (not run_params.use_calibration)
if __name__ == "__main__":
test.main()
| 35.257669
| 93
| 0.712894
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from unittest import SkipTest
from tensorflow.compiler.tf2tensorrt.wrap_py_utils import get_linked_tensorrt_version
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class TrtModeTestBase(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, x1):
q = math_ops.abs(x1)
q = q + 1.0
q = q * 3.0
q = array_ops.squeeze(q, 0)
q = math_ops.abs(q)
q = q + 5.0
return array_ops.identity(q, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[1, 12, 5]],
[[12, 5]])
def GetConversionParams(self, run_params, implicit_batch=False):
conversion_params = super(TrtModeTestBase,
self).GetConversionParams(run_params)
rewriter_config = self.GetTrtRewriterConfig(
run_params=run_params,
conversion_params=conversion_params,
use_implicit_batch=implicit_batch)
return conversion_params._replace(rewriter_config_template=rewriter_config)
@classmethod
def setUpClass(cls):
if cls is TrtModeTestBase:
raise SkipTest("TrtModeTestBase defines base class for other test.")
super(TrtModeTestBase, cls).setUpClass()
class ImplicitBatchTest(TrtModeTestBase):
def GetConversionParams(self, run_params):
return super(ImplicitBatchTest, self).GetConversionParams(run_params, True)
def ExpectedEnginesToBuild(self, run_params):
return ["TRTEngineOp_0", "TRTEngineOp_1"]
class ExplicitBatchTest(TrtModeTestBase):
def GetParams(self):
return self.BuildParamsWithMask(
self.GraphFn,
dtypes.float32, [[1, 12, 5]], [[12, 5]],
input_mask=[[True, True, True]],
output_mask=[[True, True]])
def GetConversionParams(self, run_params):
return super(ExplicitBatchTest, self).GetConversionParams(run_params, False)
def ExpectedEnginesToBuild(self, run_params):
return ["TRTEngineOp_0"]
def ShouldRunTest(self, run_params):
ver = get_linked_tensorrt_version()
return ver[0] >= 6 and (not run_params.use_calibration)
class DynamicShapesTest(TrtModeTestBase):
def GetParams(self):
return self.BuildParamsWithMask(
self.GraphFn,
dtypes.float32, [[1, 12, 5]], [[12, 5]],
input_mask=[[False, False, False]],
output_mask=[[False, False]])
def GetConversionParams(self, run_params):
return super(DynamicShapesTest, self).GetConversionParams(run_params, False)
def ExpectedEnginesToBuild(self, run_params):
return ["TRTEngineOp_0"]
def ShouldRunTest(self, run_params):
ver = get_linked_tensorrt_version()
return ver[0] >= 6 and (not run_params.use_calibration)
if __name__ == "__main__":
test.main()
| true
| true
|
f70afc2cecdad59dc581cd68886b60f4e9f9968e
| 870
|
py
|
Python
|
ax/modelbridge/numpy.py
|
mpolson64/Ax-1
|
cf9e12cc1253efe0fc893f2620e99337e0927a26
|
[
"MIT"
] | 1
|
2022-02-10T10:51:40.000Z
|
2022-02-10T10:51:40.000Z
|
ax/modelbridge/numpy.py
|
mpolson64/Ax-1
|
cf9e12cc1253efe0fc893f2620e99337e0927a26
|
[
"MIT"
] | null | null | null |
ax/modelbridge/numpy.py
|
mpolson64/Ax-1
|
cf9e12cc1253efe0fc893f2620e99337e0927a26
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from ax.modelbridge.array import ArrayModelBridge
from ax.models.numpy_base import NumpyModel
# pyre-fixme[13]: Attribute `model` is never initialized.
# pyre-fixme[13]: Attribute `outcomes` is never initialized.
# pyre-fixme[13]: Attribute `parameters` is never initialized.
class NumpyModelBridge(ArrayModelBridge):
"""A model bridge for using numpy array-based models.
This model bridge interfaces with NumpyModel.
Requires that all parameters have been transformed to RangeParameters
or FixedParameters with float type and no log scale.
"""
model: NumpyModel
outcomes: List[str]
parameters: List[str]
| 31.071429
| 73
| 0.757471
|
from typing import List
from ax.modelbridge.array import ArrayModelBridge
from ax.models.numpy_base import NumpyModel
class NumpyModelBridge(ArrayModelBridge):
model: NumpyModel
outcomes: List[str]
parameters: List[str]
| true
| true
|
f70afc3ff1c2e6df15de3340a6c530b958a903f9
| 26,367
|
py
|
Python
|
src/transformers/training_args.py
|
hlahkar/transformers
|
c19d04623eacfbc2c452397a5eda0fde42db3fc5
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/training_args.py
|
hlahkar/transformers
|
c19d04623eacfbc2c452397a5eda0fde42db3fc5
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/training_args.py
|
hlahkar/transformers
|
c19d04623eacfbc2c452397a5eda0fde42db3fc5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import json
import os
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
from .file_utils import cached_property, is_torch_available, is_torch_tpu_available, torch_required
from .trainer_utils import EvaluationStrategy
from .utils import logging
if is_torch_available():
import torch
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
def default_logdir() -> str:
"""
Same default as PyTorch
"""
import socket
from datetime import datetime
current_time = datetime.now().strftime("%b%d_%H-%M-%S")
return os.path.join("runs", current_time + "_" + socket.gethostname())
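# Illustration (hypothetical host name and timestamp, editorial addition): on a machine
# named "trainbox", a run started on Nov 4 at 13:22:05 would log to
# "runs/Nov04_13-22-05_trainbox", mirroring the PyTorch SummaryWriter default.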
@dataclass
class TrainingArguments:
"""
TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop
itself**.
Using :class:`~transformers.HfArgumentParser` we can turn this class into argparse arguments to be able to specify
them on the command line.
Parameters:
output_dir (:obj:`str`):
The output directory where the model predictions and checkpoints will be written.
overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`):
If :obj:`True`, overwrite the content of the output directory. Use this to continue training if
:obj:`output_dir` points to a checkpoint directory.
do_train (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run training or not. This argument is not directly used by :class:`~transformers.Trainer`, it's
intended to be used by your training/evaluation scripts instead. See the `example scripts
<https://github.com/huggingface/transformers/tree/master/examples>`__ for more details.
do_eval (:obj:`bool`, `optional`):
Whether to run evaluation on the dev set or not. Will be set to :obj:`True` if :obj:`evaluation_strategy`
is different from :obj:`"no"`. This argument is not directly used by :class:`~transformers.Trainer`, it's
intended to be used by your training/evaluation scripts instead. See the `example scripts
<https://github.com/huggingface/transformers/tree/master/examples>`__ for more details.
do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run predictions on the test set or not. This argument is not directly used by
:class:`~transformers.Trainer`, it's intended to be used by your training/evaluation scripts instead. See
the `example scripts <https://github.com/huggingface/transformers/tree/master/examples>`__ for more
details.
evaluation_strategy (:obj:`str` or :class:`~transformers.trainer_utils.EvaluationStrategy`, `optional`, defaults to :obj:`"no"`):
The evaluation strategy to adopt during training. Possible values are:
* :obj:`"no"`: No evaluation is done during training.
* :obj:`"steps"`: Evaluation is done (and logged) every :obj:`eval_steps`.
* :obj:`"epoch"`: Evaluation is done at the end of each epoch.
prediction_loss_only (:obj:`bool`, `optional`, defaults to `False`):
When performing evaluation and predictions, only returns the loss.
per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):
The batch size per GPU/TPU core/CPU for training.
per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):
The batch size per GPU/TPU core/CPU for evaluation.
gradient_accumulation_steps (:obj:`int`, `optional`, defaults to 1):
Number of update steps to accumulate the gradients for before performing a backward/update pass.
.. warning::
When using gradient accumulation, one step is counted as one step with backward pass. Therefore,
logging, evaluation, save will be conducted every ``gradient_accumulation_steps * xxx_step`` training
examples.
eval_accumulation_steps (:obj:`int`, `optional`):
Number of prediction steps to accumulate the output tensors for before moving the results to the CPU. If
left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but
requires more memory).
learning_rate (:obj:`float`, `optional`, defaults to 5e-5):
The initial learning rate for Adam.
weight_decay (:obj:`float`, `optional`, defaults to 0):
The weight decay to apply (if not zero).
adam_beta1 (:obj:`float`, `optional`, defaults to 0.9):
The beta1 for the Adam optimizer.
adam_beta2 (:obj:`float`, `optional`, defaults to 0.999):
The beta2 for the Adam optimizer.
adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8):
Epsilon for the Adam optimizer.
max_grad_norm (:obj:`float`, `optional`, defaults to 1.0):
Maximum gradient norm (for gradient clipping).
num_train_epochs (:obj:`float`, `optional`, defaults to 3.0):
Total number of training epochs to perform (if not an integer, the decimal part is the fraction of the
last epoch to perform before stopping training).
max_steps (:obj:`int`, `optional`, defaults to -1):
If set to a positive number, the total number of training steps to perform. Overrides
:obj:`num_train_epochs`.
warmup_steps (:obj:`int`, `optional`, defaults to 0):
Number of steps used for a linear warmup from 0 to :obj:`learning_rate`.
logging_dir (:obj:`str`, `optional`):
Tensorboard log directory. Will default to `runs/**CURRENT_DATETIME_HOSTNAME**`.
logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to log and evaluate the first :obj:`global_step` or not.
logging_steps (:obj:`int`, `optional`, defaults to 500):
Number of update steps between two logs.
save_steps (:obj:`int`, `optional`, defaults to 500):
Number of update steps between two checkpoint saves.
save_total_limit (:obj:`int`, `optional`):
If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in
:obj:`output_dir`.
no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to avoid using CUDA even when it is available.
seed (:obj:`int`, `optional`, defaults to 42):
Random seed for initialization.
fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use 16-bit (mixed) precision training (through NVIDIA apex) instead of 32-bit training.
fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):
For :obj:`fp16` training, apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details
on the `apex documentation <https://nvidia.github.io/apex/amp.html>`__.
local_rank (:obj:`int`, `optional`, defaults to -1):
During distributed training, the rank of the process.
tpu_num_cores (:obj:`int`, `optional`):
When training on TPU, the number of TPU cores (automatically passed by launcher script).
debug (:obj:`bool`, `optional`, defaults to :obj:`False`):
When training on TPU, whether to print debug metrics or not.
dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size)
or not.
eval_steps (:obj:`int`, `optional`):
Number of update steps between two evaluations if :obj:`evaluation_strategy="steps"`. Will default to the
same value as :obj:`logging_steps` if not set.
dataloader_num_workers (:obj:`int`, `optional`, defaults to 0):
Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the
main process.
past_index (:obj:`int`, `optional`, defaults to -1):
Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc:`XLNet <../model_doc/xlnet>` can
make use of the past hidden states for their predictions. If this argument is set to a positive int, the
``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the model
at the next training step under the keyword argument ``mems``.
run_name (:obj:`str`, `optional`):
A descriptor for the run. Notably used for wandb logging.
disable_tqdm (:obj:`bool`, `optional`):
Whether or not to disable the tqdm progress bars. Will default to :obj:`True` if the logging level is set
to warn or lower (default), :obj:`False` otherwise.
remove_unused_columns (:obj:`bool`, `optional`, defaults to :obj:`True`):
If using `nlp.Dataset` datasets, whether or not to automatically remove the columns unused by the model
forward method.
(Note that this behavior is not implemented for :class:`~transformers.TFTrainer` yet.)
label_names (:obj:`List[str]`, `optional`):
The list of keys in your dictionary of inputs that correspond to the labels.
Will eventually default to :obj:`["labels"]` except if the model used is one of the
:obj:`XxxForQuestionAnswering` in which case it will default to :obj:`["start_positions",
"end_positions"]`.
load_best_model_at_end (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to load the best model found during training at the end of training.
.. note::
When set to :obj:`True`, the parameter :obj:`save_steps` will be ignored and the model will be saved
after each evaluation.
metric_for_best_model (:obj:`str`, `optional`):
Use in conjunction with :obj:`load_best_model_at_end` to specify the metric to use to compare two different
models. Must be the name of a metric returned by the evaluation with or without the prefix :obj:`"eval_"`.
Will default to :obj:`"loss"` if unspecified and :obj:`load_best_model_at_end=True` (to use the evaluation
loss).
If you set this value, :obj:`greater_is_better` will default to :obj:`True`. Don't forget to set it to
:obj:`False` if your metric is better when lower.
greater_is_better (:obj:`bool`, `optional`):
Use in conjunction with :obj:`load_best_model_at_end` and :obj:`metric_for_best_model` to specify if better
models should have a greater metric or not. Will default to:
- :obj:`True` if :obj:`metric_for_best_model` is set to a value that isn't :obj:`"loss"` or
:obj:`"eval_loss"`.
- :obj:`False` if :obj:`metric_for_best_model` is not set, or set to :obj:`"loss"` or :obj:`"eval_loss"`.
model_parallel (:obj:`bool`, `optional`, defaults to :obj:`False`):
If there is more than one device, whether to use model parallelism to distribute the model's modules
across devices or not.
ignore_data_skip (:obj:`bool`, `optional`, defaults to :obj:`False`):
When resuming training, whether or not to skip the epochs and batches to get the data loading at the same
stage as in the previous training. If set to :obj:`True`, the training will begin faster (as that skipping
step can take a long time) but will not yield the same results as the interrupted training would have.
"""
output_dir: str = field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory."
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(default=None, metadata={"help": "Whether to run eval on the dev set."})
do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."})
model_parallel: bool = field(
default=False,
metadata={
"help": (
"If there are more than one devices, whether to use model parallelism to distribute the "
"model's modules across devices."
)
},
)
evaluation_strategy: EvaluationStrategy = field(
default="no",
metadata={"help": "Run evaluation during training at each logging step."},
)
prediction_loss_only: bool = field(
default=False,
metadata={"help": "When performing evaluation and predictions, only returns the loss."},
)
per_device_train_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
)
per_device_eval_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
)
per_gpu_train_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_train_batch_size` is preferred. "
"Batch size per GPU/TPU core/CPU for training."
},
)
per_gpu_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_eval_batch_size` is preferred."
"Batch size per GPU/TPU core/CPU for evaluation."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={"help": "Number of updates steps to accumulate before performing a backward/update pass."},
)
eval_accumulation_steps: Optional[int] = field(
default=None,
metadata={"help": "Number of predictions steps to accumulate before moving the tensors to the CPU."},
)
learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for Adam."})
weight_decay: float = field(default=0.0, metadata={"help": "Weight decay if we apply some."})
adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for Adam optimizer"})
adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for Adam optimizer"})
adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for Adam optimizer."})
max_grad_norm: float = field(default=1.0, metadata={"help": "Max gradient norm."})
num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
max_steps: int = field(
default=-1,
metadata={"help": "If > 0: set total number of training steps to perform. Override num_train_epochs."},
)
warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={"help": "Tensorboard log dir."})
logging_first_step: bool = field(default=False, metadata={"help": "Log the first global_step"})
logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."})
save_total_limit: Optional[int] = field(
default=None,
metadata={
"help": (
"Limit the total amount of checkpoints."
"Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints"
)
},
)
no_cuda: bool = field(default=False, metadata={"help": "Do not use CUDA even when it is available"})
seed: int = field(default=42, metadata={"help": "random seed for initialization"})
fp16: bool = field(
default=False,
metadata={"help": "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"},
)
fp16_opt_level: str = field(
default="O1",
metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
)
},
)
local_rank: int = field(default=-1, metadata={"help": "For distributed training: local_rank"})
tpu_num_cores: Optional[int] = field(
default=None, metadata={"help": "TPU: Number of TPU cores (automatically passed by launcher script)"}
)
tpu_metrics_debug: bool = field(
default=False,
metadata={"help": "Deprecated, the use of `--debug` is preferred. TPU: Whether to print debug metrics"},
)
debug: bool = field(default=False, metadata={"help": "Whether to print debug metrics on TPU"})
dataloader_drop_last: bool = field(
default=False, metadata={"help": "Drop the last incomplete batch if it is not divisible by the batch size."}
)
eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
dataloader_num_workers: int = field(
default=0,
metadata={
"help": "Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process."
},
)
past_index: int = field(
default=-1,
metadata={"help": "If >=0, uses the corresponding part of the output as the past state for next step."},
)
run_name: Optional[str] = field(
default=None, metadata={"help": "An optional descriptor for the run. Notably used for wandb logging."}
)
disable_tqdm: Optional[bool] = field(
default=None, metadata={"help": "Whether or not to disable the tqdm progress bars."}
)
remove_unused_columns: Optional[bool] = field(
default=True, metadata={"help": "Remove columns not required by the model when using an nlp.Dataset."}
)
label_names: Optional[List[str]] = field(
default=None, metadata={"help": "The list of keys in your dictionary of inputs that correspond to the labels."}
)
load_best_model_at_end: Optional[bool] = field(
default=False,
metadata={"help": "Whether or not to load the best model found during training at the end of training."},
)
metric_for_best_model: Optional[str] = field(
default=None, metadata={"help": "The metric to use to compare two different models."}
)
greater_is_better: Optional[bool] = field(
default=None, metadata={"help": "Whether the `metric_for_best_model` should be maximized or not."}
)
ignore_data_skip: bool = field(
default=False,
metadata={
"help": "When resuming training, whether or not to skip the first epochs and batches to get to the same training data."
},
)
def __post_init__(self):
if self.disable_tqdm is None:
self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN
self.evaluation_strategy = EvaluationStrategy(self.evaluation_strategy)
if self.do_eval is False and self.evaluation_strategy != EvaluationStrategy.NO:
self.do_eval = True
if self.eval_steps is None:
self.eval_steps = self.logging_steps
if self.load_best_model_at_end and self.metric_for_best_model is None:
self.metric_for_best_model = "loss"
if self.greater_is_better is None and self.metric_for_best_model is not None:
self.greater_is_better = self.metric_for_best_model not in ["loss", "eval_loss"]
if self.run_name is None:
self.run_name = self.output_dir
if is_torch_available() and self.device.type != "cuda" and self.fp16:
raise ValueError("AMP (`--fp16`) can only be used on CUDA devices.")
@property
def train_batch_size(self) -> int:
"""
The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training).
"""
if self.per_gpu_train_batch_size:
logger.warning(
"Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future "
"version. Using `--per_device_train_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size
if not self.model_parallel:
train_batch_size = per_device_batch_size * max(1, self.n_gpu)
else:
train_batch_size = per_device_batch_size
return train_batch_size
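# Worked example (editorial addition): with per_device_train_batch_size=8, n_gpu=2 and
# model_parallel=False, the effective train_batch_size is 8 * 2 = 16; with
# model_parallel=True it stays at the per-device value of 8.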
@property
def eval_batch_size(self) -> int:
"""
The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training).
"""
if self.per_gpu_eval_batch_size:
logger.warning(
"Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future "
"version. Using `--per_device_eval_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size
if not self.model_parallel:
eval_batch_size = per_device_batch_size * max(1, self.n_gpu)
else:
eval_batch_size = per_device_batch_size
return eval_batch_size
@cached_property
@torch_required
def _setup_devices(self) -> Tuple["torch.device", int]:
logger.info("PyTorch: setting up devices")
if self.no_cuda:
device = torch.device("cpu")
n_gpu = 0
elif is_torch_tpu_available():
device = xm.xla_device()
n_gpu = 0
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend="nccl")
device = torch.device("cuda", self.local_rank)
n_gpu = 1
if device.type == "cuda":
torch.cuda.set_device(device)
return device, n_gpu
@property
@torch_required
def device(self) -> "torch.device":
"""
The device used by this process.
"""
return self._setup_devices[0]
@property
@torch_required
def n_gpu(self):
"""
The number of GPUs used by this process.
Note:
This will only be greater than one when you have multiple GPUs available but are not using distributed
training. For distributed training, it will always be 1.
"""
return self._setup_devices[1]
@property
@torch_required
def parallel_mode(self):
"""
The current mode used for parallelism if multiple GPUs/TPU cores are available. One of:
- :obj:`ParallelMode.NOT_PARALLEL`: no parallelism (CPU or one GPU).
- :obj:`ParallelMode.NOT_DISTRIBUTED`: several GPUs in one single process (uses :obj:`torch.nn.DataParallel`).
- :obj:`ParallelMode.DISTRIBUTED`: several GPUs, each having its own process (uses
:obj:`torch.nn.DistributedDataParallel`).
- :obj:`ParallelMode.TPU`: several TPU cores.
"""
if is_torch_tpu_available():
return ParallelMode.TPU
elif self.local_rank != -1:
return ParallelMode.DISTRIBUTED
elif self.n_gpu > 1:
return ParallelMode.NOT_DISTRIBUTED
else:
return ParallelMode.NOT_PARALLEL
def to_dict(self):
"""
Serializes this instance while replacing `Enum` members by their values (for JSON serialization support).
"""
d = dataclasses.asdict(self)
for k, v in d.items():
if isinstance(v, Enum):
d[k] = v.value
return d
def to_json_string(self):
"""
Serializes this instance to a JSON string.
"""
return json.dumps(self.to_dict(), indent=2)
def to_sanitized_dict(self) -> Dict[str, Any]:
"""
Sanitized serialization to use with TensorBoard’s hparams
"""
d = self.to_dict()
d = {**d, **{"train_batch_size": self.train_batch_size, "eval_batch_size": self.eval_batch_size}}
valid_types = [bool, int, float, str]
if is_torch_available():
valid_types.append(torch.Tensor)
return {k: v if type(v) in valid_types else str(v) for k, v in d.items()}
class ParallelMode(Enum):
NOT_PARALLEL = "not_parallel"
NOT_DISTRIBUTED = "not_distributed"
DISTRIBUTED = "distributed"
TPU = "tpu"
| 49.009294
| 142
| 0.64668
|
import dataclasses
import json
import os
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
from .file_utils import cached_property, is_torch_available, is_torch_tpu_available, torch_required
from .trainer_utils import EvaluationStrategy
from .utils import logging
if is_torch_available():
import torch
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
def default_logdir() -> str:
import socket
from datetime import datetime
current_time = datetime.now().strftime("%b%d_%H-%M-%S")
return os.path.join("runs", current_time + "_" + socket.gethostname())
@dataclass
class TrainingArguments:
output_dir: str = field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory."
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(default=None, metadata={"help": "Whether to run eval on the dev set."})
do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."})
model_parallel: bool = field(
default=False,
metadata={
"help": (
"If there are more than one devices, whether to use model parallelism to distribute the "
"model's modules across devices."
)
},
)
evaluation_strategy: EvaluationStrategy = field(
default="no",
metadata={"help": "Run evaluation during training at each logging step."},
)
prediction_loss_only: bool = field(
default=False,
metadata={"help": "When performing evaluation and predictions, only returns the loss."},
)
per_device_train_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
)
per_device_eval_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
)
per_gpu_train_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_train_batch_size` is preferred. "
"Batch size per GPU/TPU core/CPU for training."
},
)
per_gpu_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_eval_batch_size` is preferred."
"Batch size per GPU/TPU core/CPU for evaluation."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={"help": "Number of updates steps to accumulate before performing a backward/update pass."},
)
eval_accumulation_steps: Optional[int] = field(
default=None,
metadata={"help": "Number of predictions steps to accumulate before moving the tensors to the CPU."},
)
learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for Adam."})
weight_decay: float = field(default=0.0, metadata={"help": "Weight decay if we apply some."})
adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for Adam optimizer"})
adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for Adam optimizer"})
adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for Adam optimizer."})
max_grad_norm: float = field(default=1.0, metadata={"help": "Max gradient norm."})
num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
max_steps: int = field(
default=-1,
metadata={"help": "If > 0: set total number of training steps to perform. Override num_train_epochs."},
)
warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={"help": "Tensorboard log dir."})
logging_first_step: bool = field(default=False, metadata={"help": "Log the first global_step"})
logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."})
save_total_limit: Optional[int] = field(
default=None,
metadata={
"help": (
"Limit the total amount of checkpoints."
"Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints"
)
},
)
no_cuda: bool = field(default=False, metadata={"help": "Do not use CUDA even when it is available"})
seed: int = field(default=42, metadata={"help": "random seed for initialization"})
fp16: bool = field(
default=False,
metadata={"help": "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"},
)
fp16_opt_level: str = field(
default="O1",
metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
)
},
)
local_rank: int = field(default=-1, metadata={"help": "For distributed training: local_rank"})
tpu_num_cores: Optional[int] = field(
default=None, metadata={"help": "TPU: Number of TPU cores (automatically passed by launcher script)"}
)
tpu_metrics_debug: bool = field(
default=False,
metadata={"help": "Deprecated, the use of `--debug` is preferred. TPU: Whether to print debug metrics"},
)
debug: bool = field(default=False, metadata={"help": "Whether to print debug metrics on TPU"})
dataloader_drop_last: bool = field(
default=False, metadata={"help": "Drop the last incomplete batch if it is not divisible by the batch size."}
)
eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
dataloader_num_workers: int = field(
default=0,
metadata={
"help": "Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process."
},
)
past_index: int = field(
default=-1,
metadata={"help": "If >=0, uses the corresponding part of the output as the past state for next step."},
)
run_name: Optional[str] = field(
default=None, metadata={"help": "An optional descriptor for the run. Notably used for wandb logging."}
)
disable_tqdm: Optional[bool] = field(
default=None, metadata={"help": "Whether or not to disable the tqdm progress bars."}
)
remove_unused_columns: Optional[bool] = field(
default=True, metadata={"help": "Remove columns not required by the model when using an nlp.Dataset."}
)
label_names: Optional[List[str]] = field(
default=None, metadata={"help": "The list of keys in your dictionary of inputs that correspond to the labels."}
)
load_best_model_at_end: Optional[bool] = field(
default=False,
metadata={"help": "Whether or not to load the best model found during training at the end of training."},
)
metric_for_best_model: Optional[str] = field(
default=None, metadata={"help": "The metric to use to compare two different models."}
)
greater_is_better: Optional[bool] = field(
default=None, metadata={"help": "Whether the `metric_for_best_model` should be maximized or not."}
)
ignore_data_skip: bool = field(
default=False,
metadata={
"help": "When resuming training, whether or not to skip the first epochs and batches to get to the same training data."
},
)
def __post_init__(self):
if self.disable_tqdm is None:
self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN
self.evaluation_strategy = EvaluationStrategy(self.evaluation_strategy)
if self.do_eval is False and self.evaluation_strategy != EvaluationStrategy.NO:
self.do_eval = True
if self.eval_steps is None:
self.eval_steps = self.logging_steps
if self.load_best_model_at_end and self.metric_for_best_model is None:
self.metric_for_best_model = "loss"
if self.greater_is_better is None and self.metric_for_best_model is not None:
self.greater_is_better = self.metric_for_best_model not in ["loss", "eval_loss"]
if self.run_name is None:
self.run_name = self.output_dir
if is_torch_available() and self.device.type != "cuda" and self.fp16:
raise ValueError("AMP (`--fp16`) can only be used on CUDA devices.")
@property
def train_batch_size(self) -> int:
if self.per_gpu_train_batch_size:
logger.warning(
"Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future "
"version. Using `--per_device_train_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size
if not self.model_parallel:
train_batch_size = per_device_batch_size * max(1, self.n_gpu)
else:
train_batch_size = per_device_batch_size
return train_batch_size
@property
def eval_batch_size(self) -> int:
if self.per_gpu_eval_batch_size:
logger.warning(
"Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future "
"version. Using `--per_device_eval_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size
if not self.model_parallel:
eval_batch_size = per_device_batch_size * max(1, self.n_gpu)
else:
eval_batch_size = per_device_batch_size
return eval_batch_size
@cached_property
@torch_required
def _setup_devices(self) -> Tuple["torch.device", int]:
logger.info("PyTorch: setting up devices")
if self.no_cuda:
device = torch.device("cpu")
n_gpu = 0
elif is_torch_tpu_available():
device = xm.xla_device()
n_gpu = 0
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
else:
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend="nccl")
device = torch.device("cuda", self.local_rank)
n_gpu = 1
if device.type == "cuda":
torch.cuda.set_device(device)
return device, n_gpu
@property
@torch_required
def device(self) -> "torch.device":
return self._setup_devices[0]
@property
@torch_required
def n_gpu(self):
return self._setup_devices[1]
@property
@torch_required
def parallel_mode(self):
if is_torch_tpu_available():
return ParallelMode.TPU
elif self.local_rank != -1:
return ParallelMode.DISTRIBUTED
elif self.n_gpu > 1:
return ParallelMode.NOT_DISTRIBUTED
else:
return ParallelMode.NOT_PARALLEL
def to_dict(self):
d = dataclasses.asdict(self)
for k, v in d.items():
if isinstance(v, Enum):
d[k] = v.value
return d
def to_json_string(self):
return json.dumps(self.to_dict(), indent=2)
def to_sanitized_dict(self) -> Dict[str, Any]:
d = self.to_dict()
d = {**d, **{"train_batch_size": self.train_batch_size, "eval_batch_size": self.eval_batch_size}}
valid_types = [bool, int, float, str]
if is_torch_available():
valid_types.append(torch.Tensor)
return {k: v if type(v) in valid_types else str(v) for k, v in d.items()}
class ParallelMode(Enum):
NOT_PARALLEL = "not_parallel"
NOT_DISTRIBUTED = "not_distributed"
DISTRIBUTED = "distributed"
TPU = "tpu"
| true
| true
|
f70afc80da633e56f080f73b9d417cce0188dc99
| 4,232
|
py
|
Python
|
influxdb_client/domain/variable_assignment.py
|
rhajek/influxdb-client-python
|
852e6f1b1161df4d67eabc19cdb6b323a46b88e2
|
[
"MIT"
] | null | null | null |
influxdb_client/domain/variable_assignment.py
|
rhajek/influxdb-client-python
|
852e6f1b1161df4d67eabc19cdb6b323a46b88e2
|
[
"MIT"
] | null | null | null |
influxdb_client/domain/variable_assignment.py
|
rhajek/influxdb-client-python
|
852e6f1b1161df4d67eabc19cdb6b323a46b88e2
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Influx API Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class VariableAssignment(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'id': 'Identifier',
'init': 'Expression'
}
attribute_map = {
'type': 'type',
'id': 'id',
'init': 'init'
}
def __init__(self, type=None, id=None, init=None): # noqa: E501
"""VariableAssignment - a model defined in OpenAPI""" # noqa: E501
self._type = None
self._id = None
self._init = None
self.discriminator = None
if type is not None:
self.type = type
if id is not None:
self.id = id
if init is not None:
self.init = init
@property
def type(self):
"""Gets the type of this VariableAssignment. # noqa: E501
type of AST node # noqa: E501
:return: The type of this VariableAssignment. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this VariableAssignment.
type of AST node # noqa: E501
:param type: The type of this VariableAssignment. # noqa: E501
:type: str
"""
self._type = type
@property
def id(self):
"""Gets the id of this VariableAssignment. # noqa: E501
:return: The id of this VariableAssignment. # noqa: E501
:rtype: Identifier
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this VariableAssignment.
:param id: The id of this VariableAssignment. # noqa: E501
:type: Identifier
"""
self._id = id
@property
def init(self):
"""Gets the init of this VariableAssignment. # noqa: E501
:return: The init of this VariableAssignment. # noqa: E501
:rtype: Expression
"""
return self._init
@init.setter
def init(self, init):
"""Sets the init of this VariableAssignment.
:param init: The init of this VariableAssignment. # noqa: E501
:type: Expression
"""
self._init = init
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VariableAssignment):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
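# Illustrative usage sketch (editorial addition; the Identifier import path and its
# constructor argument are assumptions about the generated model layout, not taken
# from this file):
#
#   from influxdb_client.domain.identifier import Identifier
#
#   node = VariableAssignment(type="VariableAssignment", id=Identifier(name="bucket"))
#   node.to_dict()  # -> {'type': 'VariableAssignment', 'id': {...}, 'init': None}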
| 25.341317
| 124
| 0.548204
|
import pprint
import re
import six
class VariableAssignment(object):
openapi_types = {
'type': 'str',
'id': 'Identifier',
'init': 'Expression'
}
attribute_map = {
'type': 'type',
'id': 'id',
'init': 'init'
}
def __init__(self, type=None, id=None, init=None):
self._type = None
self._id = None
self._init = None
self.discriminator = None
if type is not None:
self.type = type
if id is not None:
self.id = id
if init is not None:
self.init = init
@property
def type(self):
return self._type
@type.setter
def type(self, type):
self._type = type
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def init(self):
return self._init
@init.setter
def init(self, init):
self._init = init
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, VariableAssignment):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f70afe6072a76e5fe93f7e3e26275edc78dfaba1
| 784
|
py
|
Python
|
main/zlib/template.py
|
ismith/cports
|
9fe76e231872e0b03b425252b5fc5e1d9af2a6d8
|
[
"BSD-2-Clause"
] | null | null | null |
main/zlib/template.py
|
ismith/cports
|
9fe76e231872e0b03b425252b5fc5e1d9af2a6d8
|
[
"BSD-2-Clause"
] | null | null | null |
main/zlib/template.py
|
ismith/cports
|
9fe76e231872e0b03b425252b5fc5e1d9af2a6d8
|
[
"BSD-2-Clause"
] | null | null | null |
pkgname = "zlib"
version = "1.2.11"
revision = 0
build_style = "configure"
short_desc = "Compression/decompression Library"
maintainer = "q66 <q66@chimera-linux.org>"
license = "Zlib"
homepage = "http://www.zlib.net"
distfiles = [f"{homepage}/{pkgname}-{version}.tar.gz"]
checksum = ["c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1"]
options = ["bootstrap"]
def do_configure(self):
self.do(self.chroot_cwd / "configure", [
"--prefix=/usr", "--shared"
])
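# Editorial note: inside the build chroot this is roughly equivalent to running
# "./configure --prefix=/usr --shared" from the unpacked zlib source directory
# (an illustrative paraphrase, not part of the original template).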
@subpackage("zlib-devel")
def _devel(self):
self.depends = [f"zlib={version}-r{revision}"]
self.short_desc = short_desc + " - development files"
return [
"usr/include",
"usr/lib/pkgconfig",
"usr/lib/*.a",
"usr/lib/*.so",
"usr/share",
]
| 25.290323
| 79
| 0.640306
|
pkgname = "zlib"
version = "1.2.11"
revision = 0
build_style = "configure"
short_desc = "Compression/decompression Library"
maintainer = "q66 <q66@chimera-linux.org>"
license = "Zlib"
homepage = "http://www.zlib.net"
distfiles = [f"{homepage}/{pkgname}-{version}.tar.gz"]
checksum = ["c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1"]
options = ["bootstrap"]
def do_configure(self):
self.do(self.chroot_cwd / "configure", [
"--prefix=/usr", "--shared"
])
@subpackage("zlib-devel")
def _devel(self):
self.depends = [f"zlib={version}-r{revision}"]
self.short_desc = short_desc + " - development files"
return [
"usr/include",
"usr/lib/pkgconfig",
"usr/lib/*.a",
"usr/lib/*.so",
"usr/share",
]
| true
| true
|
f70afe9d202280156e80d97bbe01c5a86d7add8a
| 154
|
py
|
Python
|
tests/test_nba_py_shotchart.py
|
evanmjohnson/nba-awards-predictor
|
33fbf48252bc7b85c5e406be13e957988c418182
|
[
"BSD-3-Clause"
] | 1,189
|
2015-08-25T22:51:49.000Z
|
2022-03-25T06:29:04.000Z
|
tests/test_nba_py_shotchart.py
|
calestini/nba_py
|
ffeaf4251d796ff9313367a752a45a0d7b16489e
|
[
"BSD-3-Clause"
] | 111
|
2015-08-28T15:41:10.000Z
|
2021-05-17T11:12:04.000Z
|
tests/test_nba_py_shotchart.py
|
calestini/nba_py
|
ffeaf4251d796ff9313367a752a45a0d7b16489e
|
[
"BSD-3-Clause"
] | 377
|
2015-08-26T00:35:07.000Z
|
2022-02-07T18:29:33.000Z
|
from nba_py import shotchart
from nba_py.player import get_player
def test():
pid = get_player('Kevin', 'Durant')
assert shotchart.ShotChart(pid)
| 25.666667
| 39
| 0.746753
|
from nba_py import shotchart
from nba_py.player import get_player
def test():
pid = get_player('Kevin', 'Durant')
assert shotchart.ShotChart(pid)
| true
| true
|
f70b001653854db2cd84ceba965fb48abc9e0a5c
| 18,327
|
py
|
Python
|
wagtail/wagtailusers/tests.py
|
jordij/wagtail
|
d4259e133b80d5648266db181029dfbe0fbcf885
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/wagtailusers/tests.py
|
jordij/wagtail
|
d4259e133b80d5648266db181029dfbe0fbcf885
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/wagtailusers/tests.py
|
jordij/wagtail
|
d4259e133b80d5648266db181029dfbe0fbcf885
|
[
"BSD-3-Clause"
] | 1
|
2019-02-04T13:57:39.000Z
|
2019-02-04T13:57:39.000Z
|
from __future__ import unicode_literals
import unittest
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.utils import six
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore import hooks
from wagtail.wagtailusers.models import UserProfile
from wagtail.wagtailcore.models import Page, GroupPagePermission
class TestUserIndexView(TestCase, WagtailTestUtils):
def setUp(self):
# create a user that should be visible in the listing
self.test_user = get_user_model().objects.create_user(username='testuser', email='testuser@email.com', password='password')
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/index.html')
self.assertContains(response, 'testuser')
def test_allows_negative_ids(self):
# see https://github.com/torchbox/wagtail/issues/565
get_user_model().objects.create_user('guardian', 'guardian@example.com', 'gu@rd14n', id=-1)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testuser')
self.assertContains(response, 'guardian')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestUserCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailusers_users:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
def test_create(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().email, 'test@user.com')
class TestUserEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user to edit
self.test_user = get_user_model().objects.create_user(username='testuser', email='testuser@email.com', password='password')
# Login
self.login()
def get(self, params={}, user_id=None):
return self.client.get(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.id, )), params)
def post(self, post_data={}, user_id=None):
return self.client.post(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.id, )), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
def test_nonexistant_redirect(self):
self.assertEqual(self.get(user_id=100000).status_code, 404)
def test_edit(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(id=self.test_user.id)
self.assertEqual(user.first_name, 'Edited')
def test_edit_validation_error(self):
# Leave "username" field blank. This should give a validation error
response = self.post({
'username': "",
'email': "test@user.com",
'first_name': "Teset",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
class TestUserProfileCreation(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user
self.test_user = get_user_model().objects.create_user(username='testuser', email='testuser@email.com', password='password')
def test_user_created_without_profile(self):
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 0)
with self.assertRaises(UserProfile.DoesNotExist):
self.test_user.userprofile
def test_user_profile_created_when_method_called(self):
self.assertIsInstance(UserProfile.get_for_user(self.test_user), UserProfile)
# and get it from the db too
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 1)
class TestGroupIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/index.html')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestGroupCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:add'), params)
def post(self, post_data={}):
post_defaults = {
'page_permissions-TOTAL_FORMS': ['0'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['0'],
}
for k, v in six.iteritems(post_defaults):
post_data[k] = post_data.get(k, v)
return self.client.post(reverse('wagtailusers_groups:add'), post_data)
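# Editorial note: the page_permissions-* keys above are Django's standard formset
# management-form fields; page_permissions-TOTAL_FORMS must match the number of
# page_permissions-N-* prefixes a test submits, which is why tests below override it
# when they post extra permission rows.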
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/create.html')
def test_create_group(self):
response = self.post({'name': "test group"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# Check that the group was created
groups = Group.objects.filter(name='test group')
self.assertEqual(groups.count(), 1)
def test_group_create_adding_permissions(self):
response = self.post({
'name': "test group",
'page_permissions-0-id': [''],
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_type': ['publish'],
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['edit'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now exists, with two page permissions
new_group = Group.objects.get(name='test group')
self.assertEqual(new_group.page_permissions.all().count(), 2)
@unittest.expectedFailure
def test_duplicate_page_permissions_error(self):
# Try to submit duplicate page permission entries
response = self.post({
'name': "test group",
'page_permissions-0-id': [''],
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_type': ['publish'],
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['publish'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the second form should have errors
self.assertEqual(bool(response.context['formset'].errors[0]), False)
self.assertEqual(bool(response.context['formset'].errors[1]), True)
class TestGroupEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a group to edit
self.test_group = Group.objects.create(name='test group')
self.root_page = Page.objects.get(id=1)
self.root_add_permission = GroupPagePermission.objects.create(page=self.root_page,
permission_type='add',
group=self.test_group)
# Get the hook-registered permissions, and add one to this group
self.registered_permissions = Permission.objects.none()
for fn in hooks.get_hooks('register_permissions'):
self.registered_permissions = self.registered_permissions | fn()
self.existing_permission = self.registered_permissions.order_by('pk')[0]
self.another_permission = self.registered_permissions.order_by('pk')[1]
self.test_group.permissions.add(self.existing_permission)
# Login
self.login()
def get(self, params={}, group_id=None):
return self.client.get(reverse('wagtailusers_groups:edit', args=(group_id or self.test_group.id, )), params)
def post(self, post_data={}, group_id=None):
post_defaults = {
'name': 'test group',
'permissions': [self.existing_permission.id],
'page_permissions-TOTAL_FORMS': ['1'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['1'], # as we have one page permission already
'page_permissions-0-id': [self.root_add_permission.id],
'page_permissions-0-page': [self.root_add_permission.page.id],
'page_permissions-0-permission_type': [self.root_add_permission.permission_type]
}
for k, v in six.iteritems(post_defaults):
post_data[k] = post_data.get(k, v)
return self.client.post(reverse('wagtailusers_groups:edit', args=(group_id or self.test_group.id, )), post_data)
def add_non_registered_perm(self):
# Some groups may have django permissions assigned that are not
# hook-registered as part of the wagtail interface. We need to ensure
# that these permissions are not overwritten by our views.
# Tests that use this method are testing the aforementioned
# functionality.
self.non_registered_perms = Permission.objects.exclude(id__in=self.registered_permissions)
self.non_registered_perm = self.non_registered_perms[0]
self.test_group.permissions.add(self.non_registered_perm)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/edit.html')
def test_nonexistant_group_redirect(self):
self.assertEqual(self.get(group_id=100000).status_code, 404)
def test_group_edit(self):
response = self.post({'name': "test group edited"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# Check that the group was edited
group = Group.objects.get(id=self.test_group.id)
self.assertEqual(group.name, 'test group edited')
def test_group_edit_validation_error(self):
# Leave "name" field blank. This should give a validation error
response = self.post({'name': ""})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
def test_group_edit_adding_page_permissions(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['publish'],
'page_permissions-2-id': [''],
'page_permissions-2-page': ['1'],
'page_permissions-2-permission_type': ['edit'],
'page_permissions-TOTAL_FORMS': ['3'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has three page permissions
self.assertEqual(self.test_group.page_permissions.count(), 3)
def test_group_edit_deleting_page_permissions(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-0-DELETE': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has zero page permissions
self.assertEqual(self.test_group.page_permissions.count(), 0)
def test_group_edit_loads_with_page_permissions_shown(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.get()
self.assertEqual(response.context['formset'].management_form['INITIAL_FORMS'].value(), 1)
self.assertEqual(response.context['formset'].forms[0].instance, self.root_add_permission)
root_edit_perm = GroupPagePermission.objects.create(page=self.root_page,
permission_type='edit',
group=self.test_group)
# The test group now has two page permissions
self.assertEqual(self.test_group.page_permissions.count(), 2)
# Reload the page and check the form instances
response = self.get()
self.assertEqual(response.context['formset'].management_form['INITIAL_FORMS'].value(), 2)
self.assertEqual(response.context['formset'].forms[0].instance, self.root_add_permission)
self.assertEqual(response.context['formset'].forms[1].instance, root_edit_perm)
def test_duplicate_page_permissions_error(self):
# Try to submit duplicate page permission entries
response = self.post({
'page_permissions-1-id': [''],
'page_permissions-1-page': [self.root_add_permission.page.id],
'page_permissions-1-permission_type': [self.root_add_permission.permission_type],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the second form should have errors
self.assertEqual(bool(response.context['formset'].errors[0]), False)
self.assertEqual(bool(response.context['formset'].errors[1]), True)
def test_group_add_registered_django_permissions(self):
# The test group has one django permission to begin with
self.assertEqual(self.test_group.permissions.count(), 1)
response = self.post({
'permissions': [self.existing_permission.id, self.another_permission.id]
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_form_includes_non_registered_permissions_in_initial_data(self):
self.add_non_registered_perm()
original_permissions = self.test_group.permissions.all()
self.assertEqual(original_permissions.count(), 2)
response = self.get()
# See that the form is set up with the correct initial data
self.assertEqual(response.context['form'].initial.get('permissions'), list(original_permissions.values_list('id', flat=True)))
def test_group_retains_non_registered_permissions_when_editing(self):
self.add_non_registered_perm()
original_permissions = list(self.test_group.permissions.all()) # list() to force evaluation
# submit the form with no changes (only submitting the existing
# permission, as in the self.post function definition)
self.post()
# See that the group has the same permissions as before
self.assertEqual(list(self.test_group.permissions.all()), original_permissions)
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_retains_non_registered_permissions_when_adding(self):
self.add_non_registered_perm()
# Add a second registered permission
self.post({
'permissions': [self.existing_permission.id, self.another_permission.id]
})
# See that there are now three permissions in total
self.assertEqual(self.test_group.permissions.count(), 3)
# ...including the non-registered one
self.assertIn(self.non_registered_perm, self.test_group.permissions.all())
def test_group_retains_non_registered_permissions_when_deleting(self):
self.add_non_registered_perm()
# Delete all registered permissions
self.post({'permissions': []})
# See that the non-registered permission is still there
self.assertEqual(self.test_group.permissions.count(), 1)
self.assertEqual(self.test_group.permissions.all()[0], self.non_registered_perm)
| 41.938215
| 134
| 0.6569
|
from __future__ import unicode_literals
import unittest
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.utils import six
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore import hooks
from wagtail.wagtailusers.models import UserProfile
from wagtail.wagtailcore.models import Page, GroupPagePermission
class TestUserIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.test_user = get_user_model().objects.create_user(username='testuser', email='testuser@email.com', password='password')
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/index.html')
self.assertContains(response, 'testuser')
def test_allows_negative_ids(self):
get_user_model().objects.create_user('guardian', 'guardian@example.com', 'gu@rd14n', id=-1)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testuser')
self.assertContains(response, 'guardian')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestUserCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailusers_users:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
def test_create(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "password",
'password2': "password",
})
self.assertRedirects(response, reverse('wagtailusers_users:index'))
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().email, 'test@user.com')
class TestUserEditView(TestCase, WagtailTestUtils):
def setUp(self):
self.test_user = get_user_model().objects.create_user(username='testuser', email='testuser@email.com', password='password')
self.login()
def get(self, params={}, user_id=None):
return self.client.get(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.id, )), params)
def post(self, post_data={}, user_id=None):
return self.client.post(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.id, )), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
def test_nonexistant_redirect(self):
self.assertEqual(self.get(user_id=100000).status_code, 404)
def test_edit(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
})
self.assertRedirects(response, reverse('wagtailusers_users:index'))
user = get_user_model().objects.get(id=self.test_user.id)
self.assertEqual(user.first_name, 'Edited')
def test_edit_validation_error(self):
response = self.post({
'username': "",
'email': "test@user.com",
'first_name': "Teset",
'last_name': "User",
'password1': "password",
'password2': "password",
})
self.assertEqual(response.status_code, 200)
class TestUserProfileCreation(TestCase, WagtailTestUtils):
def setUp(self):
self.test_user = get_user_model().objects.create_user(username='testuser', email='testuser@email.com', password='password')
def test_user_created_without_profile(self):
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 0)
with self.assertRaises(UserProfile.DoesNotExist):
self.test_user.userprofile
def test_user_profile_created_when_method_called(self):
self.assertIsInstance(UserProfile.get_for_user(self.test_user), UserProfile)
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 1)
class TestGroupIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/index.html')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestGroupCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:add'), params)
def post(self, post_data={}):
post_defaults = {
'page_permissions-TOTAL_FORMS': ['0'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['0'],
}
for k, v in six.iteritems(post_defaults):
post_data[k] = post_data.get(k, v)
return self.client.post(reverse('wagtailusers_groups:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/create.html')
def test_create_group(self):
response = self.post({'name': "test group"})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
groups = Group.objects.filter(name='test group')
self.assertEqual(groups.count(), 1)
def test_group_create_adding_permissions(self):
response = self.post({
'name': "test group",
'page_permissions-0-id': [''],
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_type': ['publish'],
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['edit'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
new_group = Group.objects.get(name='test group')
self.assertEqual(new_group.page_permissions.all().count(), 2)
@unittest.expectedFailure
def test_duplicate_page_permissions_error(self):
response = self.post({
'name': "test group",
'page_permissions-0-id': [''],
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_type': ['publish'],
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['publish'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
self.assertEqual(bool(response.context['formset'].errors[0]), False)
self.assertEqual(bool(response.context['formset'].errors[1]), True)
class TestGroupEditView(TestCase, WagtailTestUtils):
def setUp(self):
self.test_group = Group.objects.create(name='test group')
self.root_page = Page.objects.get(id=1)
self.root_add_permission = GroupPagePermission.objects.create(page=self.root_page,
permission_type='add',
group=self.test_group)
self.registered_permissions = Permission.objects.none()
for fn in hooks.get_hooks('register_permissions'):
self.registered_permissions = self.registered_permissions | fn()
self.existing_permission = self.registered_permissions.order_by('pk')[0]
self.another_permission = self.registered_permissions.order_by('pk')[1]
self.test_group.permissions.add(self.existing_permission)
self.login()
def get(self, params={}, group_id=None):
return self.client.get(reverse('wagtailusers_groups:edit', args=(group_id or self.test_group.id, )), params)
def post(self, post_data={}, group_id=None):
post_defaults = {
'name': 'test group',
'permissions': [self.existing_permission.id],
'page_permissions-TOTAL_FORMS': ['1'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['1'],
'page_permissions-0-id': [self.root_add_permission.id],
'page_permissions-0-page': [self.root_add_permission.page.id],
'page_permissions-0-permission_type': [self.root_add_permission.permission_type]
}
for k, v in six.iteritems(post_defaults):
post_data[k] = post_data.get(k, v)
return self.client.post(reverse('wagtailusers_groups:edit', args=(group_id or self.test_group.id, )), post_data)
def add_non_registered_perm(self):
self.non_registered_perms = Permission.objects.exclude(id__in=self.registered_permissions)
self.non_registered_perm = self.non_registered_perms[0]
self.test_group.permissions.add(self.non_registered_perm)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/edit.html')
def test_nonexistant_group_redirect(self):
self.assertEqual(self.get(group_id=100000).status_code, 404)
def test_group_edit(self):
response = self.post({'name': "test group edited"})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
group = Group.objects.get(id=self.test_group.id)
self.assertEqual(group.name, 'test group edited')
def test_group_edit_validation_error(self):
response = self.post({'name': ""})
self.assertEqual(response.status_code, 200)
def test_group_edit_adding_page_permissions(self):
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['publish'],
'page_permissions-2-id': [''],
'page_permissions-2-page': ['1'],
'page_permissions-2-permission_type': ['edit'],
'page_permissions-TOTAL_FORMS': ['3'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
self.assertEqual(self.test_group.page_permissions.count(), 3)
def test_group_edit_deleting_page_permissions(self):
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-0-DELETE': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
self.assertEqual(self.test_group.page_permissions.count(), 0)
def test_group_edit_loads_with_page_permissions_shown(self):
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.get()
self.assertEqual(response.context['formset'].management_form['INITIAL_FORMS'].value(), 1)
self.assertEqual(response.context['formset'].forms[0].instance, self.root_add_permission)
root_edit_perm = GroupPagePermission.objects.create(page=self.root_page,
permission_type='edit',
group=self.test_group)
self.assertEqual(self.test_group.page_permissions.count(), 2)
response = self.get()
self.assertEqual(response.context['formset'].management_form['INITIAL_FORMS'].value(), 2)
self.assertEqual(response.context['formset'].forms[0].instance, self.root_add_permission)
self.assertEqual(response.context['formset'].forms[1].instance, root_edit_perm)
def test_duplicate_page_permissions_error(self):
response = self.post({
'page_permissions-1-id': [''],
'page_permissions-1-page': [self.root_add_permission.page.id],
'page_permissions-1-permission_type': [self.root_add_permission.permission_type],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
self.assertEqual(bool(response.context['formset'].errors[0]), False)
self.assertEqual(bool(response.context['formset'].errors[1]), True)
def test_group_add_registered_django_permissions(self):
self.assertEqual(self.test_group.permissions.count(), 1)
response = self.post({
'permissions': [self.existing_permission.id, self.another_permission.id]
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_form_includes_non_registered_permissions_in_initial_data(self):
self.add_non_registered_perm()
original_permissions = self.test_group.permissions.all()
self.assertEqual(original_permissions.count(), 2)
response = self.get()
self.assertEqual(response.context['form'].initial.get('permissions'), list(original_permissions.values_list('id', flat=True)))
def test_group_retains_non_registered_permissions_when_editing(self):
self.add_non_registered_perm()
original_permissions = list(self.test_group.permissions.all())
self.post()
self.assertEqual(list(self.test_group.permissions.all()), original_permissions)
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_retains_non_registered_permissions_when_adding(self):
self.add_non_registered_perm()
self.post({
'permissions': [self.existing_permission.id, self.another_permission.id]
})
self.assertEqual(self.test_group.permissions.count(), 3)
self.assertIn(self.non_registered_perm, self.test_group.permissions.all())
def test_group_retains_non_registered_permissions_when_deleting(self):
self.add_non_registered_perm()
self.post({'permissions': []})
self.assertEqual(self.test_group.permissions.count(), 1)
self.assertEqual(self.test_group.permissions.all()[0], self.non_registered_perm)
| true
| true
|
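The group-edit tests above depend on Django formset management data: every POST has to carry the page_permissions TOTAL_FORMS, INITIAL_FORMS and MAX_NUM_FORMS keys, which is why the post() helpers merge them in as defaults before submitting. A minimal sketch of such a payload follows; the primary keys are hypothetical rather than taken from the test fixtures:
post_data = {
    'name': 'test group',
    # Management-form fields required by Django formsets:
    'page_permissions-TOTAL_FORMS': ['1'],      # forms submitted in this request
    'page_permissions-INITIAL_FORMS': ['1'],    # forms bound to existing objects
    'page_permissions-MAX_NUM_FORMS': ['1000'],
    # One existing permission row (hypothetical ids):
    'page_permissions-0-id': ['1'],
    'page_permissions-0-page': ['1'],
    'page_permissions-0-permission_type': ['add'],
}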
f70b00541c568010e818a7b67bc34d90385cc984
| 148
|
py
|
Python
|
tasks.py
|
mtkennerly/clingy
|
39454bcf535127ee80ca3e9fb1580dfefcb8aad9
|
[
"MIT"
] | 1
|
2017-03-24T09:19:18.000Z
|
2017-03-24T09:19:18.000Z
|
tasks.py
|
mtkennerly/clingy
|
39454bcf535127ee80ca3e9fb1580dfefcb8aad9
|
[
"MIT"
] | null | null | null |
tasks.py
|
mtkennerly/clingy
|
39454bcf535127ee80ca3e9fb1580dfefcb8aad9
|
[
"MIT"
] | null | null | null |
from invoke import task
@task
def dist(context):
context.run("python setup.py bdist_wheel")
@task
def test(context):
context.run("tox")
| 12.333333
| 46
| 0.695946
|
from invoke import task
@task
def dist(context):
context.run("python setup.py bdist_wheel")
@task
def test(context):
context.run("tox")
| true
| true
|
f70b0072f7dbff073d399e1d6359799fc8a20bd0
| 23,970
|
py
|
Python
|
cmake/external/tvm/python/tvm/relay/op/nn/nn.py
|
fushwLZU/onnxruntime_test
|
7ee82dde9150dc0d3014c06a82eabdecb989f2f3
|
[
"MIT"
] | 2
|
2020-06-24T03:16:34.000Z
|
2020-06-24T03:16:36.000Z
|
cmake/external/tvm/python/tvm/relay/op/nn/nn.py
|
fushwLZU/onnxruntime_test
|
7ee82dde9150dc0d3014c06a82eabdecb989f2f3
|
[
"MIT"
] | 4
|
2020-12-04T21:00:38.000Z
|
2022-01-22T12:49:30.000Z
|
cmake/external/tvm/python/tvm/relay/op/nn/nn.py
|
fushwLZU/onnxruntime_test
|
7ee82dde9150dc0d3014c06a82eabdecb989f2f3
|
[
"MIT"
] | 1
|
2019-09-20T07:05:27.000Z
|
2019-09-20T07:05:27.000Z
|
"""Neural network operations."""
from __future__ import absolute_import as _abs
from ...expr import TupleWrapper
from . import _make
def conv2d(data,
weight,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="OIHW",
out_layout="",
out_dtype=""):
r"""2D convolution.
This operator takes the weight as the convolution kernel
and convolves it with data to produce an output.
In the default case, where the data_layout is `NCHW`
and kernel_layout is `OIHW`, conv2d takes in
a data Tensor with shape `(batch_size, in_channels, height, width)`,
and a weight Tensor with shape `(channels, in_channels, kernel_size[0], kernel_size[1])`
to produce an output Tensor with the following rule:
.. math::
\mbox{out}[b, c, y, x] = \sum_{dy, dx, k}
\mbox{data}[b, k, \mbox{strides}[0] * y + dy, \mbox{strides}[1] * x + dx] *
\mbox{weight}[c, k, dy, dx]
Padding and dilation are applied to data and weight respectively before the computation.
This operator accepts data layout specification.
Semantically, the operator will convert the layout to the canonical layout
(`NCHW` for data and `OIHW` for weight), perform the computation,
then convert to the out_layout.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
strides : tuple of int, optional
The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
dilation : tuple of int, optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
The spatial dimensions of the convolution kernel.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
out_layout : str, optional
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.conv2d(data, weight, strides, padding, dilation,
groups, channels, kernel_size, data_layout,
kernel_layout, out_layout, out_dtype)
def conv2d_transpose(data,
weight,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="OIHW",
output_padding=(0, 0),
out_dtype=""):
"""Two dimensional transposed convolution operator.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
strides : Tuple[int], optional
The strides of convolution.
padding : Tuple[int], optional
The padding of convolution on both sides of inputs.
dilation : Tuple[int], optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
output_padding : Tuple[int], optional
Additional zero-padding to be added to one side of the output.
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.conv2d_transpose(data, weight, strides, padding, dilation,
groups, channels, kernel_size, data_layout,
kernel_layout, output_padding, out_dtype)
def softmax(data, axis=-1):
r"""Computes softmax.
.. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)}
.. note::
This operator can be optimized away for inference.
Parameters
----------
data: tvm.relay.Expr
The input data to the operator.
axis: int, optional
The axis to sum over when computing softmax
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.softmax(data, axis)
def log_softmax(data, axis=-1):
r"""Computes log softmax.
.. math::
\text{log_softmax}(x)_i = \log \frac{exp(x_i)}{\sum_j exp(x_j)}
.. note::
This operator can be optimized away for inference.
Parameters
----------
data: tvm.relay.Expr
The input data to the operator.
axis: int
The axis to sum over when computing softmax
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.log_softmax(data, axis)
def max_pool2d(data,
pool_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
layout="NCHW",
ceil_mode=False):
r"""2D maximum pooling operator.
This operator takes data as input and does 2D max value calculation
with in pool_size sized window by striding defined by stride
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w) and pool_size (kh, kw)
.. math::
\mbox{out}(b, c, y, x) = \max_{m=0, \ldots, kh-1} \max_{n=0, \ldots, kw-1}
\mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)
Padding is applied to data before the computation.
ceil_mode is used to take ceil or floor while computing out shape.
This operator accepts data layout specification.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
strides : tuple of int, optional
The strides of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
ceil_mode : bool, optional
To enable or disable ceil while pooling.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.max_pool2d(data, pool_size, strides, padding,
layout, ceil_mode)
def avg_pool2d(data,
pool_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
layout="NCHW",
ceil_mode=False,
count_include_pad=False):
r"""2D average pooling operator.
This operator takes data as input and does 2D average value calculation
with in pool_size sized window by striding defined by stride
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w), pool_size (kh, kw)
.. math::
\mbox{out}(b, c, y, x) = \frac{1}{kh * kw} \sum_{m=0}^{kh-1} \sum_{n=0}^{kw-1}
\mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)
Padding is applied to data before the computation.
ceil_mode is used to take ceil or floor while computing out shape.
count_include_pad indicates including or excluding padded input values in computation.
This operator accepts data layout specification.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
strides : tuple of int, optional
The strides of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
ceil_mode : bool, optional
To enable or disable ceil while pooling.
count_include_pad : bool, optional
To include padding to compute the average.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.avg_pool2d(data, pool_size, strides, padding,
layout, ceil_mode, count_include_pad)
def global_max_pool2d(data,
layout="NCHW"):
r"""2D global maximum pooling operator.
This operator takes data as input and does 2D max value calculation
across each window represented by WxH.
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w)
.. math::
\mbox{out}(b, c, 1, 1) = \max_{m=0, \ldots, h} \max_{n=0, \ldots, w}
\mbox{data}(b, c, m, n)
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.global_max_pool2d(data, layout)
def global_avg_pool2d(data,
layout="NCHW"):
r"""2D global average pooling operator.
This operator takes data as input and does 2D average value calculation
across each window represented by WxH.
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w)
.. math::
\mbox{out}(b, c, 1, 1) = \frac{1}{h * w} \sum_{m=0}^{h-1} \sum_{n=0}^{w-1}
\mbox{data}(b, c, m, n)
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.global_avg_pool2d(data, layout)
def upsampling(data,
scale=1,
layout="NCHW",
method="NEAREST_NEIGHBOR"):
"""Upsampling.
This operator takes data as input and does 2D scaling to the given scale factor.
In the default case, where the data_layout is `NCHW`
with data of shape (n, c, h, w)
out will have a shape (n, c, h*scale, w*scale)
method indicates the algorithm to be used while calculating the out value
and method can be one of ("BILINEAR", "NEAREST_NEIGHBOR")
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
scale : tvm.relay.Expr
The scale factor for upsampling.
layout : str, optional
Layout of the input.
method : str, optional
Scale method to used [NEAREST_NEIGHBOR, BILINEAR].
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.upsampling(data, scale, layout, method)
def batch_flatten(data):
"""BatchFlatten.
This operator flattens all the dimensions except for the batch dimension,
which results in a 2D output.
For data with shape ``(d1, d2, ..., dk)``
batch_flatten(data) returns reshaped output of shape ``(d1, d2*...*dk)``.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
Returns
-------
result : tvm.relay.Expr
The Flattened result.
"""
return _make.batch_flatten(data)
def bias_add(data, bias, axis=1):
"""add_bias operator.
Add 1D bias to the axis of data.
This function is a special case of add which allows
inference of shape of the bias from data.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
bias : tvm.relay.Expr
The bias to be added.
axis : int, optional
The axis to add the bias.
Returns
-------
result : tvm.relay.Expr
The final result.
"""
return _make.bias_add(data, bias, axis)
def dense(data, weight, units=None):
"""Dense operator.
Applies a linear transformation
.. math::
`Y = X * W`
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
units : int, optional
Number of hidden units of the dense transformation.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.dense(data, weight, units)
def relu(data):
"""Rectified linear unit.
.. math::
out = max(x, 0)
Parameters
----------
data : tvm.relay.Expr
The input data
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.relu(data)
def leaky_relu(data, alpha):
"""This operator takes data as input and does Leaky version
of a Rectified Linear Unit.
.. math::
`y = x > 0 ? x : alpha * x`
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
alpha : float
Slope coefficient for the negative half axis.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.leaky_relu(data, alpha)
def prelu(data, alpha, axis=1):
"""This operator takes data as input and does Leaky version
of a Rectified Linear Unit.
.. math::
`y = x > 0 ? x : alpha * x`
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
alpha : tvm.relay.Expr
Slope coefficient for the negative half axis.
axis : int, optional
The axis along which the channel dimension is specified.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.prelu(data, alpha, axis)
def pad(data,
pad_width,
pad_value=0.0):
r"""Padding
This operator takes in a tensor and pads each axis by the specified
widths using the specified value.
Parameters
----------
data: tvm.relay.Expr
The input data to the operator
pad_width: tuple of <tuple of <int>>, required
Number of values padded to the edges of each axis, in the format
of ((before_1, after_1), ..., (before_N, after_N))
pad_value: float, optional, default=0.0
The value used for padding
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.pad(data, pad_width, pad_value)
def lrn(data, size=5, axis=1, bias=2, alpha=.00001, beta=0.75):
"""This operator takes data as input and does local response normalization.
Normalize the input in a local region across or within feature maps.
Each input value is normalized to data / (bias + (alpha * sum_data^2 / size))^beta,
where size is the extent of each local region, and the sum is taken over the region
centered at that value (zero padding is added where necessary).
.. math::
(data / (bias + (alpha * sum_data ^2 /size))^beta)
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
size : int, optional
The size of the local region to be considered for normalization.
axis : int, optional
Input data layout channel axis. Default value is 1 for NCHW format
bias : float, optional
The offset parameter to avoid dividing by 0.
alpha : float, optional
The scaling parameter.
beta : float, optional
The exponent parameter.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.lrn(data, size, axis, alpha, beta, bias)
def l2_normalize(data, eps, axis=None):
"""Perform L2 normalization on the input data
.. math::
y(i, j) = x(i, j) / sqrt(max(sum(x^2), eps))
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
eps : float
epsilon value
axis : list of int, optional
axis over the normalization applied
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.l2_normalize(data, eps, axis)
def dropout(data, rate=0.5):
"""Applies the dropout operation to the input array.
During training, each element of the input is set to zero with
probability ``p``. The whole array is rescaled by ``1/(1-p)``
to keep the expected sum of the input unchanged.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
rate : float, optional (default=0.5)
The probability for an element to be reset to 0.
Returns
-------
result : tvm.relay.Expr
The result of dropout
"""
result = _make.dropout(data, rate)
return TupleWrapper(result, 2)[0]
def batch_norm(data,
gamma,
beta,
moving_mean,
moving_var,
axis=1,
epsilon=1e-5,
center=True,
scale=True):
r"""
Batch normalization layer (Ioffe and Szegedy, 2014).
Normalizes the input at each batch, i.e. applies a transformation
that maintains the mean activation close to 0 and the activation
standard deviation close to 1.
.. math::
data\_mean[i] = mean(data[:,i,:,...]) \\
data\_var[i] = var(data[:,i,:,...])
Then compute the normalized output, which has the same shape as input, as following:
.. math::
out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}}
* gamma[i] + beta[i]
Both *mean* and *var* return a scalar by treating the input as a vector.
Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta``
have shape *(k,)*.
Besides the inputs and the outputs, this operator accepts two auxiliary
states, ``moving_mean`` and ``moving_var``, which are *k*-length
vectors. They are global statistics for the whole dataset, which are updated by::
moving_mean = moving_mean * momentum + data_mean * (1 - momentum)
moving_var = moving_var * momentum + data_var * (1 - momentum)
The parameter ``axis`` specifies which axis of the input shape denotes
the 'channel' (separately normalized groups). The default is 1.
Specifying -1 sets the channel axis to be the last item in the input shape.
.. note::
This operator can be optimized away for inference.
Parameters
----------
data : tvm.relay.Expr
Input to which batch_norm will be applied.
gamma : tvm.relay.Expr
The gamma scale factor.
beta : tvm.relay.Expr
The beta offset factor.
moving_mean : tvm.relay.Expr
Running mean of input,
moving_var : tvm.relay.Expr
Running variance of input.
axis : int, optional, default=1
The axis along which the channel dimension is specified.
epsilon : double, optional, default=1e-5
Small float added to variance to avoid dividing by zero.
center : boolean, optional, default=True
If True, add offset of beta to normalized tensor. If False,
beta is ignored.
scale : boolean, optional, default=True
If True, multiply by gamma. If False, gamma is not used.
When the next layer is piecewise linear (also e.g. nn.relu),
this can be disabled since the scaling will be done by the next layer.
Returns
-------
result : relay.Tuple([tvm.relay.Expr, tvm.relay.Expr, tvm.relay.Expr])
Tuple of normed data (same shape as input),
new running mean (k-length vector),
and new running variance (k-length vector)
"""
result = _make.batch_norm(data,
gamma,
beta,
moving_mean,
moving_var,
axis,
epsilon,
center,
scale)
return TupleWrapper(result, 3)
def contrib_conv2d_winograd_without_weight_transform(data,
weight,
tile_size,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="OIHW",
out_layout="",
out_dtype=""):
r"""2D convolution with winograd algorithm.
The basic parameters are the same as the ones in vanilla conv2d.
It assumes the weight is pre-transformed by nn.contrib_conv2d_winograd_weight_transform
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
tile_size : int
The Tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)
strides : tuple of int, optional
The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
dilation : tuple of int, optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
The spatial dimensions of the convolution kernel.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
out_layout : str, optional
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.contrib_conv2d_winograd_without_weight_transform(
data, weight, tile_size, strides, padding, dilation,
groups, channels, kernel_size, data_layout,
kernel_layout, out_layout, out_dtype)
def contrib_conv2d_winograd_weight_transform(weight,
tile_size):
r"""Weight Transformation part for 2D convolution with winograd algorithm.
We separate this as a single op to enable pre-compute for inference.
Use this together with nn.contrib_conv2d_winograd_without_weight_transform
Parameters
----------
weight : tvm.relay.Expr
The weight expressions.
tile_size : int
The Tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.contrib_conv2d_winograd_weight_transform(weight, tile_size)
| 27.839721
| 93
| 0.591197
|
from __future__ import absolute_import as _abs
from ...expr import TupleWrapper
from . import _make
def conv2d(data,
weight,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="OIHW",
out_layout="",
out_dtype=""):
return _make.conv2d(data, weight, strides, padding, dilation,
groups, channels, kernel_size, data_layout,
kernel_layout, out_layout, out_dtype)
def conv2d_transpose(data,
weight,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="OIHW",
output_padding=(0, 0),
out_dtype=""):
return _make.conv2d_transpose(data, weight, strides, padding, dilation,
groups, channels, kernel_size, data_layout,
kernel_layout, output_padding, out_dtype)
def softmax(data, axis=-1):
return _make.softmax(data, axis)
def log_softmax(data, axis=-1):
return _make.log_softmax(data, axis)
def max_pool2d(data,
pool_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
layout="NCHW",
ceil_mode=False):
return _make.max_pool2d(data, pool_size, strides, padding,
layout, ceil_mode)
def avg_pool2d(data,
pool_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
layout="NCHW",
ceil_mode=False,
count_include_pad=False):
return _make.avg_pool2d(data, pool_size, strides, padding,
layout, ceil_mode, count_include_pad)
def global_max_pool2d(data,
layout="NCHW"):
return _make.global_max_pool2d(data, layout)
def global_avg_pool2d(data,
layout="NCHW"):
return _make.global_avg_pool2d(data, layout)
def upsampling(data,
scale=1,
layout="NCHW",
method="NEAREST_NEIGHBOR"):
return _make.upsampling(data, scale, layout, method)
def batch_flatten(data):
return _make.batch_flatten(data)
def bias_add(data, bias, axis=1):
return _make.bias_add(data, bias, axis)
def dense(data, weight, units=None):
return _make.dense(data, weight, units)
def relu(data):
return _make.relu(data)
def leaky_relu(data, alpha):
return _make.leaky_relu(data, alpha)
def prelu(data, alpha, axis=1):
return _make.prelu(data, alpha, axis)
def pad(data,
pad_width,
pad_value=0.0):
return _make.pad(data, pad_width, pad_value)
def lrn(data, size=5, axis=1, bias=2, alpha=.00001, beta=0.75):
return _make.lrn(data, size, axis, alpha, beta, bias)
def l2_normalize(data, eps, axis=None):
return _make.l2_normalize(data, eps, axis)
def dropout(data, rate=0.5):
result = _make.dropout(data, rate)
return TupleWrapper(result, 2)[0]
def batch_norm(data,
gamma,
beta,
moving_mean,
moving_var,
axis=1,
epsilon=1e-5,
center=True,
scale=True):
result = _make.batch_norm(data,
gamma,
beta,
moving_mean,
moving_var,
axis,
epsilon,
center,
scale)
return TupleWrapper(result, 3)
def contrib_conv2d_winograd_without_weight_transform(data,
weight,
tile_size,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="OIHW",
out_layout="",
out_dtype=""):
return _make.contrib_conv2d_winograd_without_weight_transform(
data, weight, tile_size, strides, padding, dilation,
groups, channels, kernel_size, data_layout,
kernel_layout, out_layout, out_dtype)
def contrib_conv2d_winograd_weight_transform(weight,
tile_size):
return _make.contrib_conv2d_winograd_weight_transform(weight, tile_size)
| true
| true
|
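The relay functions above are thin wrappers that forward their arguments to the C++ operator registry through _make. A minimal usage sketch for the conv2d wrapper, assuming a TVM installation; the shapes and variable names are illustrative only:
from tvm import relay

data = relay.var("data", shape=(1, 3, 224, 224), dtype="float32")
weight = relay.var("weight", shape=(16, 3, 3, 3), dtype="float32")
# conv2d passes everything straight through to _make.conv2d.
out = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1),
                      channels=16, kernel_size=(3, 3))
func = relay.Function([data, weight], out)
print(func)  # prints the Relay IR containing the nn.conv2d call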
f70b00d468378a77011e80dc65f27686941deebd
| 820
|
py
|
Python
|
tests/test_parser_leaf_html.py
|
sthagen/python-artichoke_growth
|
4d96d8bf63234248352dd10a3fb99c35d8312661
|
[
"MIT"
] | 1
|
2020-07-16T17:29:06.000Z
|
2020-07-16T17:29:06.000Z
|
tests/test_parser_leaf_html.py
|
sthagen/python-artichoke_growth
|
4d96d8bf63234248352dd10a3fb99c35d8312661
|
[
"MIT"
] | 17
|
2020-07-16T17:07:07.000Z
|
2020-12-06T16:36:23.000Z
|
tests/test_parser_leaf_html.py
|
sthagen/python-artichoke_growth
|
4d96d8bf63234248352dd10a3fb99c35d8312661
|
[
"MIT"
] | null | null | null |
import pathlib
from bs4 import BeautifulSoup
HTML_LEAF_PAGE_SAMPLE_PATH = pathlib.Path('tests', 'fixtures', 'html', 'leaf_page_sample.html')
HTML_TEXT = ''
def setup():
global HTML_TEXT
with open(HTML_LEAF_PAGE_SAMPLE_PATH, "rt", encoding="utf-8") as handle:
HTML_TEXT = handle.read()
def teardown():
global HTML_TEXT
HTML_TEXT = ''
def test_html_leaf_page_parse_fixture():
# soup = BeautifulSoup(HTML_TEXT, 'html.parser')
lines = [t for t in HTML_TEXT.split('\n') if t.startswith('<a href="')]
parsed = []
for line in lines:
a, x = line.split('">', 1)
f, r = x.split('</a>')
r = r.rstrip()
d, s, u = r.rsplit(' ', 2)
d = d.strip()
parsed.append((f, d, s, u))
for p in parsed:
print(p)
assert len(p) == 4
| 24.848485
| 95
| 0.590244
|
import pathlib
from bs4 import BeautifulSoup
HTML_LEAF_PAGE_SAMPLE_PATH = pathlib.Path('tests', 'fixtures', 'html', 'leaf_page_sample.html')
HTML_TEXT = ''
def setup():
global HTML_TEXT
with open(HTML_LEAF_PAGE_SAMPLE_PATH, "rt", encoding="utf-8") as handle:
HTML_TEXT = handle.read()
def teardown():
global HTML_TEXT
HTML_TEXT = ''
def test_html_leaf_page_parse_fixture():
lines = [t for t in HTML_TEXT.split('\n') if t.startswith('<a href="')]
parsed = []
for line in lines:
a, x = line.split('">', 1)
f, r = x.split('</a>')
r = r.rstrip()
d, s, u = r.rsplit(' ', 2)
d = d.strip()
parsed.append((f, d, s, u))
for p in parsed:
print(p)
assert len(p) == 4
| true
| true
|
f70b0188a275a756a3c5e6d61a896aefc90b9b12
| 835
|
py
|
Python
|
mysite/polls/models.py
|
3ng7n33r/DjangoTutorial
|
0885d3d9468292c0bf81f5a5fd508fae2c1a482c
|
[
"MIT"
] | 40
|
2018-02-06T09:16:18.000Z
|
2022-03-27T14:56:24.000Z
|
mysite/polls/models.py
|
3ng7n33r/DjangoTutorial
|
0885d3d9468292c0bf81f5a5fd508fae2c1a482c
|
[
"MIT"
] | 12
|
2019-08-06T01:56:51.000Z
|
2022-02-10T09:14:43.000Z
|
mysite/polls/models.py
|
3ng7n33r/DjangoTutorial
|
0885d3d9468292c0bf81f5a5fd508fae2c1a482c
|
[
"MIT"
] | 35
|
2018-06-05T20:27:21.000Z
|
2022-02-23T12:05:40.000Z
|
import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
| 28.793103
| 71
| 0.736527
|
import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
| true
| true
|
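Question.was_published_recently above accepts only timestamps within the last day and rejects future dates. A standalone sketch of the same window check in plain Python (hypothetical helper, no Django setup required):
import datetime

def was_published_recently(pub_date, now=None):
    # True only when pub_date lies between (now - 1 day) and now inclusive,
    # mirroring the model method above.
    now = now or datetime.datetime.now(datetime.timezone.utc)
    return now - datetime.timedelta(days=1) <= pub_date <= now

now = datetime.datetime.now(datetime.timezone.utc)
print(was_published_recently(now - datetime.timedelta(hours=3)))  # True
print(was_published_recently(now + datetime.timedelta(days=2)))   # False (future date)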
f70b035def2ff8729c21add34a0315c29add3dcb
| 1,251
|
py
|
Python
|
PyGame-Tutorials/tut06.py
|
AnubhavMadhav/PyGames
|
d35ac2c8140bdae1b2bf2f6dca057b3b461d11c8
|
[
"Apache-2.0"
] | 5
|
2020-06-04T11:48:09.000Z
|
2020-11-29T08:33:42.000Z
|
PyGame-Tutorials/tut06.py
|
AnubhavMadhav/PyGames
|
d35ac2c8140bdae1b2bf2f6dca057b3b461d11c8
|
[
"Apache-2.0"
] | null | null | null |
PyGame-Tutorials/tut06.py
|
AnubhavMadhav/PyGames
|
d35ac2c8140bdae1b2bf2f6dca057b3b461d11c8
|
[
"Apache-2.0"
] | null | null | null |
'''
Coding our First Game in PyGame
-
Creating Ground for Snakes
'''
import pygame
pygame.init()
# print(x) # All 6 pygame modules successfully imported
# Colors
white = (255, 255, 255)
red = (255, 0, 0)
black = (0, 0, 0)
# Creating Game Window
screen_width = 900
screen_height = 600
gameWindow = pygame.display.set_mode((screen_width, screen_height)) # Game Window of 900x600
pygame.display.set_caption("Snake - by Anubhav Madhav") # Title of the Game, which appears at the top of the window
pygame.display.update() # We need to update our display each and everytime we make a change
# Game Specific Variables
exit_game = False
game_over = False
# Creating a Game Loop
while not exit_game:
for event in pygame.event.get(): # This gets all the events which a user can perform in a game, like mouse hover, mouse click, pressing a certain key etc.
print(event)
if event.type == pygame.QUIT:
exit_game = True
gameWindow.fill(white) # Setting background color as white
pygame.display.update() # Need to update display cause we have made changes to gameWindow
pygame.quit()
quit()
| 28.431818
| 173
| 0.650679
|
import pygame
pygame.init()
white = (255, 255, 255)
black = (0, 0, 0)
screen_width = 900
screen_height = 600
gameWindow = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption("Snake - by Anubhav Madhav")
pygame.display.update()
exit_game = False
game_over = False
while not exit_game:
for event in pygame.event.get():
print(event)
if event.type == pygame.QUIT:
exit_game = True
gameWindow.fill(white)
pygame.display.update()
pygame.quit()
quit()
| true
| true
|
f70b03c8718a2d81744520d6a0d9e0abea8b40a2
| 124
|
py
|
Python
|
Florence/FiniteElements/Assembly/__init__.py
|
jdlaubrie/florence
|
830dca4a34be00d6e53cbec3007c10d438b27f57
|
[
"MIT"
] | 65
|
2017-08-04T10:21:13.000Z
|
2022-02-21T21:45:09.000Z
|
Florence/FiniteElements/Assembly/__init__.py
|
jdlaubrie/florence
|
830dca4a34be00d6e53cbec3007c10d438b27f57
|
[
"MIT"
] | 6
|
2018-06-03T02:29:20.000Z
|
2022-01-18T02:30:22.000Z
|
Florence/FiniteElements/Assembly/__init__.py
|
jdlaubrie/florence
|
830dca4a34be00d6e53cbec3007c10d438b27f57
|
[
"MIT"
] | 10
|
2018-05-30T09:44:10.000Z
|
2021-05-18T08:06:51.000Z
|
from .Assembly import Assemble, AssembleForces, AssembleInternalTractionForces, AssembleExplicit, AssembleMass, AssembleForm
| 124
| 124
| 0.887097
|
from .Assembly import Assemble, AssembleForces, AssembleInternalTractionForces, AssembleExplicit, AssembleMass, AssembleForm
| true
| true
|
f70b056c1b447b1d1d2392ed2df978ee7b7e61f4
| 7,455
|
py
|
Python
|
prog/python/python tuto/Pygame-Zero/pacman2/pacman2.py
|
gribdesbois/prog-backup
|
a394a392d32c550caf97119456aec1546bc8fbe1
|
[
"MIT"
] | null | null | null |
prog/python/python tuto/Pygame-Zero/pacman2/pacman2.py
|
gribdesbois/prog-backup
|
a394a392d32c550caf97119456aec1546bc8fbe1
|
[
"MIT"
] | null | null | null |
prog/python/python tuto/Pygame-Zero/pacman2/pacman2.py
|
gribdesbois/prog-backup
|
a394a392d32c550caf97119456aec1546bc8fbe1
|
[
"MIT"
] | null | null | null |
import pgzrun
import gameinput
import gamemaps
from random import randint
from datetime import datetime
WIDTH = 600
HEIGHT = 660
player = Actor("pacman_o") # Load in the player Actor image
player.score = 0
player.lives = 3
level = 0
SPEED = 3
def draw(): # Pygame Zero draw function
global pacDots, player
screen.blit('header', (0, 0))
screen.blit('colourmap', (0, 80))
pacDotsLeft = 0
for a in range(len(pacDots)):
if pacDots[a].status == 0:
pacDots[a].draw()
pacDotsLeft += 1
if pacDots[a].collidepoint((player.x, player.y)):
if pacDots[a].status == 0:
if pacDots[a].type == 2:
for g in range(len(ghosts)): ghosts[g].status = 1200
else:
player.score += 10
pacDots[a].status = 1
if pacDotsLeft == 0: player.status = 2
drawGhosts()
getPlayerImage()
player.draw()
drawLives()
screen.draw.text("LEVEL "+str(level) , topleft=(10, 10), owidth=0.5, ocolor=(0,0,255), color=(255,255,0) , fontsize=40)
screen.draw.text(str(player.score) , topright=(590, 20), owidth=0.5, ocolor=(255,255,255), color=(0,64,255) , fontsize=60)
if player.status == 3: drawCentreText("GAME OVER")
if player.status == 2: drawCentreText("LEVEL CLEARED!\nPress Enter or Button A\nto Continue")
if player.status == 1: drawCentreText("CAUGHT!\nPress Enter or Button A\nto Continue")
def drawCentreText(t):
screen.draw.text(t , center=(300, 434), owidth=0.5, ocolor=(255,255,255), color=(255,64,0) , fontsize=60)
def update(): # Pygame Zero update function
global player, moveGhostsFlag, ghosts
if player.status == 0:
if moveGhostsFlag == 4: moveGhosts()
for g in range(len(ghosts)):
if ghosts[g].status > 0: ghosts[g].status -= 1
if ghosts[g].collidepoint((player.x, player.y)):
if ghosts[g].status > 0:
player.score += 100
animate(ghosts[g], pos=(290, 370), duration=1/SPEED, tween='linear', on_finished=flagMoveGhosts)
else:
player.lives -= 1
sounds.pac2.play()
if player.lives == 0:
player.status = 3
music.fadeout(3)
else:
player.status = 1
if player.inputActive:
gameinput.checkInput(player)
gamemaps.checkMovePoint(player)
if player.movex or player.movey:
inputLock()
sounds.pac1.play()
animate(player, pos=(player.x + player.movex, player.y + player.movey), duration=1/SPEED, tween='linear', on_finished=inputUnLock)
if player.status == 1:
i = gameinput.checkInput(player)
if i == 1:
player.status = 0
player.x = 290
player.y = 570
if player.status == 2:
i = gameinput.checkInput(player)
if i == 1:
init()
def init():
global player, level
initDots()
initGhosts()
player.x = 290
player.y = 570
player.status = 0
inputUnLock()
level += 1
music.play("pm1")
music.set_volume(0.2)
def drawLives():
for l in range(player.lives): screen.blit("pacman_o", (10+(l*32),40))
def getPlayerImage():
global player
dt = datetime.now()
a = player.angle
tc = dt.microsecond%(500000/SPEED)/(100000/SPEED)
if tc > 2.5 and (player.movex != 0 or player.movey !=0):
if a != 180:
player.image = "pacman_c"
else:
player.image = "pacman_cr"
else:
if a != 180:
player.image = "pacman_o"
else:
player.image = "pacman_or"
player.angle = a
def drawGhosts():
for g in range(len(ghosts)):
if ghosts[g].x > player.x:
if ghosts[g].status > 200 or (ghosts[g].status > 1 and ghosts[g].status%2 == 0):
ghosts[g].image = "ghost5"
else:
ghosts[g].image = "ghost"+str(g+1)+"r"
else:
if ghosts[g].status > 200 or (ghosts[g].status > 1 and ghosts[g].status%2 == 0):
ghosts[g].image = "ghost5"
else:
ghosts[g].image = "ghost"+str(g+1)
ghosts[g].draw()
def moveGhosts():
global moveGhostsFlag
dmoves = [(1,0),(0,1),(-1,0),(0,-1)]
moveGhostsFlag = 0
for g in range(len(ghosts)):
dirs = gamemaps.getPossibleDirection(ghosts[g])
if inTheCentre(ghosts[g]):
ghosts[g].dir = 3
else:
if g == 0: followPlayer(g, dirs)
if g == 1: ambushPlayer(g, dirs)
if dirs[ghosts[g].dir] == 0 or randint(0,50) == 0:
d = -1
while d == -1:
rd = randint(0,3)
if aboveCentre(ghosts[g]) and rd == 1:
rd = 0
if dirs[rd] == 1:
d = rd
ghosts[g].dir = d
animate(ghosts[g], pos=(ghosts[g].x + dmoves[ghosts[g].dir][0]*20, ghosts[g].y + dmoves[ghosts[g].dir][1]*20), duration=1/SPEED, tween='linear', on_finished=flagMoveGhosts)
def followPlayer(g, dirs):
d = ghosts[g].dir
if d == 1 or d == 3:
if player.x > ghosts[g].x and dirs[0] == 1: ghosts[g].dir = 0
if player.x < ghosts[g].x and dirs[2] == 1: ghosts[g].dir = 2
if d == 0 or d == 2:
if player.y > ghosts[g].y and dirs[1] == 1 and not aboveCentre(ghosts[g]): ghosts[g].dir = 1
if player.y < ghosts[g].y and dirs[3] == 1: ghosts[g].dir = 3
def ambushPlayer(g, dirs):
d = ghosts[g].dir
if player.movex > 0 and dirs[0] == 1: ghosts[g].dir = 0
if player.movex < 0 and dirs[2] == 1: ghosts[g].dir = 2
if player.movey > 0 and dirs[1] == 1 and not aboveCentre(ghosts[g]): ghosts[g].dir = 1
if player.movey < 0 and dirs[3] == 1: ghosts[g].dir = 3
def inTheCentre(ga):
if ga.x > 220 and ga.x < 380 and ga.y > 320 and ga.y < 420:
return True
return False
def aboveCentre(ga):
if ga.x > 220 and ga.x < 380 and ga.y > 300 and ga.y < 320:
return True
return False
def flagMoveGhosts():
global moveGhostsFlag
moveGhostsFlag += 1
def ghostCollided(ga,gn):
for g in range(len(ghosts)):
if ghosts[g].colliderect(ga) and g != gn:
return True
return False
def initDots():
global pacDots
pacDots = []
a = x = 0
while x < 30:
y = 0
while y < 29:
d = gamemaps.checkDotPoint(10+x*20, 10+y*20)
if d == 1:
pacDots.append(Actor("dot",(10+x*20, 90+y*20)))
pacDots[a].status = 0
pacDots[a].type = 1
a += 1
if d == 2:
pacDots.append(Actor("power",(10+x*20, 90+y*20)))
pacDots[a].status = 0
pacDots[a].type = 2
a += 1
y += 1
x += 1
def initGhosts():
global ghosts, moveGhostsFlag
moveGhostsFlag = 4
ghosts = []
g = 0
while g < 4:
ghosts.append(Actor("ghost"+str(g+1),(270+(g*20), 370)))
ghosts[g].dir = randint(0, 3)
ghosts[g].status = 0
g += 1
def inputLock():
global player
player.inputActive = False
def inputUnLock():
global player
player.movex = player.movey = 0
player.inputActive = True
init()
pgzrun.go()
| 32.272727
| 180
| 0.535882
|
import pgzrun
import gameinput
import gamemaps
from random import randint
from datetime import datetime
WIDTH = 600
HEIGHT = 660
player = Actor("pacman_o")
player.score = 0
player.lives = 3
level = 0
SPEED = 3
def draw():
global pacDots, player
screen.blit('header', (0, 0))
screen.blit('colourmap', (0, 80))
pacDotsLeft = 0
for a in range(len(pacDots)):
if pacDots[a].status == 0:
pacDots[a].draw()
pacDotsLeft += 1
if pacDots[a].collidepoint((player.x, player.y)):
if pacDots[a].status == 0:
if pacDots[a].type == 2:
for g in range(len(ghosts)): ghosts[g].status = 1200
else:
player.score += 10
pacDots[a].status = 1
if pacDotsLeft == 0: player.status = 2
drawGhosts()
getPlayerImage()
player.draw()
drawLives()
screen.draw.text("LEVEL "+str(level) , topleft=(10, 10), owidth=0.5, ocolor=(0,0,255), color=(255,255,0) , fontsize=40)
screen.draw.text(str(player.score) , topright=(590, 20), owidth=0.5, ocolor=(255,255,255), color=(0,64,255) , fontsize=60)
if player.status == 3: drawCentreText("GAME OVER")
if player.status == 2: drawCentreText("LEVEL CLEARED!\nPress Enter or Button A\nto Continue")
if player.status == 1: drawCentreText("CAUGHT!\nPress Enter or Button A\nto Continue")
def drawCentreText(t):
screen.draw.text(t , center=(300, 434), owidth=0.5, ocolor=(255,255,255), color=(255,64,0) , fontsize=60)
def update():
global player, moveGhostsFlag, ghosts
if player.status == 0:
if moveGhostsFlag == 4: moveGhosts()
for g in range(len(ghosts)):
if ghosts[g].status > 0: ghosts[g].status -= 1
if ghosts[g].collidepoint((player.x, player.y)):
if ghosts[g].status > 0:
player.score += 100
animate(ghosts[g], pos=(290, 370), duration=1/SPEED, tween='linear', on_finished=flagMoveGhosts)
else:
player.lives -= 1
sounds.pac2.play()
if player.lives == 0:
player.status = 3
music.fadeout(3)
else:
player.status = 1
if player.inputActive:
gameinput.checkInput(player)
gamemaps.checkMovePoint(player)
if player.movex or player.movey:
inputLock()
sounds.pac1.play()
animate(player, pos=(player.x + player.movex, player.y + player.movey), duration=1/SPEED, tween='linear', on_finished=inputUnLock)
if player.status == 1:
i = gameinput.checkInput(player)
if i == 1:
player.status = 0
player.x = 290
player.y = 570
if player.status == 2:
i = gameinput.checkInput(player)
if i == 1:
init()
def init():
global player, level
initDots()
initGhosts()
player.x = 290
player.y = 570
player.status = 0
inputUnLock()
level += 1
music.play("pm1")
music.set_volume(0.2)
def drawLives():
for l in range(player.lives): screen.blit("pacman_o", (10+(l*32),40))
def getPlayerImage():
global player
dt = datetime.now()
a = player.angle
tc = dt.microsecond%(500000/SPEED)/(100000/SPEED)
if tc > 2.5 and (player.movex != 0 or player.movey !=0):
if a != 180:
player.image = "pacman_c"
else:
player.image = "pacman_cr"
else:
if a != 180:
player.image = "pacman_o"
else:
player.image = "pacman_or"
player.angle = a
def drawGhosts():
for g in range(len(ghosts)):
if ghosts[g].x > player.x:
if ghosts[g].status > 200 or (ghosts[g].status > 1 and ghosts[g].status%2 == 0):
ghosts[g].image = "ghost5"
else:
ghosts[g].image = "ghost"+str(g+1)+"r"
else:
if ghosts[g].status > 200 or (ghosts[g].status > 1 and ghosts[g].status%2 == 0):
ghosts[g].image = "ghost5"
else:
ghosts[g].image = "ghost"+str(g+1)
ghosts[g].draw()
def moveGhosts():
global moveGhostsFlag
dmoves = [(1,0),(0,1),(-1,0),(0,-1)]
moveGhostsFlag = 0
for g in range(len(ghosts)):
dirs = gamemaps.getPossibleDirection(ghosts[g])
if inTheCentre(ghosts[g]):
ghosts[g].dir = 3
else:
if g == 0: followPlayer(g, dirs)
if g == 1: ambushPlayer(g, dirs)
if dirs[ghosts[g].dir] == 0 or randint(0,50) == 0:
d = -1
while d == -1:
rd = randint(0,3)
if aboveCentre(ghosts[g]) and rd == 1:
rd = 0
if dirs[rd] == 1:
d = rd
ghosts[g].dir = d
animate(ghosts[g], pos=(ghosts[g].x + dmoves[ghosts[g].dir][0]*20, ghosts[g].y + dmoves[ghosts[g].dir][1]*20), duration=1/SPEED, tween='linear', on_finished=flagMoveGhosts)
def followPlayer(g, dirs):
d = ghosts[g].dir
if d == 1 or d == 3:
if player.x > ghosts[g].x and dirs[0] == 1: ghosts[g].dir = 0
if player.x < ghosts[g].x and dirs[2] == 1: ghosts[g].dir = 2
if d == 0 or d == 2:
if player.y > ghosts[g].y and dirs[1] == 1 and not aboveCentre(ghosts[g]): ghosts[g].dir = 1
if player.y < ghosts[g].y and dirs[3] == 1: ghosts[g].dir = 3
def ambushPlayer(g, dirs):
d = ghosts[g].dir
if player.movex > 0 and dirs[0] == 1: ghosts[g].dir = 0
if player.movex < 0 and dirs[2] == 1: ghosts[g].dir = 2
if player.movey > 0 and dirs[1] == 1 and not aboveCentre(ghosts[g]): ghosts[g].dir = 1
if player.movey < 0 and dirs[3] == 1: ghosts[g].dir = 3
def inTheCentre(ga):
if ga.x > 220 and ga.x < 380 and ga.y > 320 and ga.y < 420:
return True
return False
def aboveCentre(ga):
if ga.x > 220 and ga.x < 380 and ga.y > 300 and ga.y < 320:
return True
return False
def flagMoveGhosts():
global moveGhostsFlag
moveGhostsFlag += 1
def ghostCollided(ga,gn):
for g in range(len(ghosts)):
if ghosts[g].colliderect(ga) and g != gn:
return True
return False
def initDots():
global pacDots
pacDots = []
a = x = 0
while x < 30:
y = 0
while y < 29:
d = gamemaps.checkDotPoint(10+x*20, 10+y*20)
if d == 1:
pacDots.append(Actor("dot",(10+x*20, 90+y*20)))
pacDots[a].status = 0
pacDots[a].type = 1
a += 1
if d == 2:
pacDots.append(Actor("power",(10+x*20, 90+y*20)))
pacDots[a].status = 0
pacDots[a].type = 2
a += 1
y += 1
x += 1
def initGhosts():
global ghosts, moveGhostsFlag
moveGhostsFlag = 4
ghosts = []
g = 0
while g < 4:
ghosts.append(Actor("ghost"+str(g+1),(270+(g*20), 370)))
ghosts[g].dir = randint(0, 3)
ghosts[g].status = 0
g += 1
def inputLock():
global player
player.inputActive = False
def inputUnLock():
global player
player.movex = player.movey = 0
player.inputActive = True
init()
pgzrun.go()
| true
| true
|
f70b06132039891cd3318917fc783ba4c170086b
| 697
|
py
|
Python
|
examples/example_function_order.py
|
leandroltavares/pylint-plus
|
f3ad1a5470f4a99438b39f72a9f4ae690399b08c
|
[
"MIT"
] | null | null | null |
examples/example_function_order.py
|
leandroltavares/pylint-plus
|
f3ad1a5470f4a99438b39f72a9f4ae690399b08c
|
[
"MIT"
] | null | null | null |
examples/example_function_order.py
|
leandroltavares/pylint-plus
|
f3ad1a5470f4a99438b39f72a9f4ae690399b08c
|
[
"MIT"
] | null | null | null |
#pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring,no-self-use,too-few-public-methods
def first(): # First should be defined after second, to keep call order
pass
def second():
first()
class Example:
    def first(self): # First should be defined after second, to keep call order
pass
def second(self):
self.first()
def before(self): # 'Before' is placed correctly before 'after'
self.after()
def after(self):
pass
class ExampleInner:
def outer(self):
        def inner(): # Inner functions are an exception; they must be defined before their usage
pass
inner()
| 23.233333
| 127
| 0.657102
|
def first():
pass
def second():
first()
class Example:
def first(self):
pass
def second(self):
self.first()
def before(self):
self.after()
def after(self):
pass
class ExampleInner:
def outer(self):
def inner():
pass
inner()
| true
| true
|
f70b061d2606ca0be36e23f56f65b717929eb470
| 104
|
py
|
Python
|
calculator/__init__.py
|
goncalovalverde/seshat
|
deff5cdd985f81ac2b4ebd077eea11f7c4f4118f
|
[
"MIT"
] | 1
|
2020-12-22T13:23:00.000Z
|
2020-12-22T13:23:00.000Z
|
calculator/__init__.py
|
goncalovalverde/seshat
|
deff5cdd985f81ac2b4ebd077eea11f7c4f4118f
|
[
"MIT"
] | 5
|
2020-12-22T13:36:30.000Z
|
2021-02-27T05:42:18.000Z
|
calculator/__init__.py
|
goncalovalverde/seshat
|
deff5cdd985f81ac2b4ebd077eea11f7c4f4118f
|
[
"MIT"
] | null | null | null |
import logging
class Calculator(object):
def __init__(self, config):
self.config = config
| 14.857143
| 31
| 0.682692
|
import logging
class Calculator(object):
def __init__(self, config):
self.config = config
| true
| true
|
f70b0657a109c516768a303f19153456024b4d50
| 3,686
|
py
|
Python
|
tests/conftest.py
|
datdinhquoc/flask_jsondash
|
124f5739aebb39c4d36d27a57acb1a32df95a51d
|
[
"MIT"
] | 3,503
|
2016-08-25T19:57:33.000Z
|
2022-03-31T20:04:37.000Z
|
tests/conftest.py
|
datdinhquoc/flask_jsondash
|
124f5739aebb39c4d36d27a57acb1a32df95a51d
|
[
"MIT"
] | 203
|
2016-05-06T18:01:12.000Z
|
2022-03-23T09:05:28.000Z
|
tests/conftest.py
|
datdinhquoc/flask_jsondash
|
124f5739aebb39c4d36d27a57acb1a32df95a51d
|
[
"MIT"
] | 350
|
2016-08-30T10:29:57.000Z
|
2022-02-02T17:59:41.000Z
|
import json
import os
import pytest
from flask import Flask, url_for
from pyquery import PyQuery as pq
from flask_jsondash import charts_builder, utils
from flask_jsondash import db
URL_BASE = 'http://127.0.0.1:80'
app = Flask('test_flask_jsondash',
template_folder='../flask_jsondash/example_app/templates')
app.config.update(
# Required to fix context errors.
# See https://github.com/jarus/flask-testing/issues/21
PRESERVE_CONTEXT_ON_EXCEPTION=False,
SECRET_KEY='123',
)
app.debug = True
app.register_blueprint(charts_builder.charts)
fake_db = []
def _username():
return 'Username'
def auth_valid(**kwargs):
return True
def auth_invalid(**kwargs):
return False
def get_json_config(name):
parent = os.getcwd().replace('tests/', '')
path = '{0}/example_app/examples/config/{1}'.format(parent, name)
view = json.load(open(path, 'r'))
return view
def read(*args, **kwargs):
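    # Fake adapter read/filter monkeypatched in the ctx fixture: an 'override'
    # kwarg returns a replacement reader that always yields that dict, a 'c_id'
    # returns the matching dashboard, and otherwise the whole fake_db is returned.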
if 'override' in kwargs:
newkwargs = kwargs.pop('override')
def _read(*args, **kwargs):
return dict(**newkwargs)
return _read
if 'c_id' not in kwargs:
return fake_db
for i, dash in enumerate(fake_db):
if dash['id'] == kwargs.get('c_id'):
return dash
def delete(c_id, **kwargs):
global fake_db
for i, dash in enumerate(fake_db):
if dash['id'] == c_id:
del fake_db[i]
break
def create(*args, **kwargs):
global fake_db
fake_db.append(dict(**kwargs.get('data')))
def update(c_id, **kwargs):
global fake_db
for i, dash in enumerate(fake_db):
if dash['id'] == c_id:
fake_db[i].update(**kwargs)
break
def setup_dashboard(monkeypatch, app, test, data):
"""Helper function to setup dashboard, redirect, and get its html."""
assert len(read()) == 0
monkeypatch.setattr(charts_builder, 'auth', auth_valid)
test.post(url_for('jsondash.create'), data=data, follow_redirects=True)
view_id = read()[0]['id']
assert len(read()) == 1
url = url_for('jsondash.view', c_id=view_id)
res = test.get(url)
dom = pq(res.data)
return dom
def make_chart(**kwargs):
"""Create a fake chart."""
data = dict(
name='somechart',
width=1,
height=1,
family='C3',
type='line',
row=1,
dataSource='...',
)
data.update(**kwargs)
return json.dumps(data)
@pytest.yield_fixture(autouse=True)
def ctx(monkeypatch, request):
with app.test_request_context() as req_ctx:
global fake_db
fake_db = []
monkeypatch.setattr(utils.adapter, 'read', read)
monkeypatch.setattr(utils.adapter, 'create', create)
monkeypatch.setattr(utils.adapter, 'delete', delete)
monkeypatch.setattr(utils.adapter, 'update', update)
monkeypatch.setattr(utils.adapter, 'filter', read)
yield req_ctx
@pytest.fixture()
def adapter():
return db.get_db_handler()
@pytest.fixture()
def client():
app.config.update(
JSONDASH_GLOBALDASH=False,
JSONDASH_FILTERUSERS=False,
JSONDASH_GLOBAL_USER='global-test',
)
app.config['JSONDASH'] = dict(
metadata=dict(
created_by=_username,
username=_username,
),
static=dict(
js_path='js/vendor/',
css_path='css/vendor/',
),
auth=dict(
edit_others=auth_invalid,
edit_global=auth_invalid,
create=auth_invalid,
view=auth_invalid,
clone=auth_invalid,
delete=auth_invalid,
)
)
return app, app.test_client()
| 24.091503
| 75
| 0.616658
|
import json
import os
import pytest
from flask import Flask, url_for
from pyquery import PyQuery as pq
from flask_jsondash import charts_builder, utils
from flask_jsondash import db
URL_BASE = 'http://127.0.0.1:80'
app = Flask('test_flask_jsondash',
template_folder='../flask_jsondash/example_app/templates')
app.config.update(
PRESERVE_CONTEXT_ON_EXCEPTION=False,
SECRET_KEY='123',
)
app.debug = True
app.register_blueprint(charts_builder.charts)
fake_db = []
def _username():
return 'Username'
def auth_valid(**kwargs):
return True
def auth_invalid(**kwargs):
return False
def get_json_config(name):
parent = os.getcwd().replace('tests/', '')
path = '{0}/example_app/examples/config/{1}'.format(parent, name)
view = json.load(open(path, 'r'))
return view
def read(*args, **kwargs):
if 'override' in kwargs:
newkwargs = kwargs.pop('override')
def _read(*args, **kwargs):
return dict(**newkwargs)
return _read
if 'c_id' not in kwargs:
return fake_db
for i, dash in enumerate(fake_db):
if dash['id'] == kwargs.get('c_id'):
return dash
def delete(c_id, **kwargs):
global fake_db
for i, dash in enumerate(fake_db):
if dash['id'] == c_id:
del fake_db[i]
break
def create(*args, **kwargs):
global fake_db
fake_db.append(dict(**kwargs.get('data')))
def update(c_id, **kwargs):
global fake_db
for i, dash in enumerate(fake_db):
if dash['id'] == c_id:
fake_db[i].update(**kwargs)
break
def setup_dashboard(monkeypatch, app, test, data):
assert len(read()) == 0
monkeypatch.setattr(charts_builder, 'auth', auth_valid)
test.post(url_for('jsondash.create'), data=data, follow_redirects=True)
view_id = read()[0]['id']
assert len(read()) == 1
url = url_for('jsondash.view', c_id=view_id)
res = test.get(url)
dom = pq(res.data)
return dom
def make_chart(**kwargs):
data = dict(
name='somechart',
width=1,
height=1,
family='C3',
type='line',
row=1,
dataSource='...',
)
data.update(**kwargs)
return json.dumps(data)
@pytest.yield_fixture(autouse=True)
def ctx(monkeypatch, request):
with app.test_request_context() as req_ctx:
global fake_db
fake_db = []
monkeypatch.setattr(utils.adapter, 'read', read)
monkeypatch.setattr(utils.adapter, 'create', create)
monkeypatch.setattr(utils.adapter, 'delete', delete)
monkeypatch.setattr(utils.adapter, 'update', update)
monkeypatch.setattr(utils.adapter, 'filter', read)
yield req_ctx
@pytest.fixture()
def adapter():
return db.get_db_handler()
@pytest.fixture()
def client():
app.config.update(
JSONDASH_GLOBALDASH=False,
JSONDASH_FILTERUSERS=False,
JSONDASH_GLOBAL_USER='global-test',
)
app.config['JSONDASH'] = dict(
metadata=dict(
created_by=_username,
username=_username,
),
static=dict(
js_path='js/vendor/',
css_path='css/vendor/',
),
auth=dict(
edit_others=auth_invalid,
edit_global=auth_invalid,
create=auth_invalid,
view=auth_invalid,
clone=auth_invalid,
delete=auth_invalid,
)
)
return app, app.test_client()
| true
| true
|
f70b06873d5edf44d17aafe0818fcc3b08d0f79f
| 1,866
|
py
|
Python
|
sam-app-py/tests/unit/test_handler.py
|
abhinavDhulipala/SAM-URL
|
2edaaf11f5baa0153e6ee943635c5d86a55cd84f
|
[
"MIT"
] | 1
|
2021-04-07T02:44:29.000Z
|
2021-04-07T02:44:29.000Z
|
sam-app-py/tests/unit/test_handler.py
|
abhinavDhulipala/SAM-URL
|
2edaaf11f5baa0153e6ee943635c5d86a55cd84f
|
[
"MIT"
] | null | null | null |
sam-app-py/tests/unit/test_handler.py
|
abhinavDhulipala/SAM-URL
|
2edaaf11f5baa0153e6ee943635c5d86a55cd84f
|
[
"MIT"
] | null | null | null |
import json
import pytest
import os
import sys
abs_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(f'{abs_path}/../..')
sys.path.append(f'{abs_path}/../../..')
print(sys.path[-1])
from moto import mock_dynamodb2
from redirect_handler import app
import boto_utils
from constants import TABLE_NAME
import boto3
@pytest.fixture()
def apigw_event():
""" Generates API GW Event"""
with open('./events/redirect_simple_event.json') as fp:
return json.load(fp)
def test_lambda_handler(apigw_event):
    # Note: put() must work, and your DB should contain a test entry under the key '1234567' for this test to pass
@mock_dynamodb2
def mock_events():
dynamodb = boto3.resource('dynamodb')
created_table = dynamodb.create_table(
TableName=TABLE_NAME,
KeySchema=[
{
'AttributeName': 'redirect_url',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'redirect_url',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
)
boto_utils.put('https://example.com', '1234567', '', '')
mock_events()
ret = app.lambda_handler(apigw_event, '')
assert ret['statusCode'] == 302
assert 'location' in ret['headers']
failed_codes = {206, 204}
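    # When the hash does not resolve, the handler is expected to answer with
    # one of these non-redirect status codes rather than a 302.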
apigw_event['pathParameters']['hash'] = apigw_event['pathParameters']['hash'][:-1]
ret = app.lambda_handler(apigw_event, '')
assert ret['statusCode'] in failed_codes
apigw_event['pathParameters']['hash'] = 'garbage'
ret = app.lambda_handler(apigw_event, '')
assert ret['statusCode'] in failed_codes
| 29.619048
| 117
| 0.595927
|
import json
import pytest
import os
import sys
abs_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(f'{abs_path}/../..')
sys.path.append(f'{abs_path}/../../..')
print(sys.path[-1])
from moto import mock_dynamodb2
from redirect_handler import app
import boto_utils
from constants import TABLE_NAME
import boto3
@pytest.fixture()
def apigw_event():
with open('./events/redirect_simple_event.json') as fp:
return json.load(fp)
def test_lambda_handler(apigw_event):
@mock_dynamodb2
def mock_events():
dynamodb = boto3.resource('dynamodb')
created_table = dynamodb.create_table(
TableName=TABLE_NAME,
KeySchema=[
{
'AttributeName': 'redirect_url',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'redirect_url',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
)
boto_utils.put('https://example.com', '1234567', '', '')
mock_events()
ret = app.lambda_handler(apigw_event, '')
assert ret['statusCode'] == 302
assert 'location' in ret['headers']
failed_codes = {206, 204}
apigw_event['pathParameters']['hash'] = apigw_event['pathParameters']['hash'][:-1]
ret = app.lambda_handler(apigw_event, '')
assert ret['statusCode'] in failed_codes
apigw_event['pathParameters']['hash'] = 'garbage'
ret = app.lambda_handler(apigw_event, '')
assert ret['statusCode'] in failed_codes
| true
| true
|
f70b0689e2b44f236e300dba244ccadd6bdde193
| 4,551
|
py
|
Python
|
test method/tensorflow2.0/deep-sort-yolov4/demo.py
|
vedanthpadigelwar/AI_projects
|
885bbe76800f9a449414b3735ab4a4c4bd2e7aa0
|
[
"MIT"
] | null | null | null |
test method/tensorflow2.0/deep-sort-yolov4/demo.py
|
vedanthpadigelwar/AI_projects
|
885bbe76800f9a449414b3735ab4a4c4bd2e7aa0
|
[
"MIT"
] | null | null | null |
test method/tensorflow2.0/deep-sort-yolov4/demo.py
|
vedanthpadigelwar/AI_projects
|
885bbe76800f9a449414b3735ab4a4c4bd2e7aa0
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from timeit import time
import warnings
import cv2
import numpy as np
from PIL import Image
from yolo import YOLO
from deep_sort import preprocessing
from deep_sort import nn_matching
from deep_sort.detection import Detection
from deep_sort.detection_yolo import Detection_YOLO
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
import imutils.video
from videocaptureasync import VideoCaptureAsync
warnings.filterwarnings('ignore')
def main(yolo):
# Definition of the parameters
max_cosine_distance = 0.3
nn_budget = None
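    # max_cosine_distance gates appearance matches in the Deep SORT metric;
    # nn_budget caps how many feature samples are kept per track (None keeps all).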
nms_max_overlap = 1.0
# Deep SORT
model_filename = 'model_data/mars-small128.pb'
encoder = gdet.create_box_encoder(model_filename, batch_size=1)
metric = nn_matching.NearestNeighborDistanceMetric(
"cosine", max_cosine_distance, nn_budget)
tracker = Tracker(metric)
tracking = True
writeVideo_flag = True
asyncVideo_flag = False
file_path = 'video.webm'
if asyncVideo_flag:
video_capture = VideoCaptureAsync(file_path)
else:
video_capture = cv2.VideoCapture(file_path)
if asyncVideo_flag:
video_capture.start()
if writeVideo_flag:
if asyncVideo_flag:
w = int(video_capture.cap.get(3))
h = int(video_capture.cap.get(4))
else:
w = int(video_capture.get(3))
h = int(video_capture.get(4))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output_yolov4.avi', fourcc, 30, (w, h))
frame_index = -1
fps = 0.0
fps_imutils = imutils.video.FPS().start()
while True:
ret, frame = video_capture.read() # frame shape 640*480*3
if ret != True:
break
t1 = time.time()
image = Image.fromarray(frame[..., ::-1]) # bgr to rgb
boxes, confidence, classes = yolo.detect_image(image)
if tracking:
features = encoder(frame, boxes)
detections = [Detection(bbox, confidence, cls, feature) for bbox, confidence, cls, feature in
zip(boxes, confidence, classes, features)]
else:
detections = [Detection_YOLO(bbox, confidence, cls) for bbox, confidence, cls in
zip(boxes, confidence, classes)]
# Run non-maxima suppression.
boxes = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
indices = preprocessing.non_max_suppression(
boxes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
if tracking:
# Call the tracker
tracker.predict()
tracker.update(detections)
for track in tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
bbox = track.to_tlbr()
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(
bbox[2]), int(bbox[3])), (255, 255, 255), 2)
cv2.putText(frame, "ID: " + str(track.track_id), (int(bbox[0]), int(bbox[1])), 0,
1.5e-3 * frame.shape[0], (0, 255, 0), 1)
for det in detections:
bbox = det.to_tlbr()
score = "%.2f" % round(det.confidence * 100, 2) + "%"
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(
bbox[2]), int(bbox[3])), (255, 0, 0), 2)
if len(classes) > 0:
cls = det.cls
cv2.putText(frame, str(cls) + " " + score, (int(bbox[0]), int(bbox[3])), 0,
1.5e-3 * frame.shape[0], (0, 255, 0), 1)
cv2.imshow('', frame)
if writeVideo_flag: # and not asyncVideo_flag:
# save a frame
out.write(frame)
frame_index = frame_index + 1
fps_imutils.update()
if not asyncVideo_flag:
fps = (fps + (1./(time.time()-t1))) / 2
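            # Running estimate: average the previous FPS with the rate of the last frame.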
print("FPS = %f" % (fps))
# Press Q to stop!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
fps_imutils.stop()
print('imutils FPS: {}'.format(fps_imutils.fps()))
if asyncVideo_flag:
video_capture.stop()
else:
video_capture.release()
if writeVideo_flag:
out.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main(YOLO())
| 30.34
| 105
| 0.587124
|
from __future__ import division, print_function, absolute_import
from timeit import time
import warnings
import cv2
import numpy as np
from PIL import Image
from yolo import YOLO
from deep_sort import preprocessing
from deep_sort import nn_matching
from deep_sort.detection import Detection
from deep_sort.detection_yolo import Detection_YOLO
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
import imutils.video
from videocaptureasync import VideoCaptureAsync
warnings.filterwarnings('ignore')
def main(yolo):
max_cosine_distance = 0.3
nn_budget = None
nms_max_overlap = 1.0
model_filename = 'model_data/mars-small128.pb'
encoder = gdet.create_box_encoder(model_filename, batch_size=1)
metric = nn_matching.NearestNeighborDistanceMetric(
"cosine", max_cosine_distance, nn_budget)
tracker = Tracker(metric)
tracking = True
writeVideo_flag = True
asyncVideo_flag = False
file_path = 'video.webm'
if asyncVideo_flag:
video_capture = VideoCaptureAsync(file_path)
else:
video_capture = cv2.VideoCapture(file_path)
if asyncVideo_flag:
video_capture.start()
if writeVideo_flag:
if asyncVideo_flag:
w = int(video_capture.cap.get(3))
h = int(video_capture.cap.get(4))
else:
w = int(video_capture.get(3))
h = int(video_capture.get(4))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output_yolov4.avi', fourcc, 30, (w, h))
frame_index = -1
fps = 0.0
fps_imutils = imutils.video.FPS().start()
while True:
ret, frame = video_capture.read()
if ret != True:
break
t1 = time.time()
image = Image.fromarray(frame[..., ::-1])
boxes, confidence, classes = yolo.detect_image(image)
if tracking:
features = encoder(frame, boxes)
detections = [Detection(bbox, confidence, cls, feature) for bbox, confidence, cls, feature in
zip(boxes, confidence, classes, features)]
else:
detections = [Detection_YOLO(bbox, confidence, cls) for bbox, confidence, cls in
zip(boxes, confidence, classes)]
boxes = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
indices = preprocessing.non_max_suppression(
boxes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
if tracking:
tracker.predict()
tracker.update(detections)
for track in tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
bbox = track.to_tlbr()
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(
bbox[2]), int(bbox[3])), (255, 255, 255), 2)
cv2.putText(frame, "ID: " + str(track.track_id), (int(bbox[0]), int(bbox[1])), 0,
1.5e-3 * frame.shape[0], (0, 255, 0), 1)
for det in detections:
bbox = det.to_tlbr()
score = "%.2f" % round(det.confidence * 100, 2) + "%"
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(
bbox[2]), int(bbox[3])), (255, 0, 0), 2)
if len(classes) > 0:
cls = det.cls
cv2.putText(frame, str(cls) + " " + score, (int(bbox[0]), int(bbox[3])), 0,
1.5e-3 * frame.shape[0], (0, 255, 0), 1)
cv2.imshow('', frame)
if writeVideo_flag:
out.write(frame)
frame_index = frame_index + 1
fps_imutils.update()
if not asyncVideo_flag:
fps = (fps + (1./(time.time()-t1))) / 2
print("FPS = %f" % (fps))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
fps_imutils.stop()
print('imutils FPS: {}'.format(fps_imutils.fps()))
if asyncVideo_flag:
video_capture.stop()
else:
video_capture.release()
if writeVideo_flag:
out.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main(YOLO())
| true
| true
|
f70b07204b98d80e64ad1e1deb637e4254ae138a
| 1,331
|
py
|
Python
|
var/spack/repos/builtin/packages/py-mypy/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2020-09-02T08:41:39.000Z
|
2020-09-02T08:41:39.000Z
|
var/spack/repos/builtin/packages/py-mypy/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 25
|
2021-02-08T14:39:48.000Z
|
2022-03-21T18:37:29.000Z
|
var/spack/repos/builtin/packages/py-mypy/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 7
|
2018-09-13T18:04:56.000Z
|
2020-03-18T20:52:06.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMypy(PythonPackage):
"""Optional static typing for Python."""
homepage = "http://www.mypy-lang.org/"
pypi = "mypy/mypy-0.740.tar.gz"
version('0.910', sha256='704098302473cb31a218f1775a873b376b30b4c18229421e9e9dc8916fd16150')
version('0.900', sha256='65c78570329c54fb40f956f7645e2359af5da9d8c54baa44f461cdc7f4984108')
version('0.800', sha256='e0202e37756ed09daf4b0ba64ad2c245d357659e014c3f51d8cd0681ba66940a')
version('0.790', sha256='2b21ba45ad9ef2e2eb88ce4aeadd0112d0f5026418324176fd494a6824b74975')
version('0.740', sha256='48c8bc99380575deb39f5d3400ebb6a8a1cb5cc669bbba4d3bb30f904e0a0e7d')
variant('python2', default=False, description='Enable checking of python 2 type annotations')
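    # The variant can be enabled at install time, e.g. `spack install py-mypy +python2`.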
depends_on("python@3.5:", type=("build", "run"))
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-typed-ast@1.4.0:1.4', type=('build', 'run'))
depends_on('py-typing-extensions@3.7.4:', type=('build', 'run'))
depends_on('py-mypy-extensions@0.4.3:0.4', type=('build', 'run'))
depends_on('py-toml', when='@0.900:', type=('build', 'run'))
| 45.896552
| 97
| 0.730278
|
from spack import *
class PyMypy(PythonPackage):
homepage = "http://www.mypy-lang.org/"
pypi = "mypy/mypy-0.740.tar.gz"
version('0.910', sha256='704098302473cb31a218f1775a873b376b30b4c18229421e9e9dc8916fd16150')
version('0.900', sha256='65c78570329c54fb40f956f7645e2359af5da9d8c54baa44f461cdc7f4984108')
version('0.800', sha256='e0202e37756ed09daf4b0ba64ad2c245d357659e014c3f51d8cd0681ba66940a')
version('0.790', sha256='2b21ba45ad9ef2e2eb88ce4aeadd0112d0f5026418324176fd494a6824b74975')
version('0.740', sha256='48c8bc99380575deb39f5d3400ebb6a8a1cb5cc669bbba4d3bb30f904e0a0e7d')
variant('python2', default=False, description='Enable checking of python 2 type annotations')
depends_on("python@3.5:", type=("build", "run"))
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-typed-ast@1.4.0:1.4', type=('build', 'run'))
depends_on('py-typing-extensions@3.7.4:', type=('build', 'run'))
depends_on('py-mypy-extensions@0.4.3:0.4', type=('build', 'run'))
depends_on('py-toml', when='@0.900:', type=('build', 'run'))
| true
| true
|
f70b076d6e55129ea6d8cf88397c36dbdcabc138
| 3,122
|
py
|
Python
|
superset/models/schedules.py
|
EikotheRookie/incubator-superset-xzf
|
5d167afb9499d7ce30c7ea763b19993af347dc23
|
[
"Apache-2.0"
] | 1
|
2020-06-25T14:30:12.000Z
|
2020-06-25T14:30:12.000Z
|
superset/models/schedules.py
|
EikotheRookie/incubator-superset-xzf
|
5d167afb9499d7ce30c7ea763b19993af347dc23
|
[
"Apache-2.0"
] | 49
|
2021-06-08T22:27:53.000Z
|
2022-03-28T15:59:51.000Z
|
superset/models/schedules.py
|
hixio-mh/incubator-superset
|
7b7e097325fa8f6f785fe15b83f39e922025022f
|
[
"Apache-2.0"
] | 2
|
2019-07-19T09:34:24.000Z
|
2019-09-20T10:02:04.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Models for scheduled execution of jobs"""
import enum
from typing import Optional, Type
from flask_appbuilder import Model
from sqlalchemy import Boolean, Column, Enum, ForeignKey, Integer, String, Text
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from superset import security_manager
from superset.models.helpers import AuditMixinNullable, ImportMixin
metadata = Model.metadata # pylint: disable=no-member
class ScheduleType(str, enum.Enum):
slice = "slice"
dashboard = "dashboard"
class EmailDeliveryType(str, enum.Enum):
attachment = "Attachment"
inline = "Inline"
class SliceEmailReportFormat(str, enum.Enum):
visualization = "Visualization"
data = "Raw data"
class EmailSchedule:
"""Schedules for emailing slices / dashboards"""
__tablename__ = "email_schedules"
id = Column(Integer, primary_key=True)
active = Column(Boolean, default=True, index=True)
crontab = Column(String(50))
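    # user_id and user are wrapped in declared_attr so that each concrete
    # schedule table built from this mixin gets its own foreign key and
    # relationship to the user model.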
@declared_attr
def user_id(self):
return Column(Integer, ForeignKey("ab_user.id"))
@declared_attr
def user(self):
return relationship(
security_manager.user_model,
backref=self.__tablename__,
foreign_keys=[self.user_id],
)
recipients = Column(Text)
deliver_as_group = Column(Boolean, default=False)
delivery_type = Column(Enum(EmailDeliveryType))
class DashboardEmailSchedule(Model, AuditMixinNullable, ImportMixin, EmailSchedule):
__tablename__ = "dashboard_email_schedules"
dashboard_id = Column(Integer, ForeignKey("dashboards.id"))
dashboard = relationship(
"Dashboard", backref="email_schedules", foreign_keys=[dashboard_id]
)
class SliceEmailSchedule(Model, AuditMixinNullable, ImportMixin, EmailSchedule):
__tablename__ = "slice_email_schedules"
slice_id = Column(Integer, ForeignKey("slices.id"))
slice = relationship("Slice", backref="email_schedules", foreign_keys=[slice_id])
email_format = Column(Enum(SliceEmailReportFormat))
def get_scheduler_model(report_type: ScheduleType) -> Optional[Type[EmailSchedule]]:
if report_type == ScheduleType.dashboard:
return DashboardEmailSchedule
elif report_type == ScheduleType.slice:
return SliceEmailSchedule
return None
| 32.863158
| 85
| 0.744395
|
import enum
from typing import Optional, Type
from flask_appbuilder import Model
from sqlalchemy import Boolean, Column, Enum, ForeignKey, Integer, String, Text
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from superset import security_manager
from superset.models.helpers import AuditMixinNullable, ImportMixin
metadata = Model.metadata
class ScheduleType(str, enum.Enum):
slice = "slice"
dashboard = "dashboard"
class EmailDeliveryType(str, enum.Enum):
attachment = "Attachment"
inline = "Inline"
class SliceEmailReportFormat(str, enum.Enum):
visualization = "Visualization"
data = "Raw data"
class EmailSchedule:
__tablename__ = "email_schedules"
id = Column(Integer, primary_key=True)
active = Column(Boolean, default=True, index=True)
crontab = Column(String(50))
@declared_attr
def user_id(self):
return Column(Integer, ForeignKey("ab_user.id"))
@declared_attr
def user(self):
return relationship(
security_manager.user_model,
backref=self.__tablename__,
foreign_keys=[self.user_id],
)
recipients = Column(Text)
deliver_as_group = Column(Boolean, default=False)
delivery_type = Column(Enum(EmailDeliveryType))
class DashboardEmailSchedule(Model, AuditMixinNullable, ImportMixin, EmailSchedule):
__tablename__ = "dashboard_email_schedules"
dashboard_id = Column(Integer, ForeignKey("dashboards.id"))
dashboard = relationship(
"Dashboard", backref="email_schedules", foreign_keys=[dashboard_id]
)
class SliceEmailSchedule(Model, AuditMixinNullable, ImportMixin, EmailSchedule):
__tablename__ = "slice_email_schedules"
slice_id = Column(Integer, ForeignKey("slices.id"))
slice = relationship("Slice", backref="email_schedules", foreign_keys=[slice_id])
email_format = Column(Enum(SliceEmailReportFormat))
def get_scheduler_model(report_type: ScheduleType) -> Optional[Type[EmailSchedule]]:
if report_type == ScheduleType.dashboard:
return DashboardEmailSchedule
elif report_type == ScheduleType.slice:
return SliceEmailSchedule
return None
| true
| true
|
f70b07933f8381b9d635ee33b267d6a4228698c7
| 3,662
|
py
|
Python
|
ebay_accounts/trading_api.py
|
luke-dixon/django-ebay-accounts
|
54cf0e90b75dfbdd63bcd588f3c4771ebe1297c0
|
[
"BSD-3-Clause"
] | 4
|
2018-01-28T20:10:11.000Z
|
2020-09-06T14:30:36.000Z
|
ebay_accounts/trading_api.py
|
luke-dixon/django-ebay-accounts
|
54cf0e90b75dfbdd63bcd588f3c4771ebe1297c0
|
[
"BSD-3-Clause"
] | 7
|
2017-06-04T08:50:06.000Z
|
2020-09-06T16:03:53.000Z
|
ebay_accounts/trading_api.py
|
luke-dixon/django-ebay-accounts
|
54cf0e90b75dfbdd63bcd588f3c4771ebe1297c0
|
[
"BSD-3-Clause"
] | 7
|
2017-06-01T09:51:35.000Z
|
2021-05-25T16:01:53.000Z
|
# -*- coding: utf-8 -*-
"""
Ebay Trading API
"""
import xmltodict
import requests
from . import app_settings as settings
class TradingAPIWarning(Exception):
pass
class TradingAPIFailure(Exception):
pass
class TradingAPIInvalidResponse(Exception):
pass
class TradingAPI(object):
_last_response = None
def __init__(self, production=False, site_id=0, token=None):
self.production = production
if self.production is True:
self._dev_id = settings.EBAY_PRODUCTION_DEVID
self._app_id = settings.EBAY_PRODUCTION_APPID
self._cert_id = settings.EBAY_PRODUCTION_CERTID
self._endpoint = settings.EBAY_PRODUCTION_TRADING_API_ENDPOINT
self.ru_name = settings.EBAY_PRODUCTION_RU_NAME
else:
self._dev_id = settings.EBAY_SANDBOX_DEVID
self._app_id = settings.EBAY_SANDBOX_APPID
self._cert_id = settings.EBAY_SANDBOX_CERTID
self._endpoint = settings.EBAY_SANDBOX_TRADING_API_ENDPOINT
self.ru_name = settings.EBAY_SANDBOX_RU_NAME
self.site_id = site_id
self.version = settings.EBAY_TRADING_API_VERSION
self._token = token
def _get_requester_credentials(self):
return {'eBayAuthToken': self._token}
def _get_headers(self, call):
return {
'X-EBAY-API-COMPATIBILITY-LEVEL': str(self.version),
'X-EBAY-API-DEV-NAME': self._dev_id,
'X-EBAY-API-APP-NAME': self._app_id,
'X-EBAY-API-CERT-NAME': self._cert_id,
'X-EBAY-API-SITEID': str(self.site_id),
'X-EBAY-API-CALL-NAME': call,
}
def _get_xml_request(self, call, kw_dict, include_requester_credentials):
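        # Build the XML body for the Trading API call: the root element is
        # "<call>Request" in the eBay namespace, the keyword dict is copied in
        # verbatim, and the auth token is attached as RequesterCredentials when set.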
request_key = '{call}Request'.format(call=call)
request_dict = {request_key: {
'@xmlns': 'urn:ebay:apis:eBLBaseComponents',
}}
for key, value in kw_dict.items():
request_dict[request_key][key] = value
if self._token and include_requester_credentials:
credentials = self._get_requester_credentials()
request_dict[request_key]['RequesterCredentials'] = credentials
data = xmltodict.unparse(request_dict)
return data
def _get_data_from_response(self, call, data, response):
d = xmltodict.parse(response.content)
response_key = '{call}Response'.format(call=call)
data = d[response_key]
return data
def execute(
self,
call,
kw_dict,
include_requester_credentials=True,
raise_on_warning=False,
raise_on_failure=True):
headers = self._get_headers(call)
data = self._get_xml_request(
call, kw_dict, include_requester_credentials)
response = requests.post(self._endpoint, data=data, headers=headers)
self._last_response = response
response_data = self._get_data_from_response(call, data, response)
if 'Ack' not in response_data:
raise TradingAPIInvalidResponse('No Ack field in response')
if raise_on_failure and response_data['Ack'].lower() == 'failure':
raise TradingAPIFailure('{0}'.format(response_data.get(
'Errors', 'No error list found')))
if raise_on_warning and response_data['Ack'].lower() == 'warning':
raise TradingAPIWarning('{0}'.format(response_data.get(
'Errors', 'No error list found')))
return response_data
def set_token(self, token):
self._token = token
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
| 35.211538
| 77
| 0.648553
|
import xmltodict
import requests
from . import app_settings as settings
class TradingAPIWarning(Exception):
pass
class TradingAPIFailure(Exception):
pass
class TradingAPIInvalidResponse(Exception):
pass
class TradingAPI(object):
_last_response = None
def __init__(self, production=False, site_id=0, token=None):
self.production = production
if self.production is True:
self._dev_id = settings.EBAY_PRODUCTION_DEVID
self._app_id = settings.EBAY_PRODUCTION_APPID
self._cert_id = settings.EBAY_PRODUCTION_CERTID
self._endpoint = settings.EBAY_PRODUCTION_TRADING_API_ENDPOINT
self.ru_name = settings.EBAY_PRODUCTION_RU_NAME
else:
self._dev_id = settings.EBAY_SANDBOX_DEVID
self._app_id = settings.EBAY_SANDBOX_APPID
self._cert_id = settings.EBAY_SANDBOX_CERTID
self._endpoint = settings.EBAY_SANDBOX_TRADING_API_ENDPOINT
self.ru_name = settings.EBAY_SANDBOX_RU_NAME
self.site_id = site_id
self.version = settings.EBAY_TRADING_API_VERSION
self._token = token
def _get_requester_credentials(self):
return {'eBayAuthToken': self._token}
def _get_headers(self, call):
return {
'X-EBAY-API-COMPATIBILITY-LEVEL': str(self.version),
'X-EBAY-API-DEV-NAME': self._dev_id,
'X-EBAY-API-APP-NAME': self._app_id,
'X-EBAY-API-CERT-NAME': self._cert_id,
'X-EBAY-API-SITEID': str(self.site_id),
'X-EBAY-API-CALL-NAME': call,
}
def _get_xml_request(self, call, kw_dict, include_requester_credentials):
request_key = '{call}Request'.format(call=call)
request_dict = {request_key: {
'@xmlns': 'urn:ebay:apis:eBLBaseComponents',
}}
for key, value in kw_dict.items():
request_dict[request_key][key] = value
if self._token and include_requester_credentials:
credentials = self._get_requester_credentials()
request_dict[request_key]['RequesterCredentials'] = credentials
data = xmltodict.unparse(request_dict)
return data
def _get_data_from_response(self, call, data, response):
d = xmltodict.parse(response.content)
response_key = '{call}Response'.format(call=call)
data = d[response_key]
return data
def execute(
self,
call,
kw_dict,
include_requester_credentials=True,
raise_on_warning=False,
raise_on_failure=True):
headers = self._get_headers(call)
data = self._get_xml_request(
call, kw_dict, include_requester_credentials)
response = requests.post(self._endpoint, data=data, headers=headers)
self._last_response = response
response_data = self._get_data_from_response(call, data, response)
if 'Ack' not in response_data:
raise TradingAPIInvalidResponse('No Ack field in response')
if raise_on_failure and response_data['Ack'].lower() == 'failure':
raise TradingAPIFailure('{0}'.format(response_data.get(
'Errors', 'No error list found')))
if raise_on_warning and response_data['Ack'].lower() == 'warning':
raise TradingAPIWarning('{0}'.format(response_data.get(
'Errors', 'No error list found')))
return response_data
def set_token(self, token):
self._token = token
| true
| true
|
f70b081736313ab52d82208c2436e124a1ec7ba4
| 2,555
|
py
|
Python
|
tests/cli/test_keyboard.py
|
RasaHQ/taipo
|
0a0488a591362eca44a7a315cf38f44393b8d209
|
[
"MIT"
] | 28
|
2021-06-16T14:08:10.000Z
|
2022-03-25T13:26:29.000Z
|
tests/cli/test_keyboard.py
|
RasaHQ/taipo
|
0a0488a591362eca44a7a315cf38f44393b8d209
|
[
"MIT"
] | 16
|
2021-06-29T17:13:48.000Z
|
2021-12-13T17:22:13.000Z
|
tests/cli/test_keyboard.py
|
RasaHQ/taipo
|
0a0488a591362eca44a7a315cf38f44393b8d209
|
[
"MIT"
] | 6
|
2021-07-06T17:34:43.000Z
|
2022-03-11T10:50:00.000Z
|
import pathlib
import re
import pytest
from typer.testing import CliRunner
from taipo.__main__ import app
from taipo.common import nlu_path_to_dataframe
runner = CliRunner()
@pytest.mark.parametrize(
"path_in,path_out", [("nlu.yml", "nlu.yml"), ("foobar.yml", "foobar.yml")]
)
def test_keyboard_augment(tmp_path, path_in, path_out):
"""Ensure basic usage of command works."""
cmd = [
"keyboard",
"augment",
"tests/data/nlu/nlu.yml",
f"{tmp_path}/{path_in}",
]
runner.invoke(app, cmd)
expected = nlu_path_to_dataframe("tests/data/nlu/nlu.yml").shape
assert nlu_path_to_dataframe(f"{tmp_path}/{path_out}").shape == expected
def test_keyboard_augment_keeps_annotations(tmp_path):
"""Ensure the format of entity annotations is kept correctly."""
cmd = [
"keyboard",
"augment",
"tests/data/nlu/nlu.yml",
f"{tmp_path}/nlu.yml",
]
runner.invoke(app, cmd)
df_in = nlu_path_to_dataframe("tests/data/nlu/nlu.yml")
df_out = nlu_path_to_dataframe(f"{tmp_path}/nlu.yml")
annotation_pattern = r"\[\w+\]\(\w+\)"
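    # Matches Rasa-style entity annotations such as [Berlin](city).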
for text_in, text_out in zip(df_in.text, df_out.text):
annotations_in = re.findall(annotation_pattern, text_in)
annotations_out = re.findall(annotation_pattern, text_out)
assert len(annotations_in) == len(annotations_out)
@pytest.mark.parametrize(
"lang", ["de", "en", "es", "fr", "he", "it", "nl", "pl", "th", "uk"]
)
def test_keyboard_lang(tmp_path, lang):
"""
Ensure that the languages listed in nlpaug indeed work.
https://github.com/makcedward/nlpaug/tree/master/nlpaug/res/char/keyboard
"""
cmd = [
"keyboard",
"augment",
"tests/data/nlu/nlu.yml",
f"{tmp_path}/nlu.yml",
"--lang",
lang,
]
runner.invoke(app, cmd)
expected = nlu_path_to_dataframe("tests/data/nlu/nlu.yml").shape
assert nlu_path_to_dataframe(f"{tmp_path}/nlu.yml").shape == expected
def test_keyboard_generate():
"""Ensure basic usage of command works."""
files = [
"data/nlu-train.yml",
"data/typod-nlu-train.yml",
"test/nlu-valid.yml",
"test/typod-nlu-valid.yml",
]
for f in files:
if pathlib.Path(f).exists():
pathlib.Path(f).unlink()
cmd = ["keyboard", "generate", "data/nlu-orig.yml", "--prefix", "typod"]
res = runner.invoke(app, cmd)
for f in files:
assert pathlib.Path(f).exists()
pathlib.Path(f).unlink()
assert res.exit_code == 0
| 30.058824
| 78
| 0.630137
|
import pathlib
import re
import pytest
from typer.testing import CliRunner
from taipo.__main__ import app
from taipo.common import nlu_path_to_dataframe
runner = CliRunner()
@pytest.mark.parametrize(
"path_in,path_out", [("nlu.yml", "nlu.yml"), ("foobar.yml", "foobar.yml")]
)
def test_keyboard_augment(tmp_path, path_in, path_out):
cmd = [
"keyboard",
"augment",
"tests/data/nlu/nlu.yml",
f"{tmp_path}/{path_in}",
]
runner.invoke(app, cmd)
expected = nlu_path_to_dataframe("tests/data/nlu/nlu.yml").shape
assert nlu_path_to_dataframe(f"{tmp_path}/{path_out}").shape == expected
def test_keyboard_augment_keeps_annotations(tmp_path):
cmd = [
"keyboard",
"augment",
"tests/data/nlu/nlu.yml",
f"{tmp_path}/nlu.yml",
]
runner.invoke(app, cmd)
df_in = nlu_path_to_dataframe("tests/data/nlu/nlu.yml")
df_out = nlu_path_to_dataframe(f"{tmp_path}/nlu.yml")
annotation_pattern = r"\[\w+\]\(\w+\)"
for text_in, text_out in zip(df_in.text, df_out.text):
annotations_in = re.findall(annotation_pattern, text_in)
annotations_out = re.findall(annotation_pattern, text_out)
assert len(annotations_in) == len(annotations_out)
@pytest.mark.parametrize(
"lang", ["de", "en", "es", "fr", "he", "it", "nl", "pl", "th", "uk"]
)
def test_keyboard_lang(tmp_path, lang):
cmd = [
"keyboard",
"augment",
"tests/data/nlu/nlu.yml",
f"{tmp_path}/nlu.yml",
"--lang",
lang,
]
runner.invoke(app, cmd)
expected = nlu_path_to_dataframe("tests/data/nlu/nlu.yml").shape
assert nlu_path_to_dataframe(f"{tmp_path}/nlu.yml").shape == expected
def test_keyboard_generate():
files = [
"data/nlu-train.yml",
"data/typod-nlu-train.yml",
"test/nlu-valid.yml",
"test/typod-nlu-valid.yml",
]
for f in files:
if pathlib.Path(f).exists():
pathlib.Path(f).unlink()
cmd = ["keyboard", "generate", "data/nlu-orig.yml", "--prefix", "typod"]
res = runner.invoke(app, cmd)
for f in files:
assert pathlib.Path(f).exists()
pathlib.Path(f).unlink()
assert res.exit_code == 0
| true
| true
|
f70b087afb7bff339fcd596ca2064c38ebd2b923
| 7,044
|
py
|
Python
|
scirpy/tests/test_util.py
|
ktpolanski/scirpy
|
2d6e3a6347ad54425a8dea635fa04609aaf33c57
|
[
"BSD-3-Clause"
] | null | null | null |
scirpy/tests/test_util.py
|
ktpolanski/scirpy
|
2d6e3a6347ad54425a8dea635fa04609aaf33c57
|
[
"BSD-3-Clause"
] | null | null | null |
scirpy/tests/test_util.py
|
ktpolanski/scirpy
|
2d6e3a6347ad54425a8dea635fa04609aaf33c57
|
[
"BSD-3-Clause"
] | null | null | null |
from scirpy.util import (
_is_na,
_is_false,
_is_true,
_normalize_counts,
_is_symmetric,
_reduce_nonzero,
_translate_dna_to_protein,
)
from scirpy.util.graph import layout_components
from itertools import combinations
import igraph as ig
import numpy as np
import pandas as pd
import numpy.testing as npt
import pytest
import scipy.sparse
from .fixtures import adata_tra
import warnings
def test_reduce_nonzero():
A = np.array([[0, 0, 3], [1, 2, 5], [7, 0, 0]])
B = np.array([[1, 0, 3], [2, 1, 0], [6, 0, 5]])
A_csr = scipy.sparse.csr_matrix(A)
B_csr = scipy.sparse.csr_matrix(B)
A_csc = scipy.sparse.csc_matrix(A)
B_csc = scipy.sparse.csc_matrix(B)
expected = np.array([[1, 0, 3], [1, 1, 5], [6, 0, 5]])
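    # The expected result appears to keep, per position, the smaller of the two
    # stored (nonzero) values, i.e. a minimum over the sparse entries.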
with pytest.raises(ValueError):
_reduce_nonzero(A, B)
npt.assert_equal(_reduce_nonzero(A_csr, B_csr).toarray(), expected)
npt.assert_equal(_reduce_nonzero(A_csc, B_csc).toarray(), expected)
npt.assert_equal(_reduce_nonzero(A_csr, A_csr.copy()).toarray(), A_csr.toarray())
def test_is_symmatric():
M = np.array([[1, 2, 2], [2, 1, 3], [2, 3, 1]])
S_csr = scipy.sparse.csr_matrix(M)
S_csc = scipy.sparse.csc_matrix(M)
S_lil = scipy.sparse.lil_matrix(M)
assert _is_symmetric(M)
assert _is_symmetric(S_csr)
assert _is_symmetric(S_csc)
assert _is_symmetric(S_lil)
M = np.array([[1, 2, 2], [2, 1, np.nan], [2, np.nan, np.nan]])
S_csr = scipy.sparse.csr_matrix(M)
S_csc = scipy.sparse.csc_matrix(M)
S_lil = scipy.sparse.lil_matrix(M)
assert _is_symmetric(M)
assert _is_symmetric(S_csr)
assert _is_symmetric(S_csc)
assert _is_symmetric(S_lil)
M = np.array([[1, 2, 2], [2, 1, 3], [3, 2, 1]])
S_csr = scipy.sparse.csr_matrix(M)
S_csc = scipy.sparse.csc_matrix(M)
S_lil = scipy.sparse.lil_matrix(M)
assert not _is_symmetric(M)
assert not _is_symmetric(S_csr)
assert not _is_symmetric(S_csc)
assert not _is_symmetric(S_lil)
def test_is_na():
warnings.filterwarnings("error")
assert _is_na(None)
assert _is_na(np.nan)
assert _is_na("nan")
assert not _is_na(42)
assert not _is_na("Foobar")
assert not _is_na(dict())
array_test = np.array(["None", "nan", None, np.nan, "foobar"])
array_expect = np.array([True, True, True, True, False])
array_test_bool = np.array([True, False, True])
array_expect_bool = np.array([False, False, False])
npt.assert_equal(_is_na(array_test), array_expect)
npt.assert_equal(_is_na(pd.Series(array_test)), array_expect)
npt.assert_equal(_is_na(array_test_bool), array_expect_bool)
npt.assert_equal(_is_na(pd.Series(array_test_bool)), array_expect_bool)
def test_is_false():
warnings.filterwarnings("error")
assert _is_false(False)
assert _is_false(0)
assert _is_false("")
assert _is_false("False")
assert _is_false("false")
assert not _is_false(42)
assert not _is_false(True)
assert not _is_false("true")
assert not _is_false("foobar")
assert not _is_false(np.nan)
assert not _is_false(None)
assert not _is_false("nan")
assert not _is_false("None")
array_test = np.array(
["False", "false", 0, 1, True, False, "true", "Foobar", np.nan, "nan"],
dtype=object,
)
array_test_str = array_test.astype("str")
array_expect = np.array(
[True, True, True, False, False, True, False, False, False, False]
)
array_test_bool = np.array([True, False, True])
array_expect_bool = np.array([False, True, False])
npt.assert_equal(_is_false(array_test), array_expect)
npt.assert_equal(_is_false(array_test_str), array_expect)
npt.assert_equal(_is_false(pd.Series(array_test)), array_expect)
npt.assert_equal(_is_false(pd.Series(array_test_str)), array_expect)
npt.assert_equal(_is_false(array_test_bool), array_expect_bool)
npt.assert_equal(_is_false(pd.Series(array_test_bool)), array_expect_bool)
def test_is_true():
warnings.filterwarnings("error")
assert not _is_true(False)
assert not _is_true(0)
assert not _is_true("")
assert not _is_true("False")
assert not _is_true("false")
assert not _is_true("0")
assert not _is_true(np.nan)
assert not _is_true(None)
assert not _is_true("nan")
assert not _is_true("None")
assert _is_true(42)
assert _is_true(True)
assert _is_true("true")
assert _is_true("foobar")
assert _is_true("True")
array_test = np.array(
["False", "false", 0, 1, True, False, "true", "Foobar", np.nan, "nan"],
dtype=object,
)
array_test_str = array_test.astype("str")
array_expect = np.array(
[False, False, False, True, True, False, True, True, False, False]
)
array_test_bool = np.array([True, False, True])
array_expect_bool = np.array([True, False, True])
npt.assert_equal(_is_true(array_test), array_expect)
npt.assert_equal(_is_true(array_test_str), array_expect)
npt.assert_equal(_is_true(pd.Series(array_test)), array_expect)
npt.assert_equal(_is_true(pd.Series(array_test_str)), array_expect)
npt.assert_equal(_is_true(array_test_bool), array_expect_bool)
npt.assert_equal(_is_true(pd.Series(array_test_bool)), array_expect_bool)
@pytest.fixture
def group_df():
return pd.DataFrame().assign(
cell=["c1", "c2", "c3", "c4", "c5", "c6"],
sample=["s2", "s1", "s2", "s2", "s2", "s1"],
)
def test_normalize_counts(group_df):
with pytest.raises(ValueError):
_normalize_counts(group_df, True, None)
npt.assert_equal(_normalize_counts(group_df, False), [1] * 6)
npt.assert_equal(
_normalize_counts(group_df, "sample"), [0.25, 0.5, 0.25, 0.25, 0.25, 0.5]
)
npt.assert_equal(
_normalize_counts(group_df, True, "sample"), [0.25, 0.5, 0.25, 0.25, 0.25, 0.5]
)
def test_layout_components():
g = ig.Graph()
# add 100 unconnected nodes
g.add_vertices(100)
# add 50 2-node components
g.add_vertices(100)
g.add_edges([(ii, ii + 1) for ii in range(100, 200, 2)])
# add 33 3-node components
g.add_vertices(100)
for ii in range(200, 299, 3):
g.add_edges([(ii, ii + 1), (ii, ii + 2), (ii + 1, ii + 2)])
# add a couple of larger components
n = 300
for ii in np.random.randint(4, 30, size=10):
g.add_vertices(ii)
g.add_edges(combinations(range(n, n + ii), 2))
n += ii
layout_components(g, arrange_boxes="size", component_layout="fr")
try:
layout_components(g, arrange_boxes="rpack", component_layout="fr")
except ImportError:
warnings.warn(
"The 'rpack' layout-test was skipped because rectangle "
"packer is not installed. "
)
layout_components(g, arrange_boxes="squarify", component_layout="fr")
def test_translate_dna_to_protein(adata_tra):
for nt, aa in zip(adata_tra.obs["IR_VJ_1_cdr3_nt"], adata_tra.obs["IR_VJ_1_cdr3"]):
assert _translate_dna_to_protein(nt) == aa
| 32.611111
| 87
| 0.667376
|
from scirpy.util import (
_is_na,
_is_false,
_is_true,
_normalize_counts,
_is_symmetric,
_reduce_nonzero,
_translate_dna_to_protein,
)
from scirpy.util.graph import layout_components
from itertools import combinations
import igraph as ig
import numpy as np
import pandas as pd
import numpy.testing as npt
import pytest
import scipy.sparse
from .fixtures import adata_tra
import warnings
def test_reduce_nonzero():
A = np.array([[0, 0, 3], [1, 2, 5], [7, 0, 0]])
B = np.array([[1, 0, 3], [2, 1, 0], [6, 0, 5]])
A_csr = scipy.sparse.csr_matrix(A)
B_csr = scipy.sparse.csr_matrix(B)
A_csc = scipy.sparse.csc_matrix(A)
B_csc = scipy.sparse.csc_matrix(B)
expected = np.array([[1, 0, 3], [1, 1, 5], [6, 0, 5]])
with pytest.raises(ValueError):
_reduce_nonzero(A, B)
npt.assert_equal(_reduce_nonzero(A_csr, B_csr).toarray(), expected)
npt.assert_equal(_reduce_nonzero(A_csc, B_csc).toarray(), expected)
npt.assert_equal(_reduce_nonzero(A_csr, A_csr.copy()).toarray(), A_csr.toarray())
def test_is_symmatric():
M = np.array([[1, 2, 2], [2, 1, 3], [2, 3, 1]])
S_csr = scipy.sparse.csr_matrix(M)
S_csc = scipy.sparse.csc_matrix(M)
S_lil = scipy.sparse.lil_matrix(M)
assert _is_symmetric(M)
assert _is_symmetric(S_csr)
assert _is_symmetric(S_csc)
assert _is_symmetric(S_lil)
M = np.array([[1, 2, 2], [2, 1, np.nan], [2, np.nan, np.nan]])
S_csr = scipy.sparse.csr_matrix(M)
S_csc = scipy.sparse.csc_matrix(M)
S_lil = scipy.sparse.lil_matrix(M)
assert _is_symmetric(M)
assert _is_symmetric(S_csr)
assert _is_symmetric(S_csc)
assert _is_symmetric(S_lil)
M = np.array([[1, 2, 2], [2, 1, 3], [3, 2, 1]])
S_csr = scipy.sparse.csr_matrix(M)
S_csc = scipy.sparse.csc_matrix(M)
S_lil = scipy.sparse.lil_matrix(M)
assert not _is_symmetric(M)
assert not _is_symmetric(S_csr)
assert not _is_symmetric(S_csc)
assert not _is_symmetric(S_lil)
def test_is_na():
warnings.filterwarnings("error")
assert _is_na(None)
assert _is_na(np.nan)
assert _is_na("nan")
assert not _is_na(42)
assert not _is_na("Foobar")
assert not _is_na(dict())
array_test = np.array(["None", "nan", None, np.nan, "foobar"])
array_expect = np.array([True, True, True, True, False])
array_test_bool = np.array([True, False, True])
array_expect_bool = np.array([False, False, False])
npt.assert_equal(_is_na(array_test), array_expect)
npt.assert_equal(_is_na(pd.Series(array_test)), array_expect)
npt.assert_equal(_is_na(array_test_bool), array_expect_bool)
npt.assert_equal(_is_na(pd.Series(array_test_bool)), array_expect_bool)
def test_is_false():
warnings.filterwarnings("error")
assert _is_false(False)
assert _is_false(0)
assert _is_false("")
assert _is_false("False")
assert _is_false("false")
assert not _is_false(42)
assert not _is_false(True)
assert not _is_false("true")
assert not _is_false("foobar")
assert not _is_false(np.nan)
assert not _is_false(None)
assert not _is_false("nan")
assert not _is_false("None")
array_test = np.array(
["False", "false", 0, 1, True, False, "true", "Foobar", np.nan, "nan"],
dtype=object,
)
array_test_str = array_test.astype("str")
array_expect = np.array(
[True, True, True, False, False, True, False, False, False, False]
)
array_test_bool = np.array([True, False, True])
array_expect_bool = np.array([False, True, False])
npt.assert_equal(_is_false(array_test), array_expect)
npt.assert_equal(_is_false(array_test_str), array_expect)
npt.assert_equal(_is_false(pd.Series(array_test)), array_expect)
npt.assert_equal(_is_false(pd.Series(array_test_str)), array_expect)
npt.assert_equal(_is_false(array_test_bool), array_expect_bool)
npt.assert_equal(_is_false(pd.Series(array_test_bool)), array_expect_bool)
def test_is_true():
warnings.filterwarnings("error")
assert not _is_true(False)
assert not _is_true(0)
assert not _is_true("")
assert not _is_true("False")
assert not _is_true("false")
assert not _is_true("0")
assert not _is_true(np.nan)
assert not _is_true(None)
assert not _is_true("nan")
assert not _is_true("None")
assert _is_true(42)
assert _is_true(True)
assert _is_true("true")
assert _is_true("foobar")
assert _is_true("True")
array_test = np.array(
["False", "false", 0, 1, True, False, "true", "Foobar", np.nan, "nan"],
dtype=object,
)
array_test_str = array_test.astype("str")
array_expect = np.array(
[False, False, False, True, True, False, True, True, False, False]
)
array_test_bool = np.array([True, False, True])
array_expect_bool = np.array([True, False, True])
npt.assert_equal(_is_true(array_test), array_expect)
npt.assert_equal(_is_true(array_test_str), array_expect)
npt.assert_equal(_is_true(pd.Series(array_test)), array_expect)
npt.assert_equal(_is_true(pd.Series(array_test_str)), array_expect)
npt.assert_equal(_is_true(array_test_bool), array_expect_bool)
npt.assert_equal(_is_true(pd.Series(array_test_bool)), array_expect_bool)
@pytest.fixture
def group_df():
return pd.DataFrame().assign(
cell=["c1", "c2", "c3", "c4", "c5", "c6"],
sample=["s2", "s1", "s2", "s2", "s2", "s1"],
)
def test_normalize_counts(group_df):
with pytest.raises(ValueError):
_normalize_counts(group_df, True, None)
npt.assert_equal(_normalize_counts(group_df, False), [1] * 6)
npt.assert_equal(
_normalize_counts(group_df, "sample"), [0.25, 0.5, 0.25, 0.25, 0.25, 0.5]
)
npt.assert_equal(
_normalize_counts(group_df, True, "sample"), [0.25, 0.5, 0.25, 0.25, 0.25, 0.5]
)
def test_layout_components():
g = ig.Graph()
g.add_vertices(100)
g.add_vertices(100)
g.add_edges([(ii, ii + 1) for ii in range(100, 200, 2)])
g.add_vertices(100)
for ii in range(200, 299, 3):
g.add_edges([(ii, ii + 1), (ii, ii + 2), (ii + 1, ii + 2)])
n = 300
for ii in np.random.randint(4, 30, size=10):
g.add_vertices(ii)
g.add_edges(combinations(range(n, n + ii), 2))
n += ii
layout_components(g, arrange_boxes="size", component_layout="fr")
try:
layout_components(g, arrange_boxes="rpack", component_layout="fr")
except ImportError:
warnings.warn(
"The 'rpack' layout-test was skipped because rectangle "
"packer is not installed. "
)
layout_components(g, arrange_boxes="squarify", component_layout="fr")
def test_translate_dna_to_protein(adata_tra):
for nt, aa in zip(adata_tra.obs["IR_VJ_1_cdr3_nt"], adata_tra.obs["IR_VJ_1_cdr3"]):
assert _translate_dna_to_protein(nt) == aa
| true
| true
|
f70b09221802c961e0b9d4fb231642054bff3534
| 3,096
|
py
|
Python
|
fryptos/main.py
|
pyohei/encryptfile
|
988fa0b2f57c6482d71a81dba3e65ee0ff084048
|
[
"MIT"
] | null | null | null |
fryptos/main.py
|
pyohei/encryptfile
|
988fa0b2f57c6482d71a81dba3e65ee0ff084048
|
[
"MIT"
] | null | null | null |
fryptos/main.py
|
pyohei/encryptfile
|
988fa0b2f57c6482d71a81dba3e65ee0ff084048
|
[
"MIT"
] | null | null | null |
"""File path encryption.
Put files to public directory by encryption.
And this anchers of relationship.
This module anable change the anchers.
"""
import glob
import logging
import os
import shutil
try:
from . import filename
from .anchor.anchor import Anchor
except:
import filename
from anchor.anchor import Anchor
def main(src, dst):
"""Main script of this code."""
# Currently, you can use only `text` type ;)
anchor = Anchor('text')
for org_f in _read_files(src):
cur_f = anchor.request_current_path(org_f)
        # WARNING: Theoretically, encrypted file names have a very small chance
        # of colliding, and this script does not check for duplicate file names.
enc_f = _make_dest_dir(dst, _encrypt_file(org_f, anchor))
logging.debug('---')
logging.debug('Original: {0}'.format(org_f))
logging.debug('Current: {0}'.format(cur_f))
logging.debug('Encrypt: {0}'.format(enc_f))
# TODO: Add transaction process.
_copy(org_f, enc_f)
anchor.change(org_f, enc_f) # Write the change to anchor file
if cur_f and os.path.exists(cur_f):
_delete(dst, cur_f)
def _read_files(file_path):
"""Read all target files with generator."""
for r, d, fs in os.walk(file_path):
for f in fs:
yield os.path.join(r, f)
def _encrypt_file(fname, anchor):
"""Encrypt file name."""
return filename.change(fname)
def _make_dest_dir(public_dir, file_path):
"""Create destination directory."""
return os.path.join(public_dir, file_path)
def _copy(org_f, enc_f):
"""Copy source file into destination file."""
os.makedirs('/'.join(enc_f.split('/')[0:-1]))
shutil.copy(org_f, enc_f)
def _delete(dst_dir, cur_f):
"""Delete old encrypt file"""
delete_base_path = cur_f.replace(dst_dir.rstrip('/')+'/', '')
delete_path = os.path.join(dst_dir, delete_base_path.split('/')[0])
shutil.rmtree(delete_path)
logging.debug('Delete: {}'.format(delete_path))
def execute():
import argparse
from os.path import expanduser
from os.path import isdir
home_dir = expanduser('~')
p = argparse.ArgumentParser(description='Encrypt files.')
    # Source and destination are required arguments.
p.add_argument('source', help='Source directory')
    p.add_argument('destination', help='Destination of encryption.')
# debug mode.
p.add_argument('-v', help='Verbose mode.', dest='verbose', action='count', default=0)
args = p.parse_args()
src = str(args.source)
dst = str(args.destination)
if not isdir(src):
print('No such directory \'{}\'.'.format(src))
quit()
if not isdir(dst):
print('No such directory \'{}\'.'.format(dst))
quit()
verbose = args.verbose
if isinstance(verbose, int) and verbose > 0:
log_format = '%(asctime)s\t[%(levelname)s]\t%(message)s'
logging.basicConfig(level=10, format=log_format)
main(src, dst)
if __name__ == '__main__':
execute()
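# A minimal usage sketch of the command-line entry point above (the directory
# paths below are placeholders, not part of the package):
#
#   $ python main.py ~/private_files ~/public_dir -v
#
# This walks every file under ~/private_files, encrypts each file path via
# filename.change(), copies the file to the encrypted path under ~/public_dir,
# records the mapping in the text anchor, and removes the previously published
# copy if the anchor knows about one.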
| 29.207547
| 90
| 0.645995
|
import glob
import logging
import os
import shutil
try:
from . import filename
from .anchor.anchor import Anchor
except:
import filename
from anchor.anchor import Anchor
def main(src, dst):
anchor = Anchor('text')
for org_f in _read_files(src):
cur_f = anchor.request_current_path(org_f)
enc_f = _make_dest_dir(dst, _encrypt_file(org_f, anchor))
logging.debug('---')
logging.debug('Original: {0}'.format(org_f))
logging.debug('Current: {0}'.format(cur_f))
logging.debug('Encrypt: {0}'.format(enc_f))
_copy(org_f, enc_f)
anchor.change(org_f, enc_f)
if cur_f and os.path.exists(cur_f):
_delete(dst, cur_f)
def _read_files(file_path):
for r, d, fs in os.walk(file_path):
for f in fs:
yield os.path.join(r, f)
def _encrypt_file(fname, anchor):
return filename.change(fname)
def _make_dest_dir(public_dir, file_path):
return os.path.join(public_dir, file_path)
def _copy(org_f, enc_f):
os.makedirs('/'.join(enc_f.split('/')[0:-1]))
shutil.copy(org_f, enc_f)
def _delete(dst_dir, cur_f):
delete_base_path = cur_f.replace(dst_dir.rstrip('/')+'/', '')
delete_path = os.path.join(dst_dir, delete_base_path.split('/')[0])
shutil.rmtree(delete_path)
logging.debug('Delete: {}'.format(delete_path))
def execute():
import argparse
from os.path import expanduser
from os.path import isdir
home_dir = expanduser('~')
p = argparse.ArgumentParser(description='Encrypt files.')
p.add_argument('source', help='Source directory')
    p.add_argument('destination', help='Destination of encryption.')
p.add_argument('-v', help='Verbose mode.', dest='verbose', action='count', default=0)
args = p.parse_args()
src = str(args.source)
dst = str(args.destination)
if not isdir(src):
print('No such directory \'{}\'.'.format(src))
quit()
if not isdir(dst):
print('No such directory \'{}\'.'.format(dst))
quit()
verbose = args.verbose
if isinstance(verbose, int) and verbose > 0:
log_format = '%(asctime)s\t[%(levelname)s]\t%(message)s'
logging.basicConfig(level=10, format=log_format)
main(src, dst)
if __name__ == '__main__':
execute()
| true
| true
|
f70b0a9a919f5f4038de5f39bbb1976821f60654
| 24,739
|
py
|
Python
|
alibi/explainers/anchors/anchor_image.py
|
mauicv/alibi
|
30fea76391c255963c8818c2b54aa615b0d6f858
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
alibi/explainers/anchors/anchor_image.py
|
mauicv/alibi
|
30fea76391c255963c8818c2b54aa615b0d6f858
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
alibi/explainers/anchors/anchor_image.py
|
mauicv/alibi
|
30fea76391c255963c8818c2b54aa615b0d6f858
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import copy
import logging
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import numpy as np
from skimage.segmentation import felzenszwalb, quickshift, slic
from alibi.api.defaults import DEFAULT_DATA_ANCHOR_IMG, DEFAULT_META_ANCHOR
from alibi.api.interfaces import Explainer, Explanation
from alibi.exceptions import (AlibiPredictorCallException,
AlibiPredictorReturnTypeError)
from alibi.utils.wrappers import ArgmaxTransformer
from .anchor_base import AnchorBaseBeam
from .anchor_explanation import AnchorExplanation
logger = logging.getLogger(__name__)
DEFAULT_SEGMENTATION_KWARGS = {
'felzenszwalb': {},
'quickshift': {},
'slic': {'n_segments': 10, 'compactness': 10, 'sigma': .5}
} # type: Dict[str, Dict]
def scale_image(image: np.ndarray, scale: tuple = (0, 255)) -> np.ndarray:
"""
Scales an image in a specified range.
Parameters
----------
image
        Image to be scaled.
scale
The scaling interval.
Returns
-------
img_scaled
Scaled image.
"""
img_max, img_min = image.max(), image.min()
img_std = (image - img_min) / (img_max - img_min)
img_scaled = img_std * (scale[1] - scale[0]) + scale[0]
return img_scaled
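# A minimal numeric sketch of scale_image (illustrative values only): for a float
# image whose values span [0.0, 1.0], min-max normalisation maps each value onto
# the requested output range.
#
#   >>> img = np.array([[[0.0], [0.25]], [[0.5], [1.0]]])
#   >>> scale_image(img, scale=(0, 255))[0, 1, 0]
#   63.75    # (0.25 - 0.0) / (1.0 - 0.0) * (255 - 0) + 0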
class AnchorImageSampler:
def __init__(
self,
predictor: Callable,
segmentation_fn: Callable,
custom_segmentation: bool,
image: np.ndarray,
images_background: Optional[np.ndarray] = None,
p_sample: float = 0.5,
n_covered_ex: int = 10,
):
"""
Initialize anchor image sampler.
Parameters
----------
predictor
A callable that takes a `numpy` array of `N` data points as inputs and returns `N` outputs.
segmentation_fn
Function used to segment the images.
image
Image to be explained.
images_background
Images to overlay superpixels on.
p_sample
Probability for a pixel to be represented by the average value of its superpixel.
n_covered_ex
How many examples where anchors apply to store for each anchor sampled during search
(both examples where prediction on samples agrees/disagrees with `desired_label` are stored).
"""
self.predictor = predictor
self.segmentation_fn = segmentation_fn
self.custom_segmentation = custom_segmentation
self.image = image
self.images_background = images_background
self.n_covered_ex = n_covered_ex
self.p_sample = p_sample
self.segments = self.generate_superpixels(image)
self.segment_labels = list(np.unique(self.segments))
self.instance_label = self.predictor(image[np.newaxis, ...])[0]
def __call__(
self, anchor: Tuple[int, tuple], num_samples: int, compute_labels: bool = True
) -> List[Union[np.ndarray, float, int]]:
"""
Sample images from a perturbation distribution by masking randomly chosen superpixels
from the original image and replacing them with pixel values from superimposed images
if background images are provided to the explainer. Otherwise, the superpixels from the
original image are replaced with their average values.
Parameters
----------
anchor
- ``int`` - order of anchor in the batch.
- ``tuple`` - features (= superpixels) present in the proposed anchor.
num_samples
Number of samples used.
compute_labels
If ``True``, an array of comparisons between predictions on perturbed samples and
instance to be explained is returned.
Returns
-------
If ``compute_labels=True``, a list containing the following is returned
- `covered_true` - perturbed examples where the anchor applies and the model prediction on perturbed is the \
same as the instance prediction.
            - `covered_false` - perturbed examples where the anchor applies and the model prediction on perturbed sample \
is NOT the same as the instance prediction.
- `labels` - `num_samples` ints indicating whether the prediction on the perturbed sample matches (1) \
the label of the instance to be explained or not (0).
- `data` - Matrix with 1s and 0s indicating whether the values in a superpixel will remain unchanged (1) or \
will be perturbed (0), for each sample.
- `1.0` - indicates exact coverage is not computed for this algorithm.
- `anchor[0]` - position of anchor in the batch request
Otherwise, a list containing the data matrix only is returned.
"""
if compute_labels:
raw_data, data = self.perturbation(anchor[1], num_samples)
labels = self.compare_labels(raw_data)
covered_true = raw_data[labels][: self.n_covered_ex]
covered_true = [scale_image(img) for img in covered_true]
covered_false = raw_data[np.logical_not(labels)][: self.n_covered_ex]
covered_false = [scale_image(img) for img in covered_false]
            # coverage set to -1.0 as we can't compute 'true' coverage for this model
return [covered_true, covered_false, labels.astype(int), data, -1.0, anchor[0]] # type: ignore
else:
data = self._choose_superpixels(num_samples)
data[:, anchor[1]] = 1 # superpixels in candidate anchor are not perturbed
return [data]
def compare_labels(self, samples: np.ndarray) -> np.ndarray:
"""
Compute the agreement between a classifier prediction on an instance to be explained
and the prediction on a set of samples which have a subset of perturbed superpixels.
Parameters
----------
samples
Samples whose labels are to be compared with the instance label.
Returns
-------
A boolean array indicating whether the prediction was the same as the instance label.
"""
return self.predictor(samples) == self.instance_label
def _choose_superpixels(
self, num_samples: int, p_sample: float = 0.5
) -> np.ndarray:
"""
Generates a binary mask of dimension [num_samples, M] where M is the number of
image superpixels (segments).
Parameters
----------
num_samples
Number of perturbed images to be generated
p_sample:
The probability that a superpixel is perturbed
Returns
-------
data
Binary 2D mask, where each non-zero entry in a row indicates that
the values of the particular image segment will not be perturbed.
"""
n_features = len(self.segment_labels)
data = np.random.choice(
[0, 1], num_samples * n_features, p=[p_sample, 1 - p_sample]
)
data = data.reshape((num_samples, n_features))
return data
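    # A small illustrative sketch of the mask produced above (the `sampler`
    # instance and the particular random draw are hypothetical). With
    # num_samples=3 and four superpixels, each entry is 0 with probability
    # p_sample (perturb) or 1 (keep), e.g.:
    #
    #   >>> sampler._choose_superpixels(num_samples=3, p_sample=0.5)
    #   array([[1, 0, 1, 1],
    #          [0, 0, 1, 0],
    #          [1, 1, 0, 1]])    # shape (num_samples, n_superpixels)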
def perturbation(
self, anchor: tuple, num_samples: int
) -> Tuple[np.ndarray, np.ndarray]:
"""
Perturbs an image by altering the values of selected superpixels. If a dataset of image
backgrounds is provided to the explainer, then the superpixels are replaced with the
equivalent superpixels from the background image. Otherwise, the superpixels are replaced
by their average value.
Parameters
----------
anchor:
Contains the superpixels whose values are not going to be perturbed.
num_samples:
Number of perturbed samples to be returned.
Returns
-------
imgs
A `[num_samples, H, W, C]` array of perturbed images.
segments_mask
            A `[num_samples, M]` binary mask, where `M` is the number of image superpixels
            (segments). A value of 1 indicates the values in that particular superpixel are not
perturbed.
"""
image = self.image
segments = self.segments
backgrounds: Union[np.ndarray, List[None]]
# choose superpixels to be perturbed
segments_mask = self._choose_superpixels(num_samples, p_sample=self.p_sample)
segments_mask[:, anchor] = 1
# for each sample, need to sample one of the background images if provided
if self.images_background is not None:
backgrounds = np.random.choice(
range(len(self.images_background)),
segments_mask.shape[0],
replace=True,
)
else:
backgrounds = [None] * segments_mask.shape[0]
# create fudged image where the pixel value in each superpixel is set to the
# average over the superpixel for each channel
fudged_image = image.copy()
n_channels = image.shape[-1]
for x in np.unique(segments):
fudged_image[segments == x] = [
np.mean(image[segments == x][:, i]) for i in range(n_channels)
]
pert_imgs = []
for mask, background_idx in zip(segments_mask, backgrounds):
temp = copy.deepcopy(image)
to_perturb = np.where(mask == 0)[0]
# create mask for each superpixel not present in the sample
mask = np.zeros(segments.shape).astype(bool)
for superpixel in to_perturb:
mask[segments == superpixel] = True
if background_idx is not None:
# replace values with those of background image
temp[mask] = self.images_background[background_idx][mask] # type: ignore[index]
else:
# ... or with the averaged superpixel value
temp[mask] = fudged_image[mask]
pert_imgs.append(temp)
return np.array(pert_imgs), segments_mask
def generate_superpixels(self, image: np.ndarray) -> np.ndarray:
"""
Generates superpixels from (i.e., segments) an image.
Parameters
----------
image
A grayscale or RGB image.
Returns
-------
A `[H, W]` array of integers. Each integer is a segment (superpixel) label.
"""
image_preproc = self._preprocess_img(image)
return self.segmentation_fn(image_preproc)
def _preprocess_img(self, image: np.ndarray) -> np.ndarray:
"""
Applies necessary transformations to the image prior to segmentation.
Parameters
----------
image
A grayscale or RGB image.
Returns
-------
A preprocessed image.
"""
# Grayscale images are repeated across channels
if not self.custom_segmentation and image.shape[-1] == 1:
image_preproc = np.repeat(image, 3, axis=2)
else:
image_preproc = image.copy()
return image_preproc
class AnchorImage(Explainer):
def __init__(self,
predictor: Callable[[np.ndarray], np.ndarray],
image_shape: tuple,
dtype: Type[np.generic] = np.float32,
segmentation_fn: Any = 'slic',
segmentation_kwargs: Optional[dict] = None,
images_background: Optional[np.ndarray] = None,
seed: Optional[int] = None) -> None:
"""
Initialize anchor image explainer.
Parameters
----------
predictor
A callable that takes a `numpy` array of `N` data points as inputs and returns `N` outputs.
image_shape
Shape of the image to be explained. The channel axis is expected to be last.
dtype
A `numpy` scalar type that corresponds to the type of input array expected by `predictor`. This may be
used to construct arrays of the given type to be passed through the `predictor`. For most use cases
this argument should have no effect, but it is exposed for use with predictors that would break when
called with an array of unsupported type.
segmentation_fn
Any of the built in segmentation function strings: ``'felzenszwalb'``, ``'slic'`` or ``'quickshift'`` or
a custom segmentation function (callable) which returns an image mask with labels for each superpixel.
See http://scikit-image.org/docs/dev/api/skimage.segmentation.html for more info.
segmentation_kwargs
Keyword arguments for the built in segmentation functions.
images_background
Images to overlay superpixels on.
seed
If set, ensures different runs with the same input will yield same explanation.
Raises
------
:py:class:`alibi.exceptions.AlibiPredictorCallException`
If calling `predictor` fails at runtime.
:py:class:`alibi.exceptions.AlibiPredictorReturnTypeError`
If the return type of `predictor` is not `np.ndarray`.
"""
super().__init__(meta=copy.deepcopy(DEFAULT_META_ANCHOR))
np.random.seed(seed)
# TODO: this logic needs improvement. We should check against a fixed set of strings
# for built-ins instead of any `str`.
if isinstance(segmentation_fn, str) and segmentation_kwargs is None:
try:
segmentation_kwargs = DEFAULT_SEGMENTATION_KWARGS[segmentation_fn]
except KeyError:
logger.warning(
                    'DEFAULT_SEGMENTATION_KWARGS did not contain any entry '
                    'for segmentation method {}. No kwargs will be passed to '
'the segmentation function!'.format(segmentation_fn)
)
segmentation_kwargs = {}
elif callable(segmentation_fn) and segmentation_kwargs:
logger.warning(
'Specified both a segmentation function to create superpixels and '
'keyword arguments for built-in segmentation functions. By default '
'the specified segmentation function will be used.'
)
# set the predictor
self.image_shape = tuple(image_shape) # coerce lists
self.dtype = dtype
self.predictor = self._transform_predictor(predictor)
        # segmentation function is either a user-defined function or one of the values in fn_options
fn_options = {'felzenszwalb': felzenszwalb, 'slic': slic, 'quickshift': quickshift}
if callable(segmentation_fn):
self.custom_segmentation = True
self.segmentation_fn = segmentation_fn
else:
self.custom_segmentation = False
self.segmentation_fn = partial(fn_options[segmentation_fn], **segmentation_kwargs) # type: ignore[arg-type]
self.images_background = images_background
# a superpixel is perturbed with prob 1 - p_sample
self.p_sample = 0.5 # type: float
# update metadata
self.meta['params'].update(
custom_segmentation=self.custom_segmentation,
segmentation_kwargs=segmentation_kwargs,
p_sample=self.p_sample,
seed=seed,
image_shape=self.image_shape,
images_background=self.images_background
)
if not self.custom_segmentation:
self.meta['params'].update(segmentation_fn=segmentation_fn)
else:
self.meta['params'].update(segmentation_fn='custom')
def generate_superpixels(self, image: np.ndarray) -> np.ndarray:
"""
Generates superpixels from (i.e., segments) an image.
Parameters
----------
image
A grayscale or RGB image.
Returns
-------
A `[H, W]` array of integers. Each integer is a segment (superpixel) label.
"""
image_preproc = self._preprocess_img(image)
return self.segmentation_fn(image_preproc)
def _preprocess_img(self, image: np.ndarray) -> np.ndarray:
"""
Applies necessary transformations to the image prior to segmentation.
Parameters
----------
image
A grayscale or RGB image.
Returns
-------
A preprocessed image.
"""
# Grayscale images are repeated across channels
if not self.custom_segmentation and image.shape[-1] == 1:
image_preproc = np.repeat(image, 3, axis=2)
else:
image_preproc = image.copy()
return image_preproc
def explain(self, # type: ignore[override]
image: np.ndarray,
p_sample: float = 0.5,
threshold: float = 0.95,
delta: float = 0.1,
tau: float = 0.15,
batch_size: int = 100,
coverage_samples: int = 10000,
beam_size: int = 1,
stop_on_first: bool = False,
max_anchor_size: Optional[int] = None,
min_samples_start: int = 100,
n_covered_ex: int = 10,
binary_cache_size: int = 10000,
cache_margin: int = 1000,
verbose: bool = False,
verbose_every: int = 1,
**kwargs: Any) -> Explanation:
"""
Explain instance and return anchor with metadata.
Parameters
----------
image
Image to be explained.
p_sample
Probability for a pixel to be represented by the average value of its superpixel.
threshold
Minimum precision threshold.
delta
Used to compute `beta`.
tau
Margin between lower confidence bound and minimum precision of upper bound.
batch_size
Batch size used for sampling.
coverage_samples
Number of samples used to estimate coverage from during result search.
beam_size
The number of anchors extended at each step of new anchors construction.
stop_on_first
            If ``True``, the beam search algorithm will return the first anchor that satisfies the
probability constraint.
max_anchor_size
Maximum number of features in result.
min_samples_start
Min number of initial samples.
n_covered_ex
How many examples where anchors apply to store for each anchor sampled during search
(both examples where prediction on samples agrees/disagrees with `desired_label` are stored).
binary_cache_size
The result search pre-allocates `binary_cache_size` batches for storing the binary arrays
returned during sampling.
cache_margin
When only ``max(cache_margin, batch_size)`` positions in the binary cache remain empty, a new cache
of the same size is pre-allocated to continue buffering samples.
verbose
Display updates during the anchor search iterations.
verbose_every
Frequency of displayed iterations during anchor search process.
Returns
-------
explanation
`Explanation` object containing the anchor explaining the instance with additional metadata as attributes.
See usage at `AnchorImage examples`_ for details.
.. _AnchorImage examples:
https://docs.seldon.io/projects/alibi/en/stable/methods/Anchors.html
"""
# get params for storage in meta
params = locals()
remove = ['image', 'self']
for key in remove:
params.pop(key)
sampler = AnchorImageSampler(
predictor=self.predictor,
segmentation_fn=self.segmentation_fn,
custom_segmentation=self.custom_segmentation,
image=image,
images_background=self.images_background,
p_sample=p_sample,
n_covered_ex=n_covered_ex,
)
# get anchors and add metadata
mab = AnchorBaseBeam(
samplers=[sampler],
sample_cache_size=binary_cache_size,
cache_margin=cache_margin,
**kwargs)
result = mab.anchor_beam(
desired_confidence=threshold,
delta=delta,
epsilon=tau,
batch_size=batch_size,
coverage_samples=coverage_samples,
beam_size=beam_size,
stop_on_first=stop_on_first,
max_anchor_size=max_anchor_size,
min_samples_start=min_samples_start,
verbose=verbose,
verbose_every=verbose_every,
**kwargs,
) # type: Any
return self._build_explanation(
image, result, sampler.instance_label, params, sampler
)
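    # A minimal usage sketch (the predictor, image and shape below are
    # hypothetical placeholders, not part of this module):
    #
    #   >>> explainer = AnchorImage(predict_fn, image_shape=(28, 28, 1),
    #   ...                         segmentation_fn='slic')
    #   >>> explanation = explainer.explain(image, threshold=0.95, p_sample=0.5)
    #   >>> explanation.anchor     # image overlaid with the anchor superpixels
    #   >>> explanation.precision  # estimated precision of the returned anchor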
def _build_explanation(
self,
image: np.ndarray,
result: dict,
predicted_label: int,
params: dict,
sampler: AnchorImageSampler,
) -> Explanation:
"""
Uses the metadata returned by the anchor search algorithm together with
the instance to be explained to build an explanation object.
Parameters
----------
image
Instance to be explained.
result
Dictionary containing the search anchor and metadata.
predicted_label
Label of the instance to be explained.
params
            Parameters passed to :py:meth:`alibi.explainers.anchor_image.AnchorImage.explain`.
"""
result['instance'] = image
result['instances'] = np.expand_dims(image, 0)
result['prediction'] = np.array([predicted_label])
# overlay image with anchor mask
anchor = self.overlay_mask(image, sampler.segments, result['feature'])
exp = AnchorExplanation('image', result)
# output explanation dictionary
data = copy.deepcopy(DEFAULT_DATA_ANCHOR_IMG)
data.update(
anchor=anchor,
segments=sampler.segments,
precision=exp.precision(),
coverage=exp.coverage(),
raw=exp.exp_map
)
# create explanation object
explanation = Explanation(meta=copy.deepcopy(self.meta), data=data)
# params passed to explain
explanation.meta['params'].update(params)
return explanation
def overlay_mask(self, image: np.ndarray, segments: np.ndarray, mask_features: list,
scale: tuple = (0, 255)) -> np.ndarray:
"""
Overlay image with mask described by the mask features.
Parameters
----------
image
Image to be explained.
segments
Superpixels.
mask_features
List with superpixels present in mask.
scale
Pixel scale for masked image.
Returns
-------
masked_image
Image overlaid with mask.
"""
mask = np.zeros(segments.shape)
for f in mask_features:
mask[segments == f] = 1
image = scale_image(image, scale=scale)
masked_image = (image * np.expand_dims(mask, 2)).astype(int)
return masked_image
def _transform_predictor(self, predictor: Callable) -> Callable:
# check if predictor returns predicted class or prediction probabilities for each class
# if needed adjust predictor so it returns the predicted class
x = np.zeros((1,) + self.image_shape, dtype=self.dtype)
try:
prediction = predictor(x)
except Exception as e:
msg = f"Predictor failed to be called on {type(x)} of shape {x.shape} and dtype {x.dtype}. " \
f"Check that the parameter `image_shape` is correctly specified."
raise AlibiPredictorCallException(msg) from e
if not isinstance(prediction, np.ndarray):
msg = f"Excepted predictor return type to be {np.ndarray} but got {type(prediction)}."
raise AlibiPredictorReturnTypeError(msg)
if np.argmax(prediction.shape) == 0:
return predictor
else:
transformer = ArgmaxTransformer(predictor)
return transformer
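    # A short sketch of the shape check above, for a single dummy input of shape
    # (1, H, W, C):
    #   - a predictor returning class labels gives output shape (1,), so
    #     np.argmax((1,)) == 0 and it is used unchanged;
    #   - a predictor returning probabilities over, say, 10 classes gives output
    #     shape (1, 10), so np.argmax((1, 10)) == 1 and it is wrapped in
    #     ArgmaxTransformer, which takes the argmax over the class axis.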
def reset_predictor(self, predictor: Callable) -> None:
"""
Resets the predictor function.
Parameters
----------
predictor
New predictor function.
"""
self.predictor = self._transform_predictor(predictor)
| 37.25753
| 120
| 0.606653
|
import copy
import logging
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import numpy as np
from skimage.segmentation import felzenszwalb, quickshift, slic
from alibi.api.defaults import DEFAULT_DATA_ANCHOR_IMG, DEFAULT_META_ANCHOR
from alibi.api.interfaces import Explainer, Explanation
from alibi.exceptions import (AlibiPredictorCallException,
AlibiPredictorReturnTypeError)
from alibi.utils.wrappers import ArgmaxTransformer
from .anchor_base import AnchorBaseBeam
from .anchor_explanation import AnchorExplanation
logger = logging.getLogger(__name__)
DEFAULT_SEGMENTATION_KWARGS = {
'felzenszwalb': {},
'quickshift': {},
'slic': {'n_segments': 10, 'compactness': 10, 'sigma': .5}
}
def scale_image(image: np.ndarray, scale: tuple = (0, 255)) -> np.ndarray:
img_max, img_min = image.max(), image.min()
img_std = (image - img_min) / (img_max - img_min)
img_scaled = img_std * (scale[1] - scale[0]) + scale[0]
return img_scaled
class AnchorImageSampler:
def __init__(
self,
predictor: Callable,
segmentation_fn: Callable,
custom_segmentation: bool,
image: np.ndarray,
images_background: Optional[np.ndarray] = None,
p_sample: float = 0.5,
n_covered_ex: int = 10,
):
self.predictor = predictor
self.segmentation_fn = segmentation_fn
self.custom_segmentation = custom_segmentation
self.image = image
self.images_background = images_background
self.n_covered_ex = n_covered_ex
self.p_sample = p_sample
self.segments = self.generate_superpixels(image)
self.segment_labels = list(np.unique(self.segments))
self.instance_label = self.predictor(image[np.newaxis, ...])[0]
def __call__(
self, anchor: Tuple[int, tuple], num_samples: int, compute_labels: bool = True
) -> List[Union[np.ndarray, float, int]]:
if compute_labels:
raw_data, data = self.perturbation(anchor[1], num_samples)
labels = self.compare_labels(raw_data)
covered_true = raw_data[labels][: self.n_covered_ex]
covered_true = [scale_image(img) for img in covered_true]
covered_false = raw_data[np.logical_not(labels)][: self.n_covered_ex]
covered_false = [scale_image(img) for img in covered_false]
return [covered_true, covered_false, labels.astype(int), data, -1.0, anchor[0]] # type: ignore
else:
data = self._choose_superpixels(num_samples)
data[:, anchor[1]] = 1 # superpixels in candidate anchor are not perturbed
return [data]
def compare_labels(self, samples: np.ndarray) -> np.ndarray:
return self.predictor(samples) == self.instance_label
def _choose_superpixels(
self, num_samples: int, p_sample: float = 0.5
) -> np.ndarray:
n_features = len(self.segment_labels)
data = np.random.choice(
[0, 1], num_samples * n_features, p=[p_sample, 1 - p_sample]
)
data = data.reshape((num_samples, n_features))
return data
def perturbation(
self, anchor: tuple, num_samples: int
) -> Tuple[np.ndarray, np.ndarray]:
image = self.image
segments = self.segments
backgrounds: Union[np.ndarray, List[None]]
# choose superpixels to be perturbed
segments_mask = self._choose_superpixels(num_samples, p_sample=self.p_sample)
segments_mask[:, anchor] = 1
# for each sample, need to sample one of the background images if provided
if self.images_background is not None:
backgrounds = np.random.choice(
range(len(self.images_background)),
segments_mask.shape[0],
replace=True,
)
else:
backgrounds = [None] * segments_mask.shape[0]
# create fudged image where the pixel value in each superpixel is set to the
# average over the superpixel for each channel
fudged_image = image.copy()
n_channels = image.shape[-1]
for x in np.unique(segments):
fudged_image[segments == x] = [
np.mean(image[segments == x][:, i]) for i in range(n_channels)
]
pert_imgs = []
for mask, background_idx in zip(segments_mask, backgrounds):
temp = copy.deepcopy(image)
to_perturb = np.where(mask == 0)[0]
# create mask for each superpixel not present in the sample
mask = np.zeros(segments.shape).astype(bool)
for superpixel in to_perturb:
mask[segments == superpixel] = True
if background_idx is not None:
# replace values with those of background image
temp[mask] = self.images_background[background_idx][mask] # type: ignore[index]
else:
# ... or with the averaged superpixel value
temp[mask] = fudged_image[mask]
pert_imgs.append(temp)
return np.array(pert_imgs), segments_mask
def generate_superpixels(self, image: np.ndarray) -> np.ndarray:
image_preproc = self._preprocess_img(image)
return self.segmentation_fn(image_preproc)
def _preprocess_img(self, image: np.ndarray) -> np.ndarray:
# Grayscale images are repeated across channels
if not self.custom_segmentation and image.shape[-1] == 1:
image_preproc = np.repeat(image, 3, axis=2)
else:
image_preproc = image.copy()
return image_preproc
class AnchorImage(Explainer):
def __init__(self,
predictor: Callable[[np.ndarray], np.ndarray],
image_shape: tuple,
dtype: Type[np.generic] = np.float32,
segmentation_fn: Any = 'slic',
segmentation_kwargs: Optional[dict] = None,
images_background: Optional[np.ndarray] = None,
seed: Optional[int] = None) -> None:
super().__init__(meta=copy.deepcopy(DEFAULT_META_ANCHOR))
np.random.seed(seed)
# TODO: this logic needs improvement. We should check against a fixed set of strings
# for built-ins instead of any `str`.
if isinstance(segmentation_fn, str) and segmentation_kwargs is None:
try:
segmentation_kwargs = DEFAULT_SEGMENTATION_KWARGS[segmentation_fn]
except KeyError:
logger.warning(
                    'DEFAULT_SEGMENTATION_KWARGS did not contain any entry '
                    'for segmentation method {}. No kwargs will be passed to '
'the segmentation function!'.format(segmentation_fn)
)
segmentation_kwargs = {}
elif callable(segmentation_fn) and segmentation_kwargs:
logger.warning(
'Specified both a segmentation function to create superpixels and '
'keyword arguments for built-in segmentation functions. By default '
'the specified segmentation function will be used.'
)
# set the predictor
self.image_shape = tuple(image_shape) # coerce lists
self.dtype = dtype
self.predictor = self._transform_predictor(predictor)
        # segmentation function is either a user-defined function or one of the values in fn_options
fn_options = {'felzenszwalb': felzenszwalb, 'slic': slic, 'quickshift': quickshift}
if callable(segmentation_fn):
self.custom_segmentation = True
self.segmentation_fn = segmentation_fn
else:
self.custom_segmentation = False
self.segmentation_fn = partial(fn_options[segmentation_fn], **segmentation_kwargs) # type: ignore[arg-type]
self.images_background = images_background
# a superpixel is perturbed with prob 1 - p_sample
self.p_sample = 0.5 # type: float
# update metadata
self.meta['params'].update(
custom_segmentation=self.custom_segmentation,
segmentation_kwargs=segmentation_kwargs,
p_sample=self.p_sample,
seed=seed,
image_shape=self.image_shape,
images_background=self.images_background
)
if not self.custom_segmentation:
self.meta['params'].update(segmentation_fn=segmentation_fn)
else:
self.meta['params'].update(segmentation_fn='custom')
def generate_superpixels(self, image: np.ndarray) -> np.ndarray:
image_preproc = self._preprocess_img(image)
return self.segmentation_fn(image_preproc)
def _preprocess_img(self, image: np.ndarray) -> np.ndarray:
# Grayscale images are repeated across channels
if not self.custom_segmentation and image.shape[-1] == 1:
image_preproc = np.repeat(image, 3, axis=2)
else:
image_preproc = image.copy()
return image_preproc
def explain(self, # type: ignore[override]
image: np.ndarray,
p_sample: float = 0.5,
threshold: float = 0.95,
delta: float = 0.1,
tau: float = 0.15,
batch_size: int = 100,
coverage_samples: int = 10000,
beam_size: int = 1,
stop_on_first: bool = False,
max_anchor_size: Optional[int] = None,
min_samples_start: int = 100,
n_covered_ex: int = 10,
binary_cache_size: int = 10000,
cache_margin: int = 1000,
verbose: bool = False,
verbose_every: int = 1,
**kwargs: Any) -> Explanation:
# get params for storage in meta
params = locals()
remove = ['image', 'self']
for key in remove:
params.pop(key)
sampler = AnchorImageSampler(
predictor=self.predictor,
segmentation_fn=self.segmentation_fn,
custom_segmentation=self.custom_segmentation,
image=image,
images_background=self.images_background,
p_sample=p_sample,
n_covered_ex=n_covered_ex,
)
# get anchors and add metadata
mab = AnchorBaseBeam(
samplers=[sampler],
sample_cache_size=binary_cache_size,
cache_margin=cache_margin,
**kwargs)
result = mab.anchor_beam(
desired_confidence=threshold,
delta=delta,
epsilon=tau,
batch_size=batch_size,
coverage_samples=coverage_samples,
beam_size=beam_size,
stop_on_first=stop_on_first,
max_anchor_size=max_anchor_size,
min_samples_start=min_samples_start,
verbose=verbose,
verbose_every=verbose_every,
**kwargs,
) # type: Any
return self._build_explanation(
image, result, sampler.instance_label, params, sampler
)
def _build_explanation(
self,
image: np.ndarray,
result: dict,
predicted_label: int,
params: dict,
sampler: AnchorImageSampler,
) -> Explanation:
result['instance'] = image
result['instances'] = np.expand_dims(image, 0)
result['prediction'] = np.array([predicted_label])
# overlay image with anchor mask
anchor = self.overlay_mask(image, sampler.segments, result['feature'])
exp = AnchorExplanation('image', result)
# output explanation dictionary
data = copy.deepcopy(DEFAULT_DATA_ANCHOR_IMG)
data.update(
anchor=anchor,
segments=sampler.segments,
precision=exp.precision(),
coverage=exp.coverage(),
raw=exp.exp_map
)
# create explanation object
explanation = Explanation(meta=copy.deepcopy(self.meta), data=data)
# params passed to explain
explanation.meta['params'].update(params)
return explanation
def overlay_mask(self, image: np.ndarray, segments: np.ndarray, mask_features: list,
scale: tuple = (0, 255)) -> np.ndarray:
mask = np.zeros(segments.shape)
for f in mask_features:
mask[segments == f] = 1
image = scale_image(image, scale=scale)
masked_image = (image * np.expand_dims(mask, 2)).astype(int)
return masked_image
def _transform_predictor(self, predictor: Callable) -> Callable:
# check if predictor returns predicted class or prediction probabilities for each class
# if needed adjust predictor so it returns the predicted class
x = np.zeros((1,) + self.image_shape, dtype=self.dtype)
try:
prediction = predictor(x)
except Exception as e:
msg = f"Predictor failed to be called on {type(x)} of shape {x.shape} and dtype {x.dtype}. " \
f"Check that the parameter `image_shape` is correctly specified."
raise AlibiPredictorCallException(msg) from e
if not isinstance(prediction, np.ndarray):
msg = f"Excepted predictor return type to be {np.ndarray} but got {type(prediction)}."
raise AlibiPredictorReturnTypeError(msg)
if np.argmax(prediction.shape) == 0:
return predictor
else:
transformer = ArgmaxTransformer(predictor)
return transformer
def reset_predictor(self, predictor: Callable) -> None:
self.predictor = self._transform_predictor(predictor)
| true
| true
|
f70b0eece0552cb8650942bf13b7e0fb7ec7bb56
| 27,283
|
py
|
Python
|
tensor2tensor/models/research/moe.py
|
kpe/tensor2tensor
|
453c473030c354a3d9a4c27b12bcec8942334bf4
|
[
"Apache-2.0"
] | 34
|
2018-12-19T01:00:57.000Z
|
2021-03-26T09:36:37.000Z
|
tensor2tensor/models/research/moe.py
|
kpe/tensor2tensor
|
453c473030c354a3d9a4c27b12bcec8942334bf4
|
[
"Apache-2.0"
] | 11
|
2018-12-25T03:37:59.000Z
|
2021-08-25T14:43:58.000Z
|
tensor2tensor/models/research/moe.py
|
kpe/tensor2tensor
|
453c473030c354a3d9a4c27b12bcec8942334bf4
|
[
"Apache-2.0"
] | 9
|
2018-12-27T08:00:44.000Z
|
2020-06-08T03:05:14.000Z
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixture-of-experts code.
Interfaces and algorithms are under development and subject to rapid change
without notice.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
import tensorflow as tf
def transformer_moe_layer_v1(inputs, output_dim, hparams, train,
master_dtype=tf.bfloat16,
slice_dtype=tf.float32):
"""Local mixture of experts that works well on TPU.
Adapted from the paper https://arxiv.org/abs/1701.06538
  Note: until the algorithm and interface solidify, we pass in a hyperparameters
  dictionary in order not to complicate the interface in mtf_transformer.py.
Once this code moves out of "research", we should pass the hyperparameters
separately.
Hyperparameters used:
hparams.moe_num_experts: number of experts
hparams.moe_hidden_size: size of hidden layer in each expert
hparams.moe_group_size: size of each "group" for gating purposes
hparams.moe_capacity_factor_train: a float
hparams.moe_capacity_factor_eval: a float
hparams.moe_gating: a string
    + all hyperparameters used by _top_2_gating()
The number of parameters in the gating network is:
    (input_dim.size * hparams.num_experts)
The number of parameters in the experts themselves is:
(hparams.num_experts
* (input_dim.size + output_dim.size)
* hparams.moe_hidden_size)
The input is n-dimensional: [<batch_and_length_dims>, input_dim], consisting
of the representations of all positions in a batch of sequences.
Each position of each sequence is sent to 0-2 experts. The expert
choices and the combination weights are determined by a learned gating
function.
This function returns a small auxiliary loss that should be added to the
training loss of the model. This loss helps to balance expert usage.
Without the loss, it is very likely that a few experts will be trained and
the rest will starve.
Several hacks are necessary to get around current TPU limitations:
- To ensure static shapes, we enforce (by truncation/padding)
that each sequence send the same number of elements to each expert.
It would make more sense to enforce this equality over the entire batch,
but due to our hacked-up gather-by-matmul implementation, we need to divide
the batch into "groups". For each group, the same number of elements
are sent to each expert.
TODO(noam): Factor this code better. We want to be able to substitute
different code for the experts themselves.
Args:
inputs: a mtf.Tensor with shape [<batch_dims...>, length_dim, input_dim]
output_dim: a mtf.Dimension (for Transformer, this is input_dim)
hparams: model hyperparameters
train: a boolean
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
Returns:
outputs: a Tensor with shape [<batch_dims...>, length_dim, output_dim]
loss: a mtf scalar
Raises:
ValueError: on unrecognized hparams.moe_gating
"""
orig_inputs = inputs
input_dim = inputs.shape.dims[-1]
hidden_dim = mtf.Dimension("expert_hidden", hparams.moe_hidden_size)
experts_dim = mtf.Dimension("experts", hparams.moe_num_experts)
group_size_dim = mtf.Dimension("group", hparams.moe_group_size)
batch_dim = mtf.Dimension(
orig_inputs.shape[0].name,
orig_inputs.shape.size // (group_size_dim.size * input_dim.size))
inputs = mtf.reshape(inputs, [batch_dim, group_size_dim, input_dim])
# Each sequence sends expert_capacity positions to each expert.
capacity_factor = (
hparams.moe_capacity_factor_train if train else
hparams.moe_capacity_factor_eval)
expert_capacity = min(
group_size_dim.size,
int((group_size_dim.size * capacity_factor) / experts_dim.size))
expert_capacity_dim = mtf.Dimension("expert_capacity", expert_capacity)
experts_dim_unsplit = mtf.Dimension("expert_unsplit", experts_dim.size)
batch_dim_unsplit = mtf.Dimension("batch_unsplit", batch_dim.size)
if hparams.moe_gating == "top_2":
dispatch_tensor, combine_tensor, loss = _top_2_gating(
inputs=inputs,
outer_expert_dims=None,
experts_dim=experts_dim_unsplit,
expert_capacity_dim=expert_capacity_dim,
hparams=hparams,
train=train)
else:
raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating)
# put num_experts dimension first to make split easier in alltoall
expert_inputs = mtf.einsum([inputs, dispatch_tensor], mtf.Shape(
[experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim]))
expert_inputs = mtf.reshape(expert_inputs, mtf.Shape(
[experts_dim, batch_dim_unsplit, expert_capacity_dim, input_dim]))
# Now feed the expert inputs through the experts.
h = mtf.layers.dense(
expert_inputs, hidden_dim, expert_dims=[experts_dim],
activation=mtf.relu, use_bias=False, master_dtype=master_dtype,
slice_dtype=slice_dtype, name="x0")
expert_output = mtf.layers.dense(
h, output_dim, expert_dims=[experts_dim], use_bias=False,
master_dtype=master_dtype, slice_dtype=slice_dtype, name="x1")
expert_output = mtf.reshape(expert_output, mtf.Shape(
[experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim]))
output = mtf.einsum([expert_output, combine_tensor], mtf.Shape(
[batch_dim, group_size_dim, output_dim]))
output = mtf.reshape(output, orig_inputs.shape.dims[:-1] + [output_dim])
return output, loss * hparams.moe_loss_coef
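# A rough parameter-count sketch for the layer above, assuming (illustrative
# numbers only) input_dim = output_dim = 1024, hparams.moe_hidden_size = 4096 and
# hparams.moe_num_experts = 16:
#
#   gating network : 1024 * 16                  =      16,384 weights
#   experts        : 16 * (1024 + 1024) * 4096  = 134,217,728 weights
#
# With hparams.moe_group_size = 1024 and moe_capacity_factor_train = 1.25, each
# group sends at most min(1024, int(1024 * 1.25 / 16)) = 80 positions to each
# expert; choices that exceed that capacity are dropped.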
def transformer_moe_layer_v2(inputs, output_dim, hparams, train,
master_dtype=tf.bfloat16, slice_dtype=tf.float32):
"""2-level mixture of experts.
Adapted from the paper https://arxiv.org/abs/1701.06538
  Note: until the algorithm and interface solidify, we pass in a hyperparameters
  dictionary in order not to complicate the interface in mtf_transformer.py.
Once this code moves out of "research", we should pass the hyperparameters
separately.
Hyperparameters used:
hparams.moe_num_experts: number of experts
hparams.moe_hidden_size: size of hidden layer in each expert
hparams.moe_group_size: size of each "group" for gating purposes
hparams.moe_capacity_factor_train: a float
hparams.moe_capacity_factor_eval: a float
hparams.moe_capacity_factor_second_level: a float
hparams.moe_gating: a string
    + all hyperparameters used by _top_2_gating()
One set of params for experts in first level and different of hparams
per expert in the second level.
The number of parameters in the gating network is:
    (input_dim.size * hparams.num_experts) +
(moe_hidden_size * hparams.num_experts) * hparams.num_experts
The number of parameters in the experts themselves is:
(hparams.num_experts
* (input_dim.size + output_dim.size)
* hparams.moe_hidden_size)
The input is n-dimensional: [<batch_and_length_dims>, input_dim], consisting
of the representations of all positions in a batch of sequences.
Each position of each sequence is sent to 0-3 experts. The expert
choices and the combination weights are determined by a learned gating
function.
This function returns a small auxiliary loss that should be added to the
training loss of the model. This loss helps to balance expert usage.
Without the loss, it is very likely that a few experts will be trained and
the rest will starve.
Several hacks are necessary to get around current TPU limitations:
- To ensure static shapes, we enforce (by truncation/padding)
that each sequence send the same number of elements to each expert.
It would make more sense to enforce this equality over the entire batch,
but due to our hacked-up gather-by-matmul implementation, we need to divide
the batch into "groups". For each group, the same number of elements
are sent to each expert.
TODO(noam): Factor this code better. We want to be able to substitute
different code for the experts themselves.
Dimensions cheat sheet:
a, b: batch size
l: original sequence length
m: input depth
n: output depth
g, h: number of groups
s, t: group size
x, y: number of experts
c, d: expert capacity
input: [a0, b1, l, m]
input: [a0, g1, s, m]
dispatch_tensor_x: [a0, g1, s, x, c]
expert_input: [a0, g1, x, c, m]
alltoall: [a0, g, x1, c, m]
alltoall: [a0, g, x1, c, m]
transpose: [x1, a0, g, c, m]
reshape: [x1, h0, s, m]
assignment2: [x1, h0, t, y, d]
expert_input2: [x1, h0, y, d, m]
alltoall: [x1, h, y0, d, m]
...
reverse of that
gating params 0: [m, x]
gating params 1: [x1, m, y]
expert params:
[x1, y0, m, hidden]
[x1, y0, hidden, n]
Args:
inputs: a mtf.Tensor with shape [a, b, l, m]
output_dim: a mtf.Dimension (for Transformer, this is input_dim)
hparams: model hyperparameters
train: a boolean
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
Returns:
outputs: a Tensor with shape [a, b, l, n]
loss: a mtf scalar
Raises:
ValueError: on unrecognized hparams.moe_gating
"""
insert_outer_batch_dim = (len(inputs.shape.dims) == 3)
if insert_outer_batch_dim:
inputs = mtf.reshape(
inputs, [mtf.Dimension("outer_batch", 1)] + inputs.shape.dims)
assert len(hparams.moe_num_experts) == 2
a0, b1, l, m = inputs.shape.dims
hidden_dim = mtf.Dimension("expert_hidden", hparams.moe_hidden_size)
x1 = mtf.Dimension("expert_x", hparams.moe_num_experts[0])
y0 = mtf.Dimension("expert_y", hparams.moe_num_experts[1])
x = mtf.Dimension("expert_x_unsplit", hparams.moe_num_experts[0])
y = mtf.Dimension("expert_y_unsplit", hparams.moe_num_experts[1])
n = output_dim
# We "cheat" here and look at the mesh shape and layout. This is to ensure
# that the number of groups (g.size) is a multiple of the mesh dimension
# over which those groups are split.
num_groups, group_size = _split_into_groups(
b1.size * l.size, hparams.moe_group_size,
mtf.tensor_dim_to_mesh_dim_size(hparams.layout, hparams.mesh_shape, b1))
g1 = mtf.Dimension(b1.name, num_groups)
g = mtf.Dimension(b1.name + "_unsplit", g1.size)
s = mtf.Dimension("group_size_x", group_size)
# Each sequence sends (at most?) expert_capacity positions to each expert.
# Static expert_capacity dimension is needed for expert batch sizes
capacity_factor = (
hparams.moe_capacity_factor_train if train else
hparams.moe_capacity_factor_eval)
expert_capacity = min(s.size, int((s.size * capacity_factor) / x.size))
expert_capacity = max(expert_capacity, 4)
c = mtf.Dimension("expert_capacity_x", expert_capacity)
# We "cheat" here and look at the mesh shape and layout. This is to ensure
# that the number of groups (h.size) is a multiple of the mesh dimension
# over which those groups are split.
num_groups, group_size = _split_into_groups(
a0.size * g.size * c.size,
hparams.moe_group_size,
mtf.tensor_dim_to_mesh_dim_size(hparams.layout, hparams.mesh_shape, a0))
t = mtf.Dimension("group_size_y", group_size)
h0 = mtf.Dimension(a0.name, num_groups)
h = mtf.Dimension(a0.name + "_unsplit", h0.size)
expert_capacity = min(
t.size,
int((t.size * hparams.moe_capacity_factor_second_level) / y.size))
expert_capacity = max(expert_capacity, 4)
d = mtf.Dimension("expert_capacity_y", expert_capacity)
# First level of expert routing
# Reshape the inner batch size to a multiple of group_dim g1 and
# group_size_dim s.
inputs = mtf.reshape(inputs, [a0, g1, s, m])
# Get the assignments for the first level.
# dispatch_tensor_x has shape [a0, g1, s, x, c]
if hparams.moe_gating == "top_2":
dispatch_tensor_x, combine_tensor_x, loss_outer = _top_2_gating(
inputs=inputs,
outer_expert_dims=None,
experts_dim=x,
expert_capacity_dim=c,
hparams=hparams,
train=train)
else:
raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating)
# Now create expert_inputs based on the assignments.
# put num_experts dimension first to make split easier in alltoall
expert_inputs_x = mtf.einsum([inputs, dispatch_tensor_x], [x, a0, g1, c, m])
# we construct an "importance" Tensor for the inputs to the second-level
# gating. The importance of an input is 1.0 if it represents the
# first-choice expert-group and 0.5 if it represents the second-choice expert
# group. This is used by the second-level gating.
importance = mtf.reduce_sum(combine_tensor_x, output_shape=[x, a0, g1, c])
importance = 0.5 * (
mtf.to_float(mtf.greater(importance, 0.5)) +
mtf.to_float(mtf.greater(importance, 0.0)))
# First level, all to all. Here we change the split dimension from g1 to x1.
expert_inputs_x = mtf.reshape(expert_inputs_x, mtf.Shape(
[x1, a0, g, c, m]))
importance = mtf.reshape(importance, [x1, a0, g, c])
# Second level of expert routing
# Reshape the expert_inputs outer batch dim to be a multiple of group_dim h0
# and group_size_dim t.
inputs_y = mtf.reshape(expert_inputs_x, [x1, h0, t, m])
importance = mtf.reshape(importance, [x1, h0, t])
# Get the assignments for the second level.
# dispatch_tensor_y has shape [x1, h0, t, y, d]
if hparams.moe_gating == "top_2":
dispatch_tensor_y, combine_tensor_y, loss_inner = _top_2_gating(
inputs=inputs_y,
outer_expert_dims=[x1],
experts_dim=y,
expert_capacity_dim=d,
hparams=hparams,
train=train,
importance=importance)
else:
raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating)
# Now create expert_inputs based on the assignments.
# put num_experts dimension first to make split easier in alltoall
expert_inputs_y = mtf.einsum([inputs_y, dispatch_tensor_y], [y, x1, h0, d, m])
# Second level, all to all. Here we change the split dimension from h0 to y0.
expert_inputs_y = mtf.reshape(expert_inputs_y, mtf.Shape(
[y0, x1, h, d, m]))
hidden_output = mtf.layers.dense(
expert_inputs_y, hidden_dim, expert_dims=[y0, x1],
activation=mtf.relu, use_bias=False, master_dtype=master_dtype,
slice_dtype=slice_dtype, name="expert0")
expert_output = mtf.layers.dense(
hidden_output, output_dim, expert_dims=[y0, x1],
use_bias=False, master_dtype=master_dtype, slice_dtype=slice_dtype,
name="expert1")
# NOW COMBINE EXPERT OUTPUTS (reversing everything we have done)
# expert_output has shape [y0, x1, h, d, n]
# alltoall
expert_output = mtf.reshape(expert_output, mtf.Shape(
[y, x1, h0, d, n]))
# combine results from inner level
output_y = mtf.einsum([expert_output, combine_tensor_y], [x1, h0, t, n])
# Reshape the combined tensor from inner level to now contain outer_batch_dim
# a0 and group_dim g
output = mtf.reshape(output_y, [x1, a0, g, c, n])
# alltoall from expert_dim x to group_dim g1
expert_output_x = mtf.reshape(output, mtf.Shape([x, a0, g1, c, n]))
# combine results from outer level
output_x = mtf.einsum([expert_output_x, combine_tensor_x], [a0, g1, s, n])
# Reshape the combined tensor to now contain inner_batch_dim
# b1 and the original sequence length
output = mtf.reshape(output_x, [a0, b1, l, n])
if insert_outer_batch_dim:
output = mtf.reshape(output, [b1, l, n])
return output, (loss_outer + loss_inner) * hparams.moe_loss_coef
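# A brief routing sketch for the hierarchical layer above, assuming
# hparams.moe_num_experts = [4, 8] (illustrative values): the first gating step
# sends each position to up to two of the x = 4 expert groups, and the second
# gating step then routes it to experts within the chosen group, so the layer
# holds 4 * 8 = 32 experts in total. Because second-choice groups receive
# importance 0.5, and the second-level gate sends inputs with importance < 1.0
# to at most one expert, a position reaches at most 2 + 1 = 3 experts overall,
# matching the "0-3 experts" statement in the docstring.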
def _top_2_gating(
inputs, outer_expert_dims, experts_dim, expert_capacity_dim,
hparams, train, importance=None):
"""Compute gating for mixture-of-experts in TensorFlow.
  Note: until the algorithm and interface solidify, we pass in a hyperparameters
  dictionary in order not to complicate the interface in mtf_transformer.py.
Once this code moves out of "research", we should pass the hyperparameters
separately.
Hyperparameters used:
hparams.moe_use_second_place_loss: a boolean
hparams.moe_second_policy_train: a string
hparams.moe_second_policy_eval: a string
hparams.moe_second_threshold: a float
The returned forward assignment is a tensor used to map (via einsum) from the
inputs to the expert_inputs. Likewise, the returned combine_tensor is
used to map (via einsum) from the expert outputs to the outputs. Both the
forward and backward assignments are mostly zeros. The shapes of the tensors
are as follows.
inputs: [<batch_dims>, group_size_dim, input_dim]
importance: [<batch_dims>, group_size_dim]
dispatch_tensor:
[<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]
expert_inputs:
[<batch_dims>, experts_dim, expert_capacity_dim, input_dim]
expert_outputs: [<batch_dims>, experts_dim, expert_capacity_dim, output_dim]
combine_tensor:
[<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]
outputs: [<batch_dims>, group_size_dim, output_dim]
"importance" is an optional tensor with one floating-point value for each
input vector. If the importance of an input is 1.0, then we send it to
up to 2 experts. If 0.0 < importance < 1.0, then we send it to at most
one expert. If importance == 0.0, then we send it to no experts.
We use "importance" at the second-level gating function of a hierarchical
mixture of experts. Inputs to the first-choice expert-group get importance
1.0. Inputs to the second-choice expert group get importance 0.5.
Inputs that represent padding get importance 0.0.
Args:
inputs: a mtf.Tensor with shape [<batch_dims>, group_size_dim, input_dim]
outer_expert_dims: an optional list of dimensions. This is for the case
where we are at an inner level of a hierarchical MoE.
experts_dim: a Dimension (the number of experts)
expert_capacity_dim: a Dimension (number of examples per group per expert)
hparams: model hyperparameters.
train: a boolean
importance: an optional tensor with shape [<batch_dims>, group_size_dim]
Returns:
dispatch_tensor: a Tensor with shape
[<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]
combine_tensor: a Tensor with shape
[<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]
loss: a mtf scalar
Raises:
ValueError: on illegal hyperparameters
"""
group_size_dim, unused_input_dim = inputs.shape.dims[-2:]
raw_gates = mtf.softmax(mtf.layers.dense(
inputs, experts_dim, use_bias=False,
expert_dims=outer_expert_dims), experts_dim)
# The internals of this function run in float32.
# bfloat16 seems to reduce quality.
raw_gates = mtf.to_float(raw_gates)
expert_capacity_f = float(expert_capacity_dim.size)
  # FIND TOP 2 EXPERTS PER POSITION
# Find the top expert for each position. shape=[batch, group]
index_1, gate_1 = mtf.top_1(raw_gates, experts_dim)
# [batch, group, experts]
mask_1 = mtf.one_hot(index_1, experts_dim, dtype=raw_gates.dtype)
density_1_proxy = raw_gates
if importance is not None:
mask_1 *= mtf.to_float(mtf.equal(importance, 1.0))
gate_1 *= mtf.to_float(mtf.equal(importance, 1.0))
density_1_proxy *= mtf.to_float(mtf.equal(importance, 1.0))
gates_without_top_1 = raw_gates * (1.0 - mask_1)
# [batch, group]
index_2, gate_2 = mtf.top_1(gates_without_top_1, experts_dim)
# [batch, group, experts]
mask_2 = mtf.one_hot(index_2, experts_dim, dtype=raw_gates.dtype)
if importance is not None:
mask_2 *= mtf.to_float(mtf.greater(importance, 0.0))
denom = gate_1 + gate_2 + 1e-9
gate_1 /= denom
gate_2 /= denom
# BALANCING LOSSES
# shape = [batch, experts]
# We want to equalize the fraction of the batch assigned to each expert
density_1 = mtf.reduce_mean(mask_1, reduced_dim=group_size_dim)
# Something continuous that is correlated with what we want to equalize.
density_1_proxy = mtf.reduce_mean(density_1_proxy, reduced_dim=group_size_dim)
density_1 = mtf.Print(
density_1, [mtf.reduce_mean(density_1, output_shape=[experts_dim])],
"density_1", summarize=1000)
loss = (mtf.reduce_mean(density_1_proxy * density_1)
* float(experts_dim.size * experts_dim.size))
if hparams.moe_use_second_place_loss:
# Also add a loss to encourage all experts to be used equally also as the
# second-place expert. Experimentally, this seems to be a wash.
# We want to equalize the fraction of the batch assigned to each expert:
density_2 = mtf.reduce_mean(mask_2, reduced_dim=group_size_dim)
# As a proxy for density_2, we renormalize the raw gates after the top one
# has been removed.
normalized = gates_without_top_1 / (
mtf.reduce_sum(gates_without_top_1, reduced_dim=experts_dim) + 1e-9)
density_2_proxy = mtf.reduce_mean(normalized, reduced_dim=group_size_dim)
loss_2 = (mtf.reduce_mean(density_2_proxy * density_2)
* float(experts_dim.size * experts_dim.size))
loss += loss_2 * 0.5
# Depending on the policy in the hparams, we may drop out some of the
# second-place experts.
policy = (
hparams.moe_second_policy_train if train else
hparams.moe_second_policy_eval)
threshold = (
hparams.moe_second_threshold_train if train else
hparams.moe_second_threshold_eval)
if policy == "all":
# Use second-place experts for all examples.
pass
elif policy == "none":
    # Never use second-place experts.
mask_2 = mtf.zeros_like(mask_2)
elif policy == "threshold":
# Use second-place experts if gate_2 > threshold.
mask_2 *= mtf.to_float(mtf.greater(gate_2, threshold))
elif policy == "random":
    # Use second-place experts with probability min(1.0, gate_2 / threshold).
mask_2 *= mtf.to_float(
mtf.less(mtf.random_uniform(gate_2.mesh, gate_2.shape),
gate_2 / max(threshold, 1e-9)))
else:
raise ValueError("Unknown policy %s" % policy)
mask_2 = mtf.Print(
mask_2, [mtf.reduce_mean(mask_2, output_shape=[experts_dim])],
"density_2", summarize=1000)
# COMPUTE ASSIGNMENT TO EXPERTS
# [batch, group, experts]
# This is the position within the expert's mini-batch for this sequence
position_in_expert_1 = mtf.cumsum(
mask_1, group_size_dim, exclusive=True) * mask_1
# Remove the elements that don't fit. [batch, group, experts]
mask_1 *= mtf.to_float(mtf.less(position_in_expert_1, expert_capacity_f))
# [batch, experts]
# How many examples in this sequence go to this expert
mask_1_count = mtf.reduce_sum(mask_1, reduced_dim=group_size_dim)
# [batch, group] - mostly ones, but zeros where something didn't fit
mask_1_flat = mtf.reduce_sum(mask_1, reduced_dim=experts_dim)
# [batch, group]
position_in_expert_1 = mtf.reduce_sum(
position_in_expert_1, reduced_dim=experts_dim)
# Weight assigned to first expert. [batch, group]
gate_1 *= mask_1_flat
# [batch, group, experts]
position_in_expert_2 = (
mtf.cumsum(mask_2, group_size_dim, exclusive=True) + mask_1_count)
position_in_expert_2 *= mask_2
mask_2 *= mtf.to_float(mtf.less(position_in_expert_2, expert_capacity_f))
# mask_2_count = mtf.reduce_sum(mask_2, reduced_dim=experts_dim)
mask_2_flat = mtf.reduce_sum(mask_2, reduced_dim=experts_dim)
gate_2 *= mask_2_flat
position_in_expert_2 = mtf.reduce_sum(
position_in_expert_2, reduced_dim=experts_dim)
# [batch, group, experts, expert_capacity]
combine_tensor = (
gate_1 * mask_1_flat
* mtf.one_hot(index_1, experts_dim)
* mtf.one_hot(mtf.to_int32(position_in_expert_1), expert_capacity_dim) +
gate_2 * mask_2_flat
* mtf.one_hot(index_2, experts_dim)
* mtf.one_hot(mtf.to_int32(position_in_expert_2), expert_capacity_dim))
combine_tensor = mtf.cast(combine_tensor, inputs.dtype)
loss = mtf.cast(loss, inputs.dtype)
dispatch_tensor = mtf.cast(
mtf.cast(combine_tensor, tf.bool), combine_tensor.dtype)
return dispatch_tensor, combine_tensor, loss
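# A small illustrative sketch of the gating above for one group of 4 positions
# and 2 experts (the gate values are made up). Suppose the softmaxed gates are
#
#   raw_gates = [[0.9, 0.1],
#                [0.2, 0.8],
#                [0.6, 0.4],
#                [0.7, 0.3]]
#
# so index_1 = [0, 1, 0, 0] and index_2 = [1, 0, 1, 1]. With expert_capacity = 2,
# the exclusive cumulative sum over the group gives expert 0 the positions 0 and
# 2 and drops position 3's first choice (its slot index 2 is not < capacity),
# while gate_1 and gate_2 are first renormalised so gate_1 + gate_2 = 1 before
# they weight the combine_tensor.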
def set_default_moe_hparams(hparams):
"""Add necessary hyperparameters for mixture-of-experts."""
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-2
hparams.add_hparam("moe_gating", "top_2")
# Experts have fixed capacity per batch. We need some extra capacity
# in case gating is not perfectly balanced.
# moe_capacity_factor_* should be set to a value >=1.
hparams.add_hparam("moe_capacity_factor_train", 1.25)
hparams.add_hparam("moe_capacity_factor_eval", 2.0)
hparams.add_hparam("moe_capacity_factor_second_level", 1.0)
# Each expert has a hidden layer with this size.
hparams.add_hparam("moe_hidden_size", 4096)
# For gating, divide inputs into groups of this size before gating.
# Each group sends the same number of inputs to each expert.
# Ideally, the group size would be the whole batch, but this is expensive
# due to our use of matrix multiplication for reordering.
hparams.add_hparam("moe_group_size", 1024)
  # For top_2 gating, whether to impose an additional loss so that the
  # experts are used equally often as the second-place expert.
hparams.add_hparam("moe_use_second_place_loss", 0)
# In top_2 gating, policy for whether to use a second-place expert.
# Legal values are:
# "all": always
# "none": never
# "threshold": if gate value > the given threshold
# "random": if gate value > threshold*random_uniform(0,1)
hparams.add_hparam("moe_second_policy_train", "random")
hparams.add_hparam("moe_second_policy_eval", "random")
hparams.add_hparam("moe_second_threshold_train", 0.2)
hparams.add_hparam("moe_second_threshold_eval", 0.2)
def _split_into_groups(n, max_group_size, mesh_dim_size):
"""Helper function for figuring out how to split a dimensino into groups.
We have a dimension with size n and we want to split it into
two dimensions: n = num_groups * group_size
group_size should be the largest possible value meeting the constraints:
group_size <= max_group_size
(num_groups = n/group_size) is a multiple of mesh_dim_size
Args:
n: an integer
max_group_size: an integer
mesh_dim_size: an integer
Returns:
num_groups: an integer
group_size: an integer
Raises:
ValueError: if n is not a multiple of mesh_dim_size
"""
if n % mesh_dim_size != 0:
raise ValueError(
"n=%d is not a multiple of mesh_dim_size=%d" % (n, mesh_dim_size))
num_groups = max(1, n // max_group_size)
while (num_groups % mesh_dim_size != 0 or n % num_groups != 0):
num_groups += 1
group_size = n // num_groups
tf.logging.info(
"_split_into_groups(n=%d, max_group_size=%d, mesh_dim_size=%d)"
" = (num_groups=%d group_size=%d)" %
(n, max_group_size, mesh_dim_size, num_groups, group_size))
return num_groups, group_size
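def _split_into_groups_example():
  """Hypothetical example added for illustration only; not part of the module.

  With n=4096, max_group_size=1024 and mesh_dim_size=8, the search above
  settles on the smallest num_groups that divides 4096 and is a multiple of 8,
  namely 8, so group_size becomes 512.
  """
  num_groups, group_size = _split_into_groups(4096, 1024, 8)
  assert (num_groups, group_size) == (8, 512)
  return num_groups, group_size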
| 40.122059
| 80
| 0.728549
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
import tensorflow as tf
def transformer_moe_layer_v1(inputs, output_dim, hparams, train,
master_dtype=tf.bfloat16,
slice_dtype=tf.float32):
orig_inputs = inputs
input_dim = inputs.shape.dims[-1]
hidden_dim = mtf.Dimension("expert_hidden", hparams.moe_hidden_size)
experts_dim = mtf.Dimension("experts", hparams.moe_num_experts)
group_size_dim = mtf.Dimension("group", hparams.moe_group_size)
batch_dim = mtf.Dimension(
orig_inputs.shape[0].name,
orig_inputs.shape.size // (group_size_dim.size * input_dim.size))
inputs = mtf.reshape(inputs, [batch_dim, group_size_dim, input_dim])
capacity_factor = (
hparams.moe_capacity_factor_train if train else
hparams.moe_capacity_factor_eval)
expert_capacity = min(
group_size_dim.size,
int((group_size_dim.size * capacity_factor) / experts_dim.size))
expert_capacity_dim = mtf.Dimension("expert_capacity", expert_capacity)
experts_dim_unsplit = mtf.Dimension("expert_unsplit", experts_dim.size)
batch_dim_unsplit = mtf.Dimension("batch_unsplit", batch_dim.size)
if hparams.moe_gating == "top_2":
dispatch_tensor, combine_tensor, loss = _top_2_gating(
inputs=inputs,
outer_expert_dims=None,
experts_dim=experts_dim_unsplit,
expert_capacity_dim=expert_capacity_dim,
hparams=hparams,
train=train)
else:
raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating)
expert_inputs = mtf.einsum([inputs, dispatch_tensor], mtf.Shape(
[experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim]))
expert_inputs = mtf.reshape(expert_inputs, mtf.Shape(
[experts_dim, batch_dim_unsplit, expert_capacity_dim, input_dim]))
h = mtf.layers.dense(
expert_inputs, hidden_dim, expert_dims=[experts_dim],
activation=mtf.relu, use_bias=False, master_dtype=master_dtype,
slice_dtype=slice_dtype, name="x0")
expert_output = mtf.layers.dense(
h, output_dim, expert_dims=[experts_dim], use_bias=False,
master_dtype=master_dtype, slice_dtype=slice_dtype, name="x1")
expert_output = mtf.reshape(expert_output, mtf.Shape(
[experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim]))
output = mtf.einsum([expert_output, combine_tensor], mtf.Shape(
[batch_dim, group_size_dim, output_dim]))
output = mtf.reshape(output, orig_inputs.shape.dims[:-1] + [output_dim])
return output, loss * hparams.moe_loss_coef
def transformer_moe_layer_v2(inputs, output_dim, hparams, train,
master_dtype=tf.bfloat16, slice_dtype=tf.float32):
insert_outer_batch_dim = (len(inputs.shape.dims) == 3)
if insert_outer_batch_dim:
inputs = mtf.reshape(
inputs, [mtf.Dimension("outer_batch", 1)] + inputs.shape.dims)
assert len(hparams.moe_num_experts) == 2
a0, b1, l, m = inputs.shape.dims
hidden_dim = mtf.Dimension("expert_hidden", hparams.moe_hidden_size)
x1 = mtf.Dimension("expert_x", hparams.moe_num_experts[0])
y0 = mtf.Dimension("expert_y", hparams.moe_num_experts[1])
x = mtf.Dimension("expert_x_unsplit", hparams.moe_num_experts[0])
y = mtf.Dimension("expert_y_unsplit", hparams.moe_num_experts[1])
n = output_dim
num_groups, group_size = _split_into_groups(
b1.size * l.size, hparams.moe_group_size,
mtf.tensor_dim_to_mesh_dim_size(hparams.layout, hparams.mesh_shape, b1))
g1 = mtf.Dimension(b1.name, num_groups)
g = mtf.Dimension(b1.name + "_unsplit", g1.size)
s = mtf.Dimension("group_size_x", group_size)
capacity_factor = (
hparams.moe_capacity_factor_train if train else
hparams.moe_capacity_factor_eval)
expert_capacity = min(s.size, int((s.size * capacity_factor) / x.size))
expert_capacity = max(expert_capacity, 4)
c = mtf.Dimension("expert_capacity_x", expert_capacity)
num_groups, group_size = _split_into_groups(
a0.size * g.size * c.size,
hparams.moe_group_size,
mtf.tensor_dim_to_mesh_dim_size(hparams.layout, hparams.mesh_shape, a0))
t = mtf.Dimension("group_size_y", group_size)
h0 = mtf.Dimension(a0.name, num_groups)
h = mtf.Dimension(a0.name + "_unsplit", h0.size)
expert_capacity = min(
t.size,
int((t.size * hparams.moe_capacity_factor_second_level) / y.size))
expert_capacity = max(expert_capacity, 4)
d = mtf.Dimension("expert_capacity_y", expert_capacity)
inputs = mtf.reshape(inputs, [a0, g1, s, m])
if hparams.moe_gating == "top_2":
dispatch_tensor_x, combine_tensor_x, loss_outer = _top_2_gating(
inputs=inputs,
outer_expert_dims=None,
experts_dim=x,
expert_capacity_dim=c,
hparams=hparams,
train=train)
else:
raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating)
expert_inputs_x = mtf.einsum([inputs, dispatch_tensor_x], [x, a0, g1, c, m])
importance = mtf.reduce_sum(combine_tensor_x, output_shape=[x, a0, g1, c])
importance = 0.5 * (
mtf.to_float(mtf.greater(importance, 0.5)) +
mtf.to_float(mtf.greater(importance, 0.0)))
expert_inputs_x = mtf.reshape(expert_inputs_x, mtf.Shape(
[x1, a0, g, c, m]))
importance = mtf.reshape(importance, [x1, a0, g, c])
inputs_y = mtf.reshape(expert_inputs_x, [x1, h0, t, m])
importance = mtf.reshape(importance, [x1, h0, t])
if hparams.moe_gating == "top_2":
dispatch_tensor_y, combine_tensor_y, loss_inner = _top_2_gating(
inputs=inputs_y,
outer_expert_dims=[x1],
experts_dim=y,
expert_capacity_dim=d,
hparams=hparams,
train=train,
importance=importance)
else:
raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating)
expert_inputs_y = mtf.einsum([inputs_y, dispatch_tensor_y], [y, x1, h0, d, m])
expert_inputs_y = mtf.reshape(expert_inputs_y, mtf.Shape(
[y0, x1, h, d, m]))
hidden_output = mtf.layers.dense(
expert_inputs_y, hidden_dim, expert_dims=[y0, x1],
activation=mtf.relu, use_bias=False, master_dtype=master_dtype,
slice_dtype=slice_dtype, name="expert0")
expert_output = mtf.layers.dense(
hidden_output, output_dim, expert_dims=[y0, x1],
use_bias=False, master_dtype=master_dtype, slice_dtype=slice_dtype,
name="expert1")
expert_output = mtf.reshape(expert_output, mtf.Shape(
[y, x1, h0, d, n]))
output_y = mtf.einsum([expert_output, combine_tensor_y], [x1, h0, t, n])
output = mtf.reshape(output_y, [x1, a0, g, c, n])
expert_output_x = mtf.reshape(output, mtf.Shape([x, a0, g1, c, n]))
output_x = mtf.einsum([expert_output_x, combine_tensor_x], [a0, g1, s, n])
output = mtf.reshape(output_x, [a0, b1, l, n])
if insert_outer_batch_dim:
output = mtf.reshape(output, [b1, l, n])
return output, (loss_outer + loss_inner) * hparams.moe_loss_coef
def _top_2_gating(
inputs, outer_expert_dims, experts_dim, expert_capacity_dim,
hparams, train, importance=None):
group_size_dim, unused_input_dim = inputs.shape.dims[-2:]
raw_gates = mtf.softmax(mtf.layers.dense(
inputs, experts_dim, use_bias=False,
expert_dims=outer_expert_dims), experts_dim)
raw_gates = mtf.to_float(raw_gates)
expert_capacity_f = float(expert_capacity_dim.size)
index_1, gate_1 = mtf.top_1(raw_gates, experts_dim)
mask_1 = mtf.one_hot(index_1, experts_dim, dtype=raw_gates.dtype)
density_1_proxy = raw_gates
if importance is not None:
mask_1 *= mtf.to_float(mtf.equal(importance, 1.0))
gate_1 *= mtf.to_float(mtf.equal(importance, 1.0))
density_1_proxy *= mtf.to_float(mtf.equal(importance, 1.0))
gates_without_top_1 = raw_gates * (1.0 - mask_1)
index_2, gate_2 = mtf.top_1(gates_without_top_1, experts_dim)
mask_2 = mtf.one_hot(index_2, experts_dim, dtype=raw_gates.dtype)
if importance is not None:
mask_2 *= mtf.to_float(mtf.greater(importance, 0.0))
denom = gate_1 + gate_2 + 1e-9
gate_1 /= denom
gate_2 /= denom
density_1 = mtf.reduce_mean(mask_1, reduced_dim=group_size_dim)
density_1_proxy = mtf.reduce_mean(density_1_proxy, reduced_dim=group_size_dim)
density_1 = mtf.Print(
density_1, [mtf.reduce_mean(density_1, output_shape=[experts_dim])],
"density_1", summarize=1000)
loss = (mtf.reduce_mean(density_1_proxy * density_1)
* float(experts_dim.size * experts_dim.size))
if hparams.moe_use_second_place_loss:
density_2 = mtf.reduce_mean(mask_2, reduced_dim=group_size_dim)
normalized = gates_without_top_1 / (
mtf.reduce_sum(gates_without_top_1, reduced_dim=experts_dim) + 1e-9)
density_2_proxy = mtf.reduce_mean(normalized, reduced_dim=group_size_dim)
loss_2 = (mtf.reduce_mean(density_2_proxy * density_2)
* float(experts_dim.size * experts_dim.size))
loss += loss_2 * 0.5
policy = (
hparams.moe_second_policy_train if train else
hparams.moe_second_policy_eval)
threshold = (
hparams.moe_second_threshold_train if train else
hparams.moe_second_threshold_eval)
if policy == "all":
pass
elif policy == "none":
mask_2 = mtf.zeros_like(mask_2)
elif policy == "threshold":
mask_2 *= mtf.to_float(mtf.greater(gate_2, threshold))
elif policy == "random":
mask_2 *= mtf.to_float(
mtf.less(mtf.random_uniform(gate_2.mesh, gate_2.shape),
gate_2 / max(threshold, 1e-9)))
else:
raise ValueError("Unknown policy %s" % policy)
mask_2 = mtf.Print(
mask_2, [mtf.reduce_mean(mask_2, output_shape=[experts_dim])],
"density_2", summarize=1000)
position_in_expert_1 = mtf.cumsum(
mask_1, group_size_dim, exclusive=True) * mask_1
# Remove the elements that don't fit. [batch, group, experts]
mask_1 *= mtf.to_float(mtf.less(position_in_expert_1, expert_capacity_f))
mask_1_count = mtf.reduce_sum(mask_1, reduced_dim=group_size_dim)
mask_1_flat = mtf.reduce_sum(mask_1, reduced_dim=experts_dim)
# [batch, group]
position_in_expert_1 = mtf.reduce_sum(
position_in_expert_1, reduced_dim=experts_dim)
# Weight assigned to first expert. [batch, group]
gate_1 *= mask_1_flat
# [batch, group, experts]
position_in_expert_2 = (
mtf.cumsum(mask_2, group_size_dim, exclusive=True) + mask_1_count)
position_in_expert_2 *= mask_2
mask_2 *= mtf.to_float(mtf.less(position_in_expert_2, expert_capacity_f))
# mask_2_count = mtf.reduce_sum(mask_2, reduced_dim=experts_dim)
mask_2_flat = mtf.reduce_sum(mask_2, reduced_dim=experts_dim)
gate_2 *= mask_2_flat
position_in_expert_2 = mtf.reduce_sum(
position_in_expert_2, reduced_dim=experts_dim)
# [batch, group, experts, expert_capacity]
combine_tensor = (
gate_1 * mask_1_flat
* mtf.one_hot(index_1, experts_dim)
* mtf.one_hot(mtf.to_int32(position_in_expert_1), expert_capacity_dim) +
gate_2 * mask_2_flat
* mtf.one_hot(index_2, experts_dim)
* mtf.one_hot(mtf.to_int32(position_in_expert_2), expert_capacity_dim))
combine_tensor = mtf.cast(combine_tensor, inputs.dtype)
loss = mtf.cast(loss, inputs.dtype)
dispatch_tensor = mtf.cast(
mtf.cast(combine_tensor, tf.bool), combine_tensor.dtype)
return dispatch_tensor, combine_tensor, loss
def set_default_moe_hparams(hparams):
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-2
hparams.add_hparam("moe_gating", "top_2")
# Experts have fixed capacity per batch. We need some extra capacity
# in case gating is not perfectly balanced.
# moe_capacity_factor_* should be set to a value >=1.
hparams.add_hparam("moe_capacity_factor_train", 1.25)
hparams.add_hparam("moe_capacity_factor_eval", 2.0)
hparams.add_hparam("moe_capacity_factor_second_level", 1.0)
# Each expert has a hidden layer with this size.
hparams.add_hparam("moe_hidden_size", 4096)
# For gating, divide inputs into groups of this size before gating.
# Each group sends the same number of inputs to each expert.
# Ideally, the group size would be the whole batch, but this is expensive
# due to our use of matrix multiplication for reordering.
hparams.add_hparam("moe_group_size", 1024)
# For top_2 gating, whether to impose an additional loss in order to make
# the experts equally used as the second-place expert.
hparams.add_hparam("moe_use_second_place_loss", 0)
# In top_2 gating, policy for whether to use a second-place expert.
# Legal values are:
# "all": always
# "none": never
# "threshold": if gate value > the given threshold
# "random": if gate value > threshold*random_uniform(0,1)
hparams.add_hparam("moe_second_policy_train", "random")
hparams.add_hparam("moe_second_policy_eval", "random")
hparams.add_hparam("moe_second_threshold_train", 0.2)
hparams.add_hparam("moe_second_threshold_eval", 0.2)
def _split_into_groups(n, max_group_size, mesh_dim_size):
if n % mesh_dim_size != 0:
raise ValueError(
"n=%d is not a multiple of mesh_dim_size=%d" % (n, mesh_dim_size))
num_groups = max(1, n // max_group_size)
while (num_groups % mesh_dim_size != 0 or n % num_groups != 0):
num_groups += 1
group_size = n // num_groups
tf.logging.info(
"_split_into_groups(n=%d, max_group_size=%d, mesh_dim_size=%d)"
" = (num_groups=%d group_size=%d)" %
(n, max_group_size, mesh_dim_size, num_groups, group_size))
return num_groups, group_size
| true
| true
|
f70b0f4818fe2a2313130690f64f8143214ce044
| 2,082
|
py
|
Python
|
generator/mnistGenerator.py
|
Kotwic4/SCOTR
|
6afabedb672641a9777d8aa9d7b75f998e53c0c9
|
[
"MIT"
] | 2
|
2018-01-15T12:27:10.000Z
|
2019-01-30T18:42:29.000Z
|
generator/mnistGenerator.py
|
Kotwic4/SCOTR
|
6afabedb672641a9777d8aa9d7b75f998e53c0c9
|
[
"MIT"
] | null | null | null |
generator/mnistGenerator.py
|
Kotwic4/SCOTR
|
6afabedb672641a9777d8aa9d7b75f998e53c0c9
|
[
"MIT"
] | null | null | null |
import random
from sklearn.datasets import fetch_mldata
from util import open_file_in_directory
MNIST_DIR = './tmp/mnist'
MNIST_TRAIN_DIR = './mnist/train'
MNIST_TEST_DIR = './mnist/test'
MNIST_SAMPLE_DIR = './mnist/sample'
TEST_CASES = 60000
def mnist_img_to_file(mnist_img, file):
for x in range(28):
for y in range(28):
file.write(str(mnist_img[x * 28 + y]) + " ")
file.write('\n')
def generate_samples(data, labels, directory='.', filename='results.txt', sampleNumber=100):
result = open_file_in_directory(directory, filename)
for i in range(sampleNumber):
index = random.randrange(data.shape[0])
label = labels[index]
img = data[index]
img_filename = str(index) + ".txt"
line = img_filename + ' ' + str(label) + '\n'
result.write(line)
file = open_file_in_directory(directory, img_filename)
mnist_img_to_file(img, file)
file.close()
result.close()
def generate_test_file(data, labels, directory='.', filename='results.txt'):
result = open_file_in_directory(directory, filename)
result.write(str(data.shape[0]) + '\n')
indexes = [i for i in range(data.shape[0])]
random.shuffle(indexes)
for i in indexes:
label = labels[i]
img = data[i]
line = str(label) + '\n'
result.write(line)
mnist_img_to_file(img, result)
result.close()
def generate_test_data(data, labels):
test_data = data[TEST_CASES:]
test_labels = labels[TEST_CASES:]
generate_test_file(test_data, test_labels, MNIST_TEST_DIR)
def generate_train_data(data, labels):
train_data = data[:TEST_CASES]
train_labels = labels[:TEST_CASES]
generate_test_file(train_data, train_labels, MNIST_TRAIN_DIR)
def main():
mnist = fetch_mldata('MNIST original', data_home=MNIST_DIR)
labels = mnist.target.astype(int)
data = mnist.data
generate_train_data(data, labels)
generate_test_data(data, labels)
generate_samples(data, labels, MNIST_SAMPLE_DIR)
if __name__ == "__main__":
main()
| 28.520548
| 92
| 0.67195
|
import random
from sklearn.datasets import fetch_mldata
from util import open_file_in_directory
MNIST_DIR = './tmp/mnist'
MNIST_TRAIN_DIR = './mnist/train'
MNIST_TEST_DIR = './mnist/test'
MNIST_SAMPLE_DIR = './mnist/sample'
TEST_CASES = 60000
def mnist_img_to_file(mnist_img, file):
for x in range(28):
for y in range(28):
file.write(str(mnist_img[x * 28 + y]) + " ")
file.write('\n')
def generate_samples(data, labels, directory='.', filename='results.txt', sampleNumber=100):
result = open_file_in_directory(directory, filename)
for i in range(sampleNumber):
index = random.randrange(data.shape[0])
label = labels[index]
img = data[index]
img_filename = str(index) + ".txt"
line = img_filename + ' ' + str(label) + '\n'
result.write(line)
file = open_file_in_directory(directory, img_filename)
mnist_img_to_file(img, file)
file.close()
result.close()
def generate_test_file(data, labels, directory='.', filename='results.txt'):
result = open_file_in_directory(directory, filename)
result.write(str(data.shape[0]) + '\n')
indexes = [i for i in range(data.shape[0])]
random.shuffle(indexes)
for i in indexes:
label = labels[i]
img = data[i]
line = str(label) + '\n'
result.write(line)
mnist_img_to_file(img, result)
result.close()
def generate_test_data(data, labels):
test_data = data[TEST_CASES:]
test_labels = labels[TEST_CASES:]
generate_test_file(test_data, test_labels, MNIST_TEST_DIR)
def generate_train_data(data, labels):
train_data = data[:TEST_CASES]
train_labels = labels[:TEST_CASES]
generate_test_file(train_data, train_labels, MNIST_TRAIN_DIR)
def main():
mnist = fetch_mldata('MNIST original', data_home=MNIST_DIR)
labels = mnist.target.astype(int)
data = mnist.data
generate_train_data(data, labels)
generate_test_data(data, labels)
generate_samples(data, labels, MNIST_SAMPLE_DIR)
if __name__ == "__main__":
main()
| true
| true
|
f70b0fff4768688affbca729bacf2b1bd853c80d
| 1,547
|
py
|
Python
|
apt/transport/transport.py
|
javajawa/debian-repo-remux
|
b6626b268acd1743208d8a399f8c975316cfbc80
|
[
"BSD-2-Clause"
] | 1
|
2019-10-31T08:36:29.000Z
|
2019-10-31T08:36:29.000Z
|
apt/transport/transport.py
|
javajawa/debian-repo-remux
|
b6626b268acd1743208d8a399f8c975316cfbc80
|
[
"BSD-2-Clause"
] | null | null | null |
apt/transport/transport.py
|
javajawa/debian-repo-remux
|
b6626b268acd1743208d8a399f8c975316cfbc80
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Abstract Transport
"""
import typing
import abc
from apt.transport.directorylisting import DirectoryListing
class Transport:
"""
Abstract class for retrieving information from repos
The functions 'exists' and 'open_read' are required to be implemented.
"""
@abc.abstractmethod
def exists(self, uri: str) -> bool:
"""
Returns whether a given uri exists.
:param str uri:
:return bool:
:raises URIMismatchError:
"""
@abc.abstractmethod
def open_read(self, uri: str) -> typing.IO:
"""
Opens a file as an IO-like for reading
:param string uri:
:return IO:
:raises URIMismatchError:
:raises FileNotFoundError:
"""
@abc.abstractmethod
def open_write(self, uri: str) -> typing.IO:
"""
Opens a file as an IO-like for writing
This function is required to handle the operation of creating directories
if the underlying data store has such a concept.
:param string uri:
:return:
:raises NotImplementedError:
:raises URIMismatchError:
"""
@abc.abstractmethod
def list_directory(self, uri: str) -> DirectoryListing:
"""
Returns a list of files and directories in a directory
:param string uri:
:return List[str]:
:raises NotImplementedError:
:raises URIMismatchError:
:raises FileNotFoundError:
"""
| 20.626667
| 81
| 0.606981
|
import typing
import abc
from apt.transport.directorylisting import DirectoryListing
class Transport:
@abc.abstractmethod
def exists(self, uri: str) -> bool:
@abc.abstractmethod
def open_read(self, uri: str) -> typing.IO:
@abc.abstractmethod
def open_write(self, uri: str) -> typing.IO:
@abc.abstractmethod
def list_directory(self, uri: str) -> DirectoryListing:
| true
| true
|
f70b102230ce619e7bdf83c48010380e4304b537
| 4,264
|
py
|
Python
|
biointeract/hub/dataload/sources/ConsensusPathDB/parser.py
|
biothings/biothings_interactions
|
7a8b16e8119d6505b6b5d89623051c11f3649430
|
[
"Apache-2.0"
] | null | null | null |
biointeract/hub/dataload/sources/ConsensusPathDB/parser.py
|
biothings/biothings_interactions
|
7a8b16e8119d6505b6b5d89623051c11f3649430
|
[
"Apache-2.0"
] | null | null | null |
biointeract/hub/dataload/sources/ConsensusPathDB/parser.py
|
biothings/biothings_interactions
|
7a8b16e8119d6505b6b5d89623051c11f3649430
|
[
"Apache-2.0"
] | null | null | null |
"""
CPDParser parses the ConsensusPathDB_human_PPI data file and yields
a generated dictionary of values.
Source Project: biothings.interactions
Author: Greg Taylor: greg.k.taylor@gmail.com
"""
import hashlib
import re
from hub.dataload.BiointeractParser import BiointeractParser
class CPDParser(BiointeractParser):
# Static Constants
EMPTY_FIELD = 'NA'
SEPARATOR = ','
HUMAN = '_HUMAN'
@staticmethod
def parse_interaction_participants(entry):
"""
Parse all interaction participants given as string from the tsv file.
The resulting participant identifier strings will be returned with a
trailing '_HUMAN' removed at the end.
:param entry: a string representing the list
:return: list of strings
"""
vals = CPDParser.parse_list(entry, CPDParser.SEPARATOR)
return list(map((lambda x: x.replace(CPDParser.HUMAN, '')), vals)) if vals else None
@staticmethod
def parse_interaction_publications(entry):
"""
Parse all interaction publications given as a string from the tsv file.
The resulting publication identifier strings will be converted to a
list of integers representing pubmed identifiers.
:param entry: a string representing the list
:return: list of integers
"""
vals = CPDParser.parse_list(entry, CPDParser.SEPARATOR)
return list(map(CPDParser.safe_int, vals)) if vals else None
@staticmethod
def parse_source_databases(entry):
"""
Parse all source databases given as a string from the tsv file.
:param entry: a string representing the list
:return: list of strings
"""
return CPDParser.parse_list(entry, CPDParser.SEPARATOR)
@staticmethod
def parse_cpd_tsv_line(line_dict):
"""
Parse a dictionary representing a tsv line with a key, value pair for
each column in the tsv file.
:param line_dict: a tsv line dictionary
        :return: a dictionary representing a parsed ConsensusPathDB record
"""
# Replace all empty fields with None
r = {k: v if v != CPDParser.EMPTY_FIELD else None for k, v in line_dict.items()}
r['interaction_confidence'] = CPDParser.safe_float(r['interaction_confidence'])
r['interaction_participants'] = CPDParser.parse_interaction_participants(r['interaction_participants'])
r['interaction_publications'] = CPDParser.parse_interaction_publications(r['interaction_publications'])
r['source_databases'] = CPDParser.parse_source_databases(r['source_databases'])
# Readjust for biothings.api record format
new_record = dict()
new_record['cpd'] = r
new_record['_id'] = CPDParser.compute_id(r['interaction_participants'])
# Sweep all empty values
new_record = CPDParser.sweep_record(new_record)
return new_record
@staticmethod
def parse_cpd_tsv_file(f):
"""
        Parse a tab-separated ConsensusPathDB file opened in binary mode.
:param f: file opened for reading in binary mode
:return: yields a generator of parsed objects
"""
for (i, line) in enumerate(f):
line = line.strip('\n')
# The first commented line is the database description
# The second commented line contains the column headers
if i == 1:
line = line.replace("# ", '') # Delete the comment prefix
header_dict = dict(enumerate(line.split('\t')))
print(header_dict)
# All subsequent lines contain row data
elif i > 1:
_r = {}
for (pos, val) in enumerate(line.split('\t')):
_r[header_dict[pos]] = val
yield CPDParser.parse_cpd_tsv_line(_r)
@staticmethod
def compute_id(participate_lst):
"""
Calculate an id field given a list of participants (which are gene symbols).
:param participate_lst:
:return:
"""
symbols = '-'.join(participate_lst)
hash_object = hashlib.md5(symbols.encode('utf-8'))
symbol_hash = hash_object.hexdigest()
return 'symbol:{}'.format(symbol_hash)
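def _example_usage():
    """
    Hypothetical usage sketch added for illustration only; not part of the
    original parser. It assumes BiointeractParser.parse_list splits the raw
    field on the separator character, so a participants field such as
    'CALM1_HUMAN,EGFR_HUMAN' becomes ['CALM1', 'EGFR'].
    """
    participants = CPDParser.parse_interaction_participants('CALM1_HUMAN,EGFR_HUMAN')
    # compute_id turns the participant list into a stable 'symbol:<md5>' id.
    return CPDParser.compute_id(participants)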
| 37.403509
| 111
| 0.64728
|
import hashlib
import re
from hub.dataload.BiointeractParser import BiointeractParser
class CPDParser(BiointeractParser):
EMPTY_FIELD = 'NA'
SEPARATOR = ','
HUMAN = '_HUMAN'
@staticmethod
def parse_interaction_participants(entry):
vals = CPDParser.parse_list(entry, CPDParser.SEPARATOR)
return list(map((lambda x: x.replace(CPDParser.HUMAN, '')), vals)) if vals else None
@staticmethod
def parse_interaction_publications(entry):
vals = CPDParser.parse_list(entry, CPDParser.SEPARATOR)
return list(map(CPDParser.safe_int, vals)) if vals else None
@staticmethod
def parse_source_databases(entry):
return CPDParser.parse_list(entry, CPDParser.SEPARATOR)
@staticmethod
def parse_cpd_tsv_line(line_dict):
r = {k: v if v != CPDParser.EMPTY_FIELD else None for k, v in line_dict.items()}
r['interaction_confidence'] = CPDParser.safe_float(r['interaction_confidence'])
r['interaction_participants'] = CPDParser.parse_interaction_participants(r['interaction_participants'])
r['interaction_publications'] = CPDParser.parse_interaction_publications(r['interaction_publications'])
r['source_databases'] = CPDParser.parse_source_databases(r['source_databases'])
new_record = dict()
new_record['cpd'] = r
new_record['_id'] = CPDParser.compute_id(r['interaction_participants'])
new_record = CPDParser.sweep_record(new_record)
return new_record
@staticmethod
def parse_cpd_tsv_file(f):
for (i, line) in enumerate(f):
line = line.strip('\n')
if i == 1:
line = line.replace("# ", '')
header_dict = dict(enumerate(line.split('\t')))
print(header_dict)
elif i > 1:
_r = {}
for (pos, val) in enumerate(line.split('\t')):
_r[header_dict[pos]] = val
yield CPDParser.parse_cpd_tsv_line(_r)
@staticmethod
def compute_id(participate_lst):
symbols = '-'.join(participate_lst)
hash_object = hashlib.md5(symbols.encode('utf-8'))
symbol_hash = hash_object.hexdigest()
return 'symbol:{}'.format(symbol_hash)
| true
| true
|
f70b1091614744431199f5372bcc30b19abcfd96
| 378
|
py
|
Python
|
tests/test_things.py
|
3jackdaws/distributed-asgi
|
acc341befe29b9e16ccb9da3d8887dff99636b2a
|
[
"MIT"
] | 1
|
2019-02-23T11:11:52.000Z
|
2019-02-23T11:11:52.000Z
|
tests/test_things.py
|
3jackdaws/distributed-asgi
|
acc341befe29b9e16ccb9da3d8887dff99636b2a
|
[
"MIT"
] | null | null | null |
tests/test_things.py
|
3jackdaws/distributed-asgi
|
acc341befe29b9e16ccb9da3d8887dff99636b2a
|
[
"MIT"
] | null | null | null |
import pytest
from distributed_asgi import create_path_distributor
def test_path_distributor():
dist = create_path_distributor(routes={
"/api/([a-z-]+)": r"\1"
})
for path, expected_key in [
("/api/banana", "banana"),
("/banana", None),
]:
instance = dist({"path":path})
assert instance.key == expected_key
| 21
| 52
| 0.582011
|
import pytest
from distributed_asgi import create_path_distributor
def test_path_distributor():
dist = create_path_distributor(routes={
"/api/([a-z-]+)": r"\1"
})
for path, expected_key in [
("/api/banana", "banana"),
("/banana", None),
]:
instance = dist({"path":path})
assert instance.key == expected_key
| true
| true
|
f70b10af0be0cb3da3d2d4e4ce538bc6e4775287
| 4,487
|
py
|
Python
|
metadata_service/__init__.py
|
worldwise001/amundsenmetadatalibrary
|
9914c8b51d38b8bd76d3249eb4f7fcce3e198d09
|
[
"Apache-2.0"
] | null | null | null |
metadata_service/__init__.py
|
worldwise001/amundsenmetadatalibrary
|
9914c8b51d38b8bd76d3249eb4f7fcce3e198d09
|
[
"Apache-2.0"
] | 1
|
2019-09-21T23:59:46.000Z
|
2019-09-21T23:59:46.000Z
|
metadata_service/__init__.py
|
worldwise001/amundsenmetadatalibrary
|
9914c8b51d38b8bd76d3249eb4f7fcce3e198d09
|
[
"Apache-2.0"
] | 1
|
2019-09-21T23:56:40.000Z
|
2019-09-21T23:56:40.000Z
|
import ast
import importlib
import logging
import os
import sys
from typing import Dict, Any # noqa: F401
from flask import Flask, Blueprint
from flask_restful import Api
from metadata_service.api.column import ColumnDescriptionAPI
from metadata_service.api.healthcheck import healthcheck
from metadata_service.api.popular_tables import PopularTablesAPI
from metadata_service.api.system import Neo4jDetailAPI
from metadata_service.api.table \
import TableDetailAPI, TableOwnerAPI, TableTagAPI, TableDescriptionAPI
from metadata_service.api.tag import TagAPI
from metadata_service.api.user import UserDetailAPI, UserFollowAPI, UserOwnAPI, UserReadAPI
# For customized flask use below arguments to override.
FLASK_APP_MODULE_NAME = os.getenv('FLASK_APP_MODULE_NAME')
FLASK_APP_CLASS_NAME = os.getenv('FLASK_APP_CLASS_NAME')
FLASK_APP_KWARGS_DICT_STR = os.getenv('FLASK_APP_KWARGS_DICT')
def create_app(*, config_module_class: str) -> Flask:
"""
    Creates the app inside a function so that Flask and its extensions can be
    initialized with a specific config. All of the API routes are defined here
    so they can be seen in one place, separate from their implementations.
    The config is fetched via a module.class name, which can be passed in
    through an environment variable. This lets the config be resolved from the
    runtime PYTHONPATH so that a Config class can easily be injected.
More on: http://flask.pocoo.org/docs/1.0/config/
:param config_module_class: name of the config (TODO: Implement config.py)
:return: Flask
"""
if FLASK_APP_MODULE_NAME and FLASK_APP_CLASS_NAME:
print('Using requested Flask module {module_name} and class {class_name}'
.format(module_name=FLASK_APP_MODULE_NAME, class_name=FLASK_APP_CLASS_NAME), file=sys.stderr)
class_obj = getattr(importlib.import_module(FLASK_APP_MODULE_NAME), FLASK_APP_CLASS_NAME)
flask_kwargs_dict = {} # type: Dict[str, Any]
if FLASK_APP_KWARGS_DICT_STR:
print('Using kwargs {kwargs} to instantiate Flask'.format(kwargs=FLASK_APP_KWARGS_DICT_STR),
file=sys.stderr)
flask_kwargs_dict = ast.literal_eval(FLASK_APP_KWARGS_DICT_STR)
app = class_obj(__name__, **flask_kwargs_dict)
else:
app = Flask(__name__)
config_module_class = \
os.getenv('METADATA_SVC_CONFIG_MODULE_CLASS') or config_module_class
app.config.from_object(config_module_class)
logging.basicConfig(format=app.config.get('LOG_FORMAT'), datefmt=app.config.get('LOG_DATE_FORMAT'))
logging.getLogger().setLevel(app.config.get('LOG_LEVEL'))
logging.info('Created app with config name {}'.format(config_module_class))
logging.info('Using backend {}'.format(app.config.get('PROXY_CLIENT')))
api_bp = Blueprint('api', __name__)
api_bp.add_url_rule('/healthcheck', 'healthcheck', healthcheck)
api = Api(api_bp)
api.add_resource(PopularTablesAPI, '/popular_tables/')
api.add_resource(TableDetailAPI, '/table/<path:table_uri>')
api.add_resource(TableDescriptionAPI,
'/table/<path:table_uri>/description',
'/table/<path:table_uri>/description/<path:description_val>')
api.add_resource(TableTagAPI,
'/table/<path:table_uri>/tag',
'/table/<path:table_uri>/tag/<tag>')
api.add_resource(TableOwnerAPI,
'/table/<path:table_uri>/owner/<owner>')
api.add_resource(ColumnDescriptionAPI,
'/table/<path:table_uri>/column/<column_name>/description',
'/table/<path:table_uri>/column/<column_name>/description/<path:description_val>')
api.add_resource(Neo4jDetailAPI,
'/latest_updated_ts')
api.add_resource(TagAPI,
'/tags/')
api.add_resource(UserDetailAPI,
'/user/<path:user_id>')
api.add_resource(UserFollowAPI,
'/user/<path:user_id>/follow/',
'/user/<path:user_id>/follow/<resource_type>/<path:table_uri>')
api.add_resource(UserOwnAPI,
'/user/<path:user_id>/own/',
'/user/<path:user_id>/own/<resource_type>/<path:table_uri>')
api.add_resource(UserReadAPI,
'/user/<path:user_id>/read/',
'/user/<path:user_id>/read/<resource_type>/<path:table_uri>')
app.register_blueprint(api_bp)
return app
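def _example_create_app() -> Flask:
    """
    Hypothetical usage sketch added for illustration only; not part of the
    original module. The config class named below is an assumption --
    substitute the project's actual config module and class.
    """
    return create_app(config_module_class='metadata_service.config.LocalConfig')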
| 43.990196
| 107
| 0.696902
|
import ast
import importlib
import logging
import os
import sys
from typing import Dict, Any
from flask import Flask, Blueprint
from flask_restful import Api
from metadata_service.api.column import ColumnDescriptionAPI
from metadata_service.api.healthcheck import healthcheck
from metadata_service.api.popular_tables import PopularTablesAPI
from metadata_service.api.system import Neo4jDetailAPI
from metadata_service.api.table \
import TableDetailAPI, TableOwnerAPI, TableTagAPI, TableDescriptionAPI
from metadata_service.api.tag import TagAPI
from metadata_service.api.user import UserDetailAPI, UserFollowAPI, UserOwnAPI, UserReadAPI
FLASK_APP_MODULE_NAME = os.getenv('FLASK_APP_MODULE_NAME')
FLASK_APP_CLASS_NAME = os.getenv('FLASK_APP_CLASS_NAME')
FLASK_APP_KWARGS_DICT_STR = os.getenv('FLASK_APP_KWARGS_DICT')
def create_app(*, config_module_class: str) -> Flask:
if FLASK_APP_MODULE_NAME and FLASK_APP_CLASS_NAME:
print('Using requested Flask module {module_name} and class {class_name}'
.format(module_name=FLASK_APP_MODULE_NAME, class_name=FLASK_APP_CLASS_NAME), file=sys.stderr)
class_obj = getattr(importlib.import_module(FLASK_APP_MODULE_NAME), FLASK_APP_CLASS_NAME)
flask_kwargs_dict = {}
if FLASK_APP_KWARGS_DICT_STR:
print('Using kwargs {kwargs} to instantiate Flask'.format(kwargs=FLASK_APP_KWARGS_DICT_STR),
file=sys.stderr)
flask_kwargs_dict = ast.literal_eval(FLASK_APP_KWARGS_DICT_STR)
app = class_obj(__name__, **flask_kwargs_dict)
else:
app = Flask(__name__)
config_module_class = \
os.getenv('METADATA_SVC_CONFIG_MODULE_CLASS') or config_module_class
app.config.from_object(config_module_class)
logging.basicConfig(format=app.config.get('LOG_FORMAT'), datefmt=app.config.get('LOG_DATE_FORMAT'))
logging.getLogger().setLevel(app.config.get('LOG_LEVEL'))
logging.info('Created app with config name {}'.format(config_module_class))
logging.info('Using backend {}'.format(app.config.get('PROXY_CLIENT')))
api_bp = Blueprint('api', __name__)
api_bp.add_url_rule('/healthcheck', 'healthcheck', healthcheck)
api = Api(api_bp)
api.add_resource(PopularTablesAPI, '/popular_tables/')
api.add_resource(TableDetailAPI, '/table/<path:table_uri>')
api.add_resource(TableDescriptionAPI,
'/table/<path:table_uri>/description',
'/table/<path:table_uri>/description/<path:description_val>')
api.add_resource(TableTagAPI,
'/table/<path:table_uri>/tag',
'/table/<path:table_uri>/tag/<tag>')
api.add_resource(TableOwnerAPI,
'/table/<path:table_uri>/owner/<owner>')
api.add_resource(ColumnDescriptionAPI,
'/table/<path:table_uri>/column/<column_name>/description',
'/table/<path:table_uri>/column/<column_name>/description/<path:description_val>')
api.add_resource(Neo4jDetailAPI,
'/latest_updated_ts')
api.add_resource(TagAPI,
'/tags/')
api.add_resource(UserDetailAPI,
'/user/<path:user_id>')
api.add_resource(UserFollowAPI,
'/user/<path:user_id>/follow/',
'/user/<path:user_id>/follow/<resource_type>/<path:table_uri>')
api.add_resource(UserOwnAPI,
'/user/<path:user_id>/own/',
'/user/<path:user_id>/own/<resource_type>/<path:table_uri>')
api.add_resource(UserReadAPI,
'/user/<path:user_id>/read/',
'/user/<path:user_id>/read/<resource_type>/<path:table_uri>')
app.register_blueprint(api_bp)
return app
| true
| true
|
f70b128b87482b3cee9323205fe94afb471a66f3
| 5,846
|
py
|
Python
|
lib-src/lv2/suil/waflib/Tools/c_osx.py
|
joshrose/audacity
|
e2b1a2be6b92661628bbb054f915bc50b211c020
|
[
"CC-BY-3.0"
] | 7,892
|
2015-03-31T09:24:05.000Z
|
2022-03-31T12:30:32.000Z
|
lib-src/lv2/suil/waflib/Tools/c_osx.py
|
joshrose/audacity
|
e2b1a2be6b92661628bbb054f915bc50b211c020
|
[
"CC-BY-3.0"
] | 2,050
|
2015-04-03T13:27:52.000Z
|
2022-03-31T19:14:10.000Z
|
lib-src/lv2/suil/waflib/Tools/c_osx.py
|
joshrose/audacity
|
e2b1a2be6b92661628bbb054f915bc50b211c020
|
[
"CC-BY-3.0"
] | 2,613
|
2015-03-26T11:28:10.000Z
|
2022-03-30T13:17:03.000Z
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy 2008-2018 (ita)
"""
MacOSX related tools
"""
import os, shutil, platform
from waflib import Task, Utils
from waflib.TaskGen import taskgen_method, feature, after_method, before_method
app_info = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd">
<plist version="0.9">
<dict>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleGetInfoString</key>
<string>Created by Waf</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>NOTE</key>
<string>THIS IS A GENERATED FILE, DO NOT MODIFY</string>
<key>CFBundleExecutable</key>
<string>{app_name}</string>
</dict>
</plist>
'''
"""
plist template
"""
@feature('c', 'cxx')
def set_macosx_deployment_target(self):
"""
	see WAF issue 285 and also http://trac.macports.org/ticket/17059
"""
if self.env.MACOSX_DEPLOYMENT_TARGET:
os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env.MACOSX_DEPLOYMENT_TARGET
elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:
if Utils.unversioned_sys_platform() == 'darwin':
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2])
@taskgen_method
def create_bundle_dirs(self, name, out):
"""
Creates bundle folders, used by :py:func:`create_task_macplist` and :py:func:`create_task_macapp`
"""
dir = out.parent.find_or_declare(name)
dir.mkdir()
macos = dir.find_or_declare(['Contents', 'MacOS'])
macos.mkdir()
return dir
def bundle_name_for_output(out):
name = out.name
k = name.rfind('.')
if k >= 0:
name = name[:k] + '.app'
else:
name = name + '.app'
return name
@feature('cprogram', 'cxxprogram')
@after_method('apply_link')
def create_task_macapp(self):
"""
To compile an executable into a Mac application (a .app), set its *mac_app* attribute::
def build(bld):
			bld.program(source='a.c', target='foo', mac_app=True)
To force *all* executables to be transformed into Mac applications::
def build(bld):
bld.env.MACAPP = True
			bld.program(source='a.c', target='foo')
"""
if self.env.MACAPP or getattr(self, 'mac_app', False):
out = self.link_task.outputs[0]
name = bundle_name_for_output(out)
dir = self.create_bundle_dirs(name, out)
n1 = dir.find_or_declare(['Contents', 'MacOS', out.name])
self.apptask = self.create_task('macapp', self.link_task.outputs, n1)
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/MacOS/' % name
self.add_install_files(install_to=inst_to, install_from=n1, chmod=Utils.O755)
if getattr(self, 'mac_files', None):
# this only accepts files; they will be installed as seen from mac_files_root
mac_files_root = getattr(self, 'mac_files_root', None)
if isinstance(mac_files_root, str):
mac_files_root = self.path.find_node(mac_files_root)
if not mac_files_root:
self.bld.fatal('Invalid mac_files_root %r' % self.mac_files_root)
res_dir = n1.parent.parent.make_node('Resources')
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Resources' % name
for node in self.to_nodes(self.mac_files):
relpath = node.path_from(mac_files_root or node.parent)
self.create_task('macapp', node, res_dir.make_node(relpath))
self.add_install_as(install_to=os.path.join(inst_to, relpath), install_from=node)
if getattr(self.bld, 'is_install', None):
# disable regular binary installation
self.install_task.hasrun = Task.SKIP_ME
@feature('cprogram', 'cxxprogram')
@after_method('apply_link')
def create_task_macplist(self):
"""
Creates a :py:class:`waflib.Tools.c_osx.macplist` instance.
"""
if self.env.MACAPP or getattr(self, 'mac_app', False):
out = self.link_task.outputs[0]
name = bundle_name_for_output(out)
dir = self.create_bundle_dirs(name, out)
n1 = dir.find_or_declare(['Contents', 'Info.plist'])
self.plisttask = plisttask = self.create_task('macplist', [], n1)
plisttask.context = {
'app_name': self.link_task.outputs[0].name,
'env': self.env
}
plist_ctx = getattr(self, 'plist_context', None)
if (plist_ctx):
plisttask.context.update(plist_ctx)
if getattr(self, 'mac_plist', False):
node = self.path.find_resource(self.mac_plist)
if node:
plisttask.inputs.append(node)
else:
plisttask.code = self.mac_plist
else:
plisttask.code = app_info
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/' % name
self.add_install_files(install_to=inst_to, install_from=n1)
@feature('cshlib', 'cxxshlib')
@before_method('apply_link', 'propagate_uselib_vars')
def apply_bundle(self):
"""
To make a bundled shared library (a ``.bundle``), set the *mac_bundle* attribute::
def build(bld):
bld.shlib(source='a.c', target='foo', mac_bundle = True)
To force *all* executables to be transformed into bundles::
def build(bld):
bld.env.MACBUNDLE = True
bld.shlib(source='a.c', target='foo')
"""
if self.env.MACBUNDLE or getattr(self, 'mac_bundle', False):
self.env.LINKFLAGS_cshlib = self.env.LINKFLAGS_cxxshlib = [] # disable the '-dynamiclib' flag
self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.macbundle_PATTERN
use = self.use = self.to_list(getattr(self, 'use', []))
if not 'MACBUNDLE' in use:
use.append('MACBUNDLE')
app_dirs = ['Contents', 'Contents/MacOS', 'Contents/Resources']
class macapp(Task.Task):
"""
Creates mac applications
"""
color = 'PINK'
def run(self):
self.outputs[0].parent.mkdir()
shutil.copy2(self.inputs[0].srcpath(), self.outputs[0].abspath())
class macplist(Task.Task):
"""
Creates plist files
"""
color = 'PINK'
ext_in = ['.bin']
def run(self):
if getattr(self, 'code', None):
txt = self.code
else:
txt = self.inputs[0].read()
context = getattr(self, 'context', {})
txt = txt.format(**context)
self.outputs[0].write(txt)
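def _example_plist_rendering():
	"""
	Hypothetical illustration only, not part of waf: shows how the app_info
	template above is filled in by macplist.run via str.format.
	"""
	rendered = app_info.format(app_name='MyTool')
	# CFBundleExecutable in the generated Info.plist carries the app name.
	assert '<string>MyTool</string>' in rendered
	return rendered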
| 30.134021
| 98
| 0.706295
|
import os, shutil, platform
from waflib import Task, Utils
from waflib.TaskGen import taskgen_method, feature, after_method, before_method
app_info = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd">
<plist version="0.9">
<dict>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleGetInfoString</key>
<string>Created by Waf</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>NOTE</key>
<string>THIS IS A GENERATED FILE, DO NOT MODIFY</string>
<key>CFBundleExecutable</key>
<string>{app_name}</string>
</dict>
</plist>
'''
@feature('c', 'cxx')
def set_macosx_deployment_target(self):
if self.env.MACOSX_DEPLOYMENT_TARGET:
os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env.MACOSX_DEPLOYMENT_TARGET
elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:
if Utils.unversioned_sys_platform() == 'darwin':
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2])
@taskgen_method
def create_bundle_dirs(self, name, out):
dir = out.parent.find_or_declare(name)
dir.mkdir()
macos = dir.find_or_declare(['Contents', 'MacOS'])
macos.mkdir()
return dir
def bundle_name_for_output(out):
name = out.name
k = name.rfind('.')
if k >= 0:
name = name[:k] + '.app'
else:
name = name + '.app'
return name
@feature('cprogram', 'cxxprogram')
@after_method('apply_link')
def create_task_macapp(self):
if self.env.MACAPP or getattr(self, 'mac_app', False):
out = self.link_task.outputs[0]
name = bundle_name_for_output(out)
dir = self.create_bundle_dirs(name, out)
n1 = dir.find_or_declare(['Contents', 'MacOS', out.name])
self.apptask = self.create_task('macapp', self.link_task.outputs, n1)
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/MacOS/' % name
self.add_install_files(install_to=inst_to, install_from=n1, chmod=Utils.O755)
if getattr(self, 'mac_files', None):
mac_files_root = getattr(self, 'mac_files_root', None)
if isinstance(mac_files_root, str):
mac_files_root = self.path.find_node(mac_files_root)
if not mac_files_root:
self.bld.fatal('Invalid mac_files_root %r' % self.mac_files_root)
res_dir = n1.parent.parent.make_node('Resources')
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Resources' % name
for node in self.to_nodes(self.mac_files):
relpath = node.path_from(mac_files_root or node.parent)
self.create_task('macapp', node, res_dir.make_node(relpath))
self.add_install_as(install_to=os.path.join(inst_to, relpath), install_from=node)
if getattr(self.bld, 'is_install', None):
self.install_task.hasrun = Task.SKIP_ME
@feature('cprogram', 'cxxprogram')
@after_method('apply_link')
def create_task_macplist(self):
if self.env.MACAPP or getattr(self, 'mac_app', False):
out = self.link_task.outputs[0]
name = bundle_name_for_output(out)
dir = self.create_bundle_dirs(name, out)
n1 = dir.find_or_declare(['Contents', 'Info.plist'])
self.plisttask = plisttask = self.create_task('macplist', [], n1)
plisttask.context = {
'app_name': self.link_task.outputs[0].name,
'env': self.env
}
plist_ctx = getattr(self, 'plist_context', None)
if (plist_ctx):
plisttask.context.update(plist_ctx)
if getattr(self, 'mac_plist', False):
node = self.path.find_resource(self.mac_plist)
if node:
plisttask.inputs.append(node)
else:
plisttask.code = self.mac_plist
else:
plisttask.code = app_info
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/' % name
self.add_install_files(install_to=inst_to, install_from=n1)
@feature('cshlib', 'cxxshlib')
@before_method('apply_link', 'propagate_uselib_vars')
def apply_bundle(self):
if self.env.MACBUNDLE or getattr(self, 'mac_bundle', False):
self.env.LINKFLAGS_cshlib = self.env.LINKFLAGS_cxxshlib = []
self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.macbundle_PATTERN
use = self.use = self.to_list(getattr(self, 'use', []))
if not 'MACBUNDLE' in use:
use.append('MACBUNDLE')
app_dirs = ['Contents', 'Contents/MacOS', 'Contents/Resources']
class macapp(Task.Task):
color = 'PINK'
def run(self):
self.outputs[0].parent.mkdir()
shutil.copy2(self.inputs[0].srcpath(), self.outputs[0].abspath())
class macplist(Task.Task):
color = 'PINK'
ext_in = ['.bin']
def run(self):
if getattr(self, 'code', None):
txt = self.code
else:
txt = self.inputs[0].read()
context = getattr(self, 'context', {})
txt = txt.format(**context)
self.outputs[0].write(txt)
| true
| true
|
f70b13e9224c40649b9bde9fb2b3aa3621b095d9
| 45,694
|
py
|
Python
|
tests/druid_func_tests.py
|
longenouvo/incubator-superset
|
4e998be6956955041a6d36351f602e27d0c8cbeb
|
[
"Apache-2.0"
] | 1
|
2020-04-15T18:13:31.000Z
|
2020-04-15T18:13:31.000Z
|
tests/druid_func_tests.py
|
Odirlei-Stein/incubator-superset
|
52afc33b31475536b287b56d262b9eaa32f479ab
|
[
"Apache-2.0"
] | null | null | null |
tests/druid_func_tests.py
|
Odirlei-Stein/incubator-superset
|
52afc33b31475536b287b56d262b9eaa32f479ab
|
[
"Apache-2.0"
] | 3
|
2020-04-15T16:34:09.000Z
|
2020-06-22T17:26:45.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
from unittest.mock import Mock
import superset.connectors.druid.models as models
from superset.connectors.druid.models import DruidColumn, DruidDatasource, DruidMetric
from superset.exceptions import SupersetException
from .base_tests import SupersetTestCase
try:
from pydruid.utils.dimensions import (
MapLookupExtraction,
RegexExtraction,
RegisteredLookupExtraction,
)
import pydruid.utils.postaggregator as postaggs
except ImportError:
pass
def mock_metric(metric_name, is_postagg=False):
metric = Mock()
metric.metric_name = metric_name
metric.metric_type = "postagg" if is_postagg else "metric"
return metric
def emplace(metrics_dict, metric_name, is_postagg=False):
metrics_dict[metric_name] = mock_metric(metric_name, is_postagg)
# Unit tests that can be run without initializing base tests
class DruidFuncTestCase(SupersetTestCase):
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_extraction_fn_map(self):
filters = [{"col": "deviceName", "val": ["iPhone X"], "op": "in"}]
dimension_spec = {
"type": "extraction",
"dimension": "device",
"outputName": "deviceName",
"outputType": "STRING",
"extractionFn": {
"type": "lookup",
"dimension": "dimensionName",
"outputName": "dimensionOutputName",
"replaceMissingValueWith": "missing_value",
"retainMissingValue": False,
"lookup": {
"type": "map",
"map": {
"iPhone10,1": "iPhone 8",
"iPhone10,4": "iPhone 8",
"iPhone10,2": "iPhone 8 Plus",
"iPhone10,5": "iPhone 8 Plus",
"iPhone10,3": "iPhone X",
"iPhone10,6": "iPhone X",
},
"isOneToOne": False,
},
},
}
spec_json = json.dumps(dimension_spec)
col = DruidColumn(column_name="deviceName", dimension_spec_json=spec_json)
column_dict = {"deviceName": col}
f = DruidDatasource.get_filters(filters, [], column_dict)
assert isinstance(f.extraction_function, MapLookupExtraction)
dim_ext_fn = dimension_spec["extractionFn"]
f_ext_fn = f.extraction_function
self.assertEqual(dim_ext_fn["lookup"]["map"], f_ext_fn._mapping)
self.assertEqual(dim_ext_fn["lookup"]["isOneToOne"], f_ext_fn._injective)
self.assertEqual(
dim_ext_fn["replaceMissingValueWith"], f_ext_fn._replace_missing_values
)
self.assertEqual(
dim_ext_fn["retainMissingValue"], f_ext_fn._retain_missing_values
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_extraction_fn_regex(self):
filters = [{"col": "buildPrefix", "val": ["22B"], "op": "in"}]
dimension_spec = {
"type": "extraction",
"dimension": "build",
"outputName": "buildPrefix",
"outputType": "STRING",
"extractionFn": {"type": "regex", "expr": "(^[0-9A-Za-z]{3})"},
}
spec_json = json.dumps(dimension_spec)
col = DruidColumn(column_name="buildPrefix", dimension_spec_json=spec_json)
column_dict = {"buildPrefix": col}
f = DruidDatasource.get_filters(filters, [], column_dict)
assert isinstance(f.extraction_function, RegexExtraction)
dim_ext_fn = dimension_spec["extractionFn"]
f_ext_fn = f.extraction_function
self.assertEqual(dim_ext_fn["expr"], f_ext_fn._expr)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_extraction_fn_registered_lookup_extraction(self):
filters = [{"col": "country", "val": ["Spain"], "op": "in"}]
dimension_spec = {
"type": "extraction",
"dimension": "country_name",
"outputName": "country",
"outputType": "STRING",
"extractionFn": {"type": "registeredLookup", "lookup": "country_name"},
}
spec_json = json.dumps(dimension_spec)
col = DruidColumn(column_name="country", dimension_spec_json=spec_json)
column_dict = {"country": col}
f = DruidDatasource.get_filters(filters, [], column_dict)
assert isinstance(f.extraction_function, RegisteredLookupExtraction)
dim_ext_fn = dimension_spec["extractionFn"]
self.assertEqual(dim_ext_fn["type"], f.extraction_function.extraction_type)
self.assertEqual(dim_ext_fn["lookup"], f.extraction_function._lookup)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_ignores_invalid_filter_objects(self):
filtr = {"col": "col1", "op": "=="}
filters = [filtr]
col = DruidColumn(column_name="col1")
column_dict = {"col1": col}
self.assertIsNone(DruidDatasource.get_filters(filters, [], column_dict))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_filter_in(self):
filtr = {"col": "A", "op": "in", "val": ["a", "b", "c"]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertIn("filter", res.filter)
self.assertIn("fields", res.filter["filter"])
self.assertEqual("or", res.filter["filter"]["type"])
self.assertEqual(3, len(res.filter["filter"]["fields"]))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_filter_not_in(self):
filtr = {"col": "A", "op": "not in", "val": ["a", "b", "c"]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertIn("filter", res.filter)
self.assertIn("type", res.filter["filter"])
self.assertEqual("not", res.filter["filter"]["type"])
self.assertIn("field", res.filter["filter"])
self.assertEqual(
3, len(res.filter["filter"]["field"].filter["filter"]["fields"])
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_filter_equals(self):
filtr = {"col": "A", "op": "==", "val": "h"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("selector", res.filter["filter"]["type"])
self.assertEqual("A", res.filter["filter"]["dimension"])
self.assertEqual("h", res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_filter_not_equals(self):
filtr = {"col": "A", "op": "!=", "val": "h"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("not", res.filter["filter"]["type"])
self.assertEqual("h", res.filter["filter"]["field"].filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_bounds_filter(self):
filtr = {"col": "A", "op": ">=", "val": "h"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertFalse(res.filter["filter"]["lowerStrict"])
self.assertEqual("A", res.filter["filter"]["dimension"])
self.assertEqual("h", res.filter["filter"]["lower"])
self.assertFalse(res.filter["filter"]["alphaNumeric"])
filtr["op"] = ">"
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertTrue(res.filter["filter"]["lowerStrict"])
filtr["op"] = "<="
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertFalse(res.filter["filter"]["upperStrict"])
self.assertEqual("h", res.filter["filter"]["upper"])
filtr["op"] = "<"
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertTrue(res.filter["filter"]["upperStrict"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_is_null_filter(self):
filtr = {"col": "A", "op": "IS NULL"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("selector", res.filter["filter"]["type"])
self.assertEqual("", res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_is_not_null_filter(self):
filtr = {"col": "A", "op": "IS NOT NULL"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("not", res.filter["filter"]["type"])
self.assertIn("field", res.filter["filter"])
self.assertEqual(
"selector", res.filter["filter"]["field"].filter["filter"]["type"]
)
self.assertEqual("", res.filter["filter"]["field"].filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_regex_filter(self):
filtr = {"col": "A", "op": "regex", "val": "[abc]"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("regex", res.filter["filter"]["type"])
self.assertEqual("[abc]", res.filter["filter"]["pattern"])
self.assertEqual("A", res.filter["filter"]["dimension"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_composes_multiple_filters(self):
filtr1 = {"col": "A", "op": "!=", "val": "y"}
filtr2 = {"col": "B", "op": "in", "val": ["a", "b", "c"]}
cola = DruidColumn(column_name="A")
colb = DruidColumn(column_name="B")
column_dict = {"A": cola, "B": colb}
res = DruidDatasource.get_filters([filtr1, filtr2], [], column_dict)
self.assertEqual("and", res.filter["filter"]["type"])
self.assertEqual(2, len(res.filter["filter"]["fields"]))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_ignores_in_not_in_with_empty_value(self):
filtr1 = {"col": "A", "op": "in", "val": []}
filtr2 = {"col": "A", "op": "not in", "val": []}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr1, filtr2], [], column_dict)
self.assertIsNone(res)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_equals_for_in_not_in_single_value(self):
filtr = {"col": "A", "op": "in", "val": ["a"]}
cola = DruidColumn(column_name="A")
colb = DruidColumn(column_name="B")
column_dict = {"A": cola, "B": colb}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("selector", res.filter["filter"]["type"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_handles_arrays_for_string_types(self):
filtr = {"col": "A", "op": "==", "val": ["a", "b"]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("a", res.filter["filter"]["value"])
filtr = {"col": "A", "op": "==", "val": []}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertIsNone(res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_handles_none_for_string_types(self):
filtr = {"col": "A", "op": "==", "val": None}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertIsNone(res)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_extracts_values_in_quotes(self):
filtr = {"col": "A", "op": "in", "val": ['"a"']}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("a", res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_keeps_trailing_spaces(self):
filtr = {"col": "A", "op": "in", "val": ["a "]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("a ", res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_converts_strings_to_num(self):
filtr = {"col": "A", "op": "in", "val": ["6"]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], ["A"], column_dict)
self.assertEqual(6, res.filter["filter"]["value"])
filtr = {"col": "A", "op": "==", "val": "6"}
res = DruidDatasource.get_filters([filtr], ["A"], column_dict)
self.assertEqual(6, res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_no_groupby(self):
client = Mock()
from_dttm = Mock()
to_dttm = Mock()
from_dttm.replace = Mock(return_value=from_dttm)
to_dttm.replace = Mock(return_value=to_dttm)
from_dttm.isoformat = Mock(return_value="from")
to_dttm.isoformat = Mock(return_value="to")
timezone = "timezone"
from_dttm.tzname = Mock(return_value=timezone)
ds = DruidDatasource(datasource_name="datasource")
metric1 = DruidMetric(metric_name="metric1")
metric2 = DruidMetric(metric_name="metric2")
ds.metrics = [metric1, metric2]
col1 = DruidColumn(column_name="col1")
col2 = DruidColumn(column_name="col2")
ds.columns = [col1, col2]
aggs = []
post_aggs = ["some_agg"]
ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
groupby = []
metrics = ["metric1"]
ds.get_having_filters = Mock(return_value=[])
client.query_builder = Mock()
client.query_builder.last_query = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
# no groupby calls client.timeseries
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
filter=[],
row_limit=100,
)
self.assertEqual(0, len(client.topn.call_args_list))
self.assertEqual(0, len(client.groupby.call_args_list))
self.assertEqual(1, len(client.timeseries.call_args_list))
# check that there is no dimensions entry
called_args = client.timeseries.call_args_list[0][1]
self.assertNotIn("dimensions", called_args)
self.assertIn("post_aggregations", called_args)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_with_adhoc_metric(self):
client = Mock()
from_dttm = Mock()
to_dttm = Mock()
from_dttm.replace = Mock(return_value=from_dttm)
to_dttm.replace = Mock(return_value=to_dttm)
from_dttm.isoformat = Mock(return_value="from")
to_dttm.isoformat = Mock(return_value="to")
timezone = "timezone"
from_dttm.tzname = Mock(return_value=timezone)
ds = DruidDatasource(datasource_name="datasource")
metric1 = DruidMetric(metric_name="metric1")
metric2 = DruidMetric(metric_name="metric2")
ds.metrics = [metric1, metric2]
col1 = DruidColumn(column_name="col1")
col2 = DruidColumn(column_name="col2")
ds.columns = [col1, col2]
all_metrics = []
post_aggs = ["some_agg"]
ds._metrics_and_post_aggs = Mock(return_value=(all_metrics, post_aggs))
groupby = []
metrics = [
{
"expressionType": "SIMPLE",
"column": {"type": "DOUBLE", "column_name": "col1"},
"aggregate": "SUM",
"label": "My Adhoc Metric",
}
]
ds.get_having_filters = Mock(return_value=[])
client.query_builder = Mock()
client.query_builder.last_query = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
# no groupby calls client.timeseries
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
filter=[],
row_limit=100,
)
self.assertEqual(0, len(client.topn.call_args_list))
self.assertEqual(0, len(client.groupby.call_args_list))
self.assertEqual(1, len(client.timeseries.call_args_list))
# check that there is no dimensions entry
called_args = client.timeseries.call_args_list[0][1]
self.assertNotIn("dimensions", called_args)
self.assertIn("post_aggregations", called_args)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_single_groupby(self):
client = Mock()
from_dttm = Mock()
to_dttm = Mock()
from_dttm.replace = Mock(return_value=from_dttm)
to_dttm.replace = Mock(return_value=to_dttm)
from_dttm.isoformat = Mock(return_value="from")
to_dttm.isoformat = Mock(return_value="to")
timezone = "timezone"
from_dttm.tzname = Mock(return_value=timezone)
ds = DruidDatasource(datasource_name="datasource")
metric1 = DruidMetric(metric_name="metric1")
metric2 = DruidMetric(metric_name="metric2")
ds.metrics = [metric1, metric2]
col1 = DruidColumn(column_name="col1")
col2 = DruidColumn(column_name="col2")
ds.columns = [col1, col2]
aggs = ["metric1"]
post_aggs = ["some_agg"]
ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
groupby = ["col1"]
metrics = ["metric1"]
ds.get_having_filters = Mock(return_value=[])
client.query_builder.last_query.query_dict = {"mock": 0}
# client.topn is called twice
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
timeseries_limit=100,
client=client,
order_desc=True,
filter=[],
)
self.assertEqual(2, len(client.topn.call_args_list))
self.assertEqual(0, len(client.groupby.call_args_list))
self.assertEqual(0, len(client.timeseries.call_args_list))
# check that there is no dimensions entry
called_args_pre = client.topn.call_args_list[0][1]
self.assertNotIn("dimensions", called_args_pre)
self.assertIn("dimension", called_args_pre)
called_args = client.topn.call_args_list[1][1]
self.assertIn("dimension", called_args)
self.assertEqual("col1", called_args["dimension"])
# not order_desc
client = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
order_desc=False,
filter=[],
row_limit=100,
)
self.assertEqual(0, len(client.topn.call_args_list))
self.assertEqual(1, len(client.groupby.call_args_list))
self.assertEqual(0, len(client.timeseries.call_args_list))
self.assertIn("dimensions", client.groupby.call_args_list[0][1])
self.assertEqual(["col1"], client.groupby.call_args_list[0][1]["dimensions"])
# order_desc but timeseries and dimension spec
# calls topn with single dimension spec 'dimension'
spec = {"outputName": "hello", "dimension": "matcho"}
spec_json = json.dumps(spec)
col3 = DruidColumn(column_name="col3", dimension_spec_json=spec_json)
ds.columns.append(col3)
groupby = ["col3"]
client = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
order_desc=True,
timeseries_limit=5,
filter=[],
row_limit=100,
)
self.assertEqual(2, len(client.topn.call_args_list))
self.assertEqual(0, len(client.groupby.call_args_list))
self.assertEqual(0, len(client.timeseries.call_args_list))
self.assertIn("dimension", client.topn.call_args_list[0][1])
self.assertIn("dimension", client.topn.call_args_list[1][1])
# uses dimension for pre query and full spec for final query
self.assertEqual("matcho", client.topn.call_args_list[0][1]["dimension"])
self.assertEqual(spec, client.topn.call_args_list[1][1]["dimension"])
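        # Recap: with a single groupby column and order_desc=True, run_query
        # issues two topn calls (a pre-query on the bare dimension name, then
        # the final query using the full dimension spec when one is defined);
        # with order_desc=False it falls back to a single groupby call.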
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_multiple_groupby(self):
client = Mock()
from_dttm = Mock()
to_dttm = Mock()
from_dttm.replace = Mock(return_value=from_dttm)
to_dttm.replace = Mock(return_value=to_dttm)
from_dttm.isoformat = Mock(return_value="from")
to_dttm.isoformat = Mock(return_value="to")
timezone = "timezone"
from_dttm.tzname = Mock(return_value=timezone)
ds = DruidDatasource(datasource_name="datasource")
metric1 = DruidMetric(metric_name="metric1")
metric2 = DruidMetric(metric_name="metric2")
ds.metrics = [metric1, metric2]
col1 = DruidColumn(column_name="col1")
col2 = DruidColumn(column_name="col2")
ds.columns = [col1, col2]
aggs = []
post_aggs = ["some_agg"]
ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
groupby = ["col1", "col2"]
metrics = ["metric1"]
ds.get_having_filters = Mock(return_value=[])
client.query_builder = Mock()
client.query_builder.last_query = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
        # multiple groupby columns call client.groupby
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
row_limit=100,
filter=[],
)
self.assertEqual(0, len(client.topn.call_args_list))
self.assertEqual(1, len(client.groupby.call_args_list))
self.assertEqual(0, len(client.timeseries.call_args_list))
        # check that the dimensions entry contains both groupby columns
called_args = client.groupby.call_args_list[0][1]
self.assertIn("dimensions", called_args)
self.assertEqual(["col1", "col2"], called_args["dimensions"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_post_agg_returns_correct_agg_type(self):
get_post_agg = DruidDatasource.get_post_agg
# javascript PostAggregators
function = "function(field1, field2) { return field1 + field2; }"
conf = {
"type": "javascript",
"name": "postagg_name",
"fieldNames": ["field1", "field2"],
"function": function,
}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, models.JavascriptPostAggregator))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["type"], "javascript")
self.assertEqual(postagg.post_aggregator["fieldNames"], ["field1", "field2"])
self.assertEqual(postagg.post_aggregator["name"], "postagg_name")
self.assertEqual(postagg.post_aggregator["function"], function)
# Quantile
conf = {"type": "quantile", "name": "postagg_name", "probability": "0.5"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Quantile))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["probability"], "0.5")
# Quantiles
conf = {
"type": "quantiles",
"name": "postagg_name",
"probabilities": "0.4,0.5,0.6",
}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Quantiles))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["probabilities"], "0.4,0.5,0.6")
# FieldAccess
conf = {"type": "fieldAccess", "name": "field_name"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Field))
self.assertEqual(postagg.name, "field_name")
# constant
conf = {"type": "constant", "value": 1234, "name": "postagg_name"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Const))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["value"], 1234)
# hyperUniqueCardinality
conf = {"type": "hyperUniqueCardinality", "name": "unique_name"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.HyperUniqueCardinality))
self.assertEqual(postagg.name, "unique_name")
# arithmetic
conf = {
"type": "arithmetic",
"fn": "+",
"fields": ["field1", "field2"],
"name": "postagg_name",
}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Postaggregator))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["fn"], "+")
self.assertEqual(postagg.post_aggregator["fields"], ["field1", "field2"])
# custom post aggregator
conf = {"type": "custom", "name": "custom_name", "stuff": "more_stuff"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, models.CustomPostAggregator))
self.assertEqual(postagg.name, "custom_name")
self.assertEqual(postagg.post_aggregator["stuff"], "more_stuff")
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_find_postaggs_for_returns_postaggs_and_removes(self):
find_postaggs_for = DruidDatasource.find_postaggs_for
postagg_names = set(["pa2", "pa3", "pa4", "m1", "m2", "m3", "m4"])
metrics = {}
for i in range(1, 6):
emplace(metrics, "pa" + str(i), True)
emplace(metrics, "m" + str(i), False)
postagg_list = find_postaggs_for(postagg_names, metrics)
self.assertEqual(3, len(postagg_list))
self.assertEqual(4, len(postagg_names))
expected_metrics = ["m1", "m2", "m3", "m4"]
expected_postaggs = set(["pa2", "pa3", "pa4"])
for postagg in postagg_list:
expected_postaggs.remove(postagg.metric_name)
for metric in expected_metrics:
postagg_names.remove(metric)
self.assertEqual(0, len(expected_postaggs))
self.assertEqual(0, len(postagg_names))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_recursive_get_fields(self):
conf = {
"type": "quantile",
"fieldName": "f1",
"field": {
"type": "custom",
"fields": [
{"type": "fieldAccess", "fieldName": "f2"},
{"type": "fieldAccess", "fieldName": "f3"},
{
"type": "quantiles",
"fieldName": "f4",
"field": {"type": "custom"},
},
{
"type": "custom",
"fields": [
{"type": "fieldAccess", "fieldName": "f5"},
{
"type": "fieldAccess",
"fieldName": "f2",
"fields": [
{"type": "fieldAccess", "fieldName": "f3"},
{"type": "fieldIgnoreMe", "fieldName": "f6"},
],
},
],
},
],
},
}
fields = DruidDatasource.recursive_get_fields(conf)
expected = set(["f1", "f2", "f3", "f4", "f5"])
self.assertEqual(5, len(fields))
for field in fields:
expected.remove(field)
self.assertEqual(0, len(expected))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_metrics_and_post_aggs_tree(self):
metrics = ["A", "B", "m1", "m2"]
metrics_dict = {}
for i in range(ord("A"), ord("K") + 1):
emplace(metrics_dict, chr(i), True)
for i in range(1, 10):
emplace(metrics_dict, "m" + str(i), False)
def depends_on(index, fields):
dependents = fields if isinstance(fields, list) else [fields]
metrics_dict[index].json_obj = {"fieldNames": dependents}
depends_on("A", ["m1", "D", "C"])
depends_on("B", ["B", "C", "E", "F", "m3"])
depends_on("C", ["H", "I"])
depends_on("D", ["m2", "m5", "G", "C"])
depends_on("E", ["H", "I", "J"])
depends_on("F", ["J", "m5"])
depends_on("G", ["m4", "m7", "m6", "A"])
depends_on("H", ["A", "m4", "I"])
depends_on("I", ["H", "K"])
depends_on("J", "K")
depends_on("K", ["m8", "m9"])
aggs, postaggs = DruidDatasource.metrics_and_post_aggs(metrics, metrics_dict)
expected_metrics = set(aggs.keys())
self.assertEqual(9, len(aggs))
for i in range(1, 10):
expected_metrics.remove("m" + str(i))
self.assertEqual(0, len(expected_metrics))
self.assertEqual(11, len(postaggs))
for i in range(ord("A"), ord("K") + 1):
del postaggs[chr(i)]
self.assertEqual(0, len(postaggs))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_metrics_and_post_aggs(self):
"""
        Test generation of metrics and post-aggregations from an initial list
        of superset metrics, which may include both raw aggregations and
        post-aggregations. This primarily tests that specifying a
        post-aggregator metric also requires the raw aggregation of the
        associated druid metric column.
"""
metrics_dict = {
"unused_count": DruidMetric(
metric_name="unused_count",
verbose_name="COUNT(*)",
metric_type="count",
json=json.dumps({"type": "count", "name": "unused_count"}),
),
"some_sum": DruidMetric(
metric_name="some_sum",
verbose_name="SUM(*)",
metric_type="sum",
json=json.dumps({"type": "sum", "name": "sum"}),
),
"a_histogram": DruidMetric(
metric_name="a_histogram",
verbose_name="APPROXIMATE_HISTOGRAM(*)",
metric_type="approxHistogramFold",
json=json.dumps({"type": "approxHistogramFold", "name": "a_histogram"}),
),
"aCustomMetric": DruidMetric(
metric_name="aCustomMetric",
verbose_name="MY_AWESOME_METRIC(*)",
metric_type="aCustomType",
json=json.dumps({"type": "customMetric", "name": "aCustomMetric"}),
),
"quantile_p95": DruidMetric(
metric_name="quantile_p95",
verbose_name="P95(*)",
metric_type="postagg",
json=json.dumps(
{
"type": "quantile",
"probability": 0.95,
"name": "p95",
"fieldName": "a_histogram",
}
),
),
"aCustomPostAgg": DruidMetric(
metric_name="aCustomPostAgg",
verbose_name="CUSTOM_POST_AGG(*)",
metric_type="postagg",
json=json.dumps(
{
"type": "customPostAgg",
"name": "aCustomPostAgg",
"field": {"type": "fieldAccess", "fieldName": "aCustomMetric"},
}
),
),
}
adhoc_metric = {
"expressionType": "SIMPLE",
"column": {"type": "DOUBLE", "column_name": "value"},
"aggregate": "SUM",
"label": "My Adhoc Metric",
}
metrics = ["some_sum"]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
assert set(saved_metrics.keys()) == {"some_sum"}
assert post_aggs == {}
metrics = [adhoc_metric]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
assert set(saved_metrics.keys()) == set([adhoc_metric["label"]])
assert post_aggs == {}
metrics = ["some_sum", adhoc_metric]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
assert set(saved_metrics.keys()) == {"some_sum", adhoc_metric["label"]}
assert post_aggs == {}
metrics = ["quantile_p95"]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
result_postaggs = set(["quantile_p95"])
assert set(saved_metrics.keys()) == {"a_histogram"}
assert set(post_aggs.keys()) == result_postaggs
metrics = ["aCustomPostAgg"]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
result_postaggs = set(["aCustomPostAgg"])
assert set(saved_metrics.keys()) == {"aCustomMetric"}
assert set(post_aggs.keys()) == result_postaggs
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_druid_type_from_adhoc_metric(self):
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "DOUBLE", "column_name": "value"},
"aggregate": "SUM",
"label": "My Adhoc Metric",
}
)
assert druid_type == "doubleSum"
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "LONG", "column_name": "value"},
"aggregate": "MAX",
"label": "My Adhoc Metric",
}
)
assert druid_type == "longMax"
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "VARCHAR(255)", "column_name": "value"},
"aggregate": "COUNT",
"label": "My Adhoc Metric",
}
)
assert druid_type == "count"
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "VARCHAR(255)", "column_name": "value"},
"aggregate": "COUNT_DISTINCT",
"label": "My Adhoc Metric",
}
)
assert druid_type == "cardinality"
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "hyperUnique", "column_name": "value"},
"aggregate": "COUNT_DISTINCT",
"label": "My Adhoc Metric",
}
)
assert druid_type == "hyperUnique"
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_order_by_metrics(self):
client = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
from_dttm = Mock()
to_dttm = Mock()
ds = DruidDatasource(datasource_name="datasource")
ds.get_having_filters = Mock(return_value=[])
dim1 = DruidColumn(column_name="dim1")
dim2 = DruidColumn(column_name="dim2")
metrics_dict = {
"count1": DruidMetric(
metric_name="count1",
metric_type="count",
json=json.dumps({"type": "count", "name": "count1"}),
),
"sum1": DruidMetric(
metric_name="sum1",
metric_type="doubleSum",
json=json.dumps({"type": "doubleSum", "name": "sum1"}),
),
"sum2": DruidMetric(
metric_name="sum2",
metric_type="doubleSum",
json=json.dumps({"type": "doubleSum", "name": "sum2"}),
),
"div1": DruidMetric(
metric_name="div1",
metric_type="postagg",
json=json.dumps(
{
"fn": "/",
"type": "arithmetic",
"name": "div1",
"fields": [
{"fieldName": "sum1", "type": "fieldAccess"},
{"fieldName": "sum2", "type": "fieldAccess"},
],
}
),
),
}
ds.columns = [dim1, dim2]
ds.metrics = list(metrics_dict.values())
groupby = ["dim1"]
metrics = ["count1"]
granularity = "all"
# get the counts of the top 5 'dim1's, order by 'sum1'
ds.run_query(
groupby,
metrics,
granularity,
from_dttm,
to_dttm,
timeseries_limit=5,
timeseries_limit_metric="sum1",
client=client,
order_desc=True,
filter=[],
)
qry_obj = client.topn.call_args_list[0][1]
self.assertEqual("dim1", qry_obj["dimension"])
self.assertEqual("sum1", qry_obj["metric"])
aggregations = qry_obj["aggregations"]
post_aggregations = qry_obj["post_aggregations"]
self.assertEqual({"count1", "sum1"}, set(aggregations.keys()))
self.assertEqual(set(), set(post_aggregations.keys()))
# get the counts of the top 5 'dim1's, order by 'div1'
ds.run_query(
groupby,
metrics,
granularity,
from_dttm,
to_dttm,
timeseries_limit=5,
timeseries_limit_metric="div1",
client=client,
order_desc=True,
filter=[],
)
qry_obj = client.topn.call_args_list[1][1]
self.assertEqual("dim1", qry_obj["dimension"])
self.assertEqual("div1", qry_obj["metric"])
aggregations = qry_obj["aggregations"]
post_aggregations = qry_obj["post_aggregations"]
self.assertEqual({"count1", "sum1", "sum2"}, set(aggregations.keys()))
self.assertEqual({"div1"}, set(post_aggregations.keys()))
groupby = ["dim1", "dim2"]
# get the counts of the top 5 ['dim1', 'dim2']s, order by 'sum1'
ds.run_query(
groupby,
metrics,
granularity,
from_dttm,
to_dttm,
timeseries_limit=5,
timeseries_limit_metric="sum1",
client=client,
order_desc=True,
filter=[],
)
qry_obj = client.groupby.call_args_list[0][1]
self.assertEqual({"dim1", "dim2"}, set(qry_obj["dimensions"]))
self.assertEqual("sum1", qry_obj["limit_spec"]["columns"][0]["dimension"])
aggregations = qry_obj["aggregations"]
post_aggregations = qry_obj["post_aggregations"]
self.assertEqual({"count1", "sum1"}, set(aggregations.keys()))
self.assertEqual(set(), set(post_aggregations.keys()))
# get the counts of the top 5 ['dim1', 'dim2']s, order by 'div1'
ds.run_query(
groupby,
metrics,
granularity,
from_dttm,
to_dttm,
timeseries_limit=5,
timeseries_limit_metric="div1",
client=client,
order_desc=True,
filter=[],
)
qry_obj = client.groupby.call_args_list[1][1]
self.assertEqual({"dim1", "dim2"}, set(qry_obj["dimensions"]))
self.assertEqual("div1", qry_obj["limit_spec"]["columns"][0]["dimension"])
aggregations = qry_obj["aggregations"]
post_aggregations = qry_obj["post_aggregations"]
self.assertEqual({"count1", "sum1", "sum2"}, set(aggregations.keys()))
self.assertEqual({"div1"}, set(post_aggregations.keys()))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_aggregations(self):
ds = DruidDatasource(datasource_name="datasource")
metrics_dict = {
"sum1": DruidMetric(
metric_name="sum1",
metric_type="doubleSum",
json=json.dumps({"type": "doubleSum", "name": "sum1"}),
),
"sum2": DruidMetric(
metric_name="sum2",
metric_type="doubleSum",
json=json.dumps({"type": "doubleSum", "name": "sum2"}),
),
"div1": DruidMetric(
metric_name="div1",
metric_type="postagg",
json=json.dumps(
{
"fn": "/",
"type": "arithmetic",
"name": "div1",
"fields": [
{"fieldName": "sum1", "type": "fieldAccess"},
{"fieldName": "sum2", "type": "fieldAccess"},
],
}
),
),
}
metric_names = ["sum1", "sum2"]
aggs = ds.get_aggregations(metrics_dict, metric_names)
expected_agg = {name: metrics_dict[name].json_obj for name in metric_names}
self.assertEqual(expected_agg, aggs)
metric_names = ["sum1", "col1"]
self.assertRaises(
SupersetException, ds.get_aggregations, metrics_dict, metric_names
)
metric_names = ["sum1", "div1"]
self.assertRaises(
SupersetException, ds.get_aggregations, metrics_dict, metric_names
)
| 40.798214
| 88
| 0.576093
|
import json
import unittest
from unittest.mock import Mock
import superset.connectors.druid.models as models
from superset.connectors.druid.models import DruidColumn, DruidDatasource, DruidMetric
from superset.exceptions import SupersetException
from .base_tests import SupersetTestCase
try:
from pydruid.utils.dimensions import (
MapLookupExtraction,
RegexExtraction,
RegisteredLookupExtraction,
)
import pydruid.utils.postaggregator as postaggs
except ImportError:
pass
def mock_metric(metric_name, is_postagg=False):
metric = Mock()
metric.metric_name = metric_name
metric.metric_type = "postagg" if is_postagg else "metric"
return metric
def emplace(metrics_dict, metric_name, is_postagg=False):
metrics_dict[metric_name] = mock_metric(metric_name, is_postagg)
class DruidFuncTestCase(SupersetTestCase):
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_extraction_fn_map(self):
filters = [{"col": "deviceName", "val": ["iPhone X"], "op": "in"}]
dimension_spec = {
"type": "extraction",
"dimension": "device",
"outputName": "deviceName",
"outputType": "STRING",
"extractionFn": {
"type": "lookup",
"dimension": "dimensionName",
"outputName": "dimensionOutputName",
"replaceMissingValueWith": "missing_value",
"retainMissingValue": False,
"lookup": {
"type": "map",
"map": {
"iPhone10,1": "iPhone 8",
"iPhone10,4": "iPhone 8",
"iPhone10,2": "iPhone 8 Plus",
"iPhone10,5": "iPhone 8 Plus",
"iPhone10,3": "iPhone X",
"iPhone10,6": "iPhone X",
},
"isOneToOne": False,
},
},
}
spec_json = json.dumps(dimension_spec)
col = DruidColumn(column_name="deviceName", dimension_spec_json=spec_json)
column_dict = {"deviceName": col}
f = DruidDatasource.get_filters(filters, [], column_dict)
assert isinstance(f.extraction_function, MapLookupExtraction)
dim_ext_fn = dimension_spec["extractionFn"]
f_ext_fn = f.extraction_function
self.assertEqual(dim_ext_fn["lookup"]["map"], f_ext_fn._mapping)
self.assertEqual(dim_ext_fn["lookup"]["isOneToOne"], f_ext_fn._injective)
self.assertEqual(
dim_ext_fn["replaceMissingValueWith"], f_ext_fn._replace_missing_values
)
self.assertEqual(
dim_ext_fn["retainMissingValue"], f_ext_fn._retain_missing_values
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_extraction_fn_regex(self):
filters = [{"col": "buildPrefix", "val": ["22B"], "op": "in"}]
dimension_spec = {
"type": "extraction",
"dimension": "build",
"outputName": "buildPrefix",
"outputType": "STRING",
"extractionFn": {"type": "regex", "expr": "(^[0-9A-Za-z]{3})"},
}
spec_json = json.dumps(dimension_spec)
col = DruidColumn(column_name="buildPrefix", dimension_spec_json=spec_json)
column_dict = {"buildPrefix": col}
f = DruidDatasource.get_filters(filters, [], column_dict)
assert isinstance(f.extraction_function, RegexExtraction)
dim_ext_fn = dimension_spec["extractionFn"]
f_ext_fn = f.extraction_function
self.assertEqual(dim_ext_fn["expr"], f_ext_fn._expr)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_extraction_fn_registered_lookup_extraction(self):
filters = [{"col": "country", "val": ["Spain"], "op": "in"}]
dimension_spec = {
"type": "extraction",
"dimension": "country_name",
"outputName": "country",
"outputType": "STRING",
"extractionFn": {"type": "registeredLookup", "lookup": "country_name"},
}
spec_json = json.dumps(dimension_spec)
col = DruidColumn(column_name="country", dimension_spec_json=spec_json)
column_dict = {"country": col}
f = DruidDatasource.get_filters(filters, [], column_dict)
assert isinstance(f.extraction_function, RegisteredLookupExtraction)
dim_ext_fn = dimension_spec["extractionFn"]
self.assertEqual(dim_ext_fn["type"], f.extraction_function.extraction_type)
self.assertEqual(dim_ext_fn["lookup"], f.extraction_function._lookup)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_ignores_invalid_filter_objects(self):
filtr = {"col": "col1", "op": "=="}
filters = [filtr]
col = DruidColumn(column_name="col1")
column_dict = {"col1": col}
self.assertIsNone(DruidDatasource.get_filters(filters, [], column_dict))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_filter_in(self):
filtr = {"col": "A", "op": "in", "val": ["a", "b", "c"]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertIn("filter", res.filter)
self.assertIn("fields", res.filter["filter"])
self.assertEqual("or", res.filter["filter"]["type"])
self.assertEqual(3, len(res.filter["filter"]["fields"]))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_filter_not_in(self):
filtr = {"col": "A", "op": "not in", "val": ["a", "b", "c"]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertIn("filter", res.filter)
self.assertIn("type", res.filter["filter"])
self.assertEqual("not", res.filter["filter"]["type"])
self.assertIn("field", res.filter["filter"])
self.assertEqual(
3, len(res.filter["filter"]["field"].filter["filter"]["fields"])
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_filter_equals(self):
filtr = {"col": "A", "op": "==", "val": "h"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("selector", res.filter["filter"]["type"])
self.assertEqual("A", res.filter["filter"]["dimension"])
self.assertEqual("h", res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_filter_not_equals(self):
filtr = {"col": "A", "op": "!=", "val": "h"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("not", res.filter["filter"]["type"])
self.assertEqual("h", res.filter["filter"]["field"].filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_bounds_filter(self):
filtr = {"col": "A", "op": ">=", "val": "h"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertFalse(res.filter["filter"]["lowerStrict"])
self.assertEqual("A", res.filter["filter"]["dimension"])
self.assertEqual("h", res.filter["filter"]["lower"])
self.assertFalse(res.filter["filter"]["alphaNumeric"])
filtr["op"] = ">"
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertTrue(res.filter["filter"]["lowerStrict"])
filtr["op"] = "<="
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertFalse(res.filter["filter"]["upperStrict"])
self.assertEqual("h", res.filter["filter"]["upper"])
filtr["op"] = "<"
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertTrue(res.filter["filter"]["upperStrict"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_is_null_filter(self):
filtr = {"col": "A", "op": "IS NULL"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("selector", res.filter["filter"]["type"])
self.assertEqual("", res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_is_not_null_filter(self):
filtr = {"col": "A", "op": "IS NOT NULL"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("not", res.filter["filter"]["type"])
self.assertIn("field", res.filter["filter"])
self.assertEqual(
"selector", res.filter["filter"]["field"].filter["filter"]["type"]
)
self.assertEqual("", res.filter["filter"]["field"].filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_regex_filter(self):
filtr = {"col": "A", "op": "regex", "val": "[abc]"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("regex", res.filter["filter"]["type"])
self.assertEqual("[abc]", res.filter["filter"]["pattern"])
self.assertEqual("A", res.filter["filter"]["dimension"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_composes_multiple_filters(self):
filtr1 = {"col": "A", "op": "!=", "val": "y"}
filtr2 = {"col": "B", "op": "in", "val": ["a", "b", "c"]}
cola = DruidColumn(column_name="A")
colb = DruidColumn(column_name="B")
column_dict = {"A": cola, "B": colb}
res = DruidDatasource.get_filters([filtr1, filtr2], [], column_dict)
self.assertEqual("and", res.filter["filter"]["type"])
self.assertEqual(2, len(res.filter["filter"]["fields"]))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_ignores_in_not_in_with_empty_value(self):
filtr1 = {"col": "A", "op": "in", "val": []}
filtr2 = {"col": "A", "op": "not in", "val": []}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr1, filtr2], [], column_dict)
self.assertIsNone(res)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_equals_for_in_not_in_single_value(self):
filtr = {"col": "A", "op": "in", "val": ["a"]}
cola = DruidColumn(column_name="A")
colb = DruidColumn(column_name="B")
column_dict = {"A": cola, "B": colb}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("selector", res.filter["filter"]["type"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_handles_arrays_for_string_types(self):
filtr = {"col": "A", "op": "==", "val": ["a", "b"]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("a", res.filter["filter"]["value"])
filtr = {"col": "A", "op": "==", "val": []}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertIsNone(res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_handles_none_for_string_types(self):
filtr = {"col": "A", "op": "==", "val": None}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertIsNone(res)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_extracts_values_in_quotes(self):
filtr = {"col": "A", "op": "in", "val": ['"a"']}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("a", res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_keeps_trailing_spaces(self):
filtr = {"col": "A", "op": "in", "val": ["a "]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("a ", res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_converts_strings_to_num(self):
filtr = {"col": "A", "op": "in", "val": ["6"]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], ["A"], column_dict)
self.assertEqual(6, res.filter["filter"]["value"])
filtr = {"col": "A", "op": "==", "val": "6"}
res = DruidDatasource.get_filters([filtr], ["A"], column_dict)
self.assertEqual(6, res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_no_groupby(self):
client = Mock()
from_dttm = Mock()
to_dttm = Mock()
from_dttm.replace = Mock(return_value=from_dttm)
to_dttm.replace = Mock(return_value=to_dttm)
from_dttm.isoformat = Mock(return_value="from")
to_dttm.isoformat = Mock(return_value="to")
timezone = "timezone"
from_dttm.tzname = Mock(return_value=timezone)
ds = DruidDatasource(datasource_name="datasource")
metric1 = DruidMetric(metric_name="metric1")
metric2 = DruidMetric(metric_name="metric2")
ds.metrics = [metric1, metric2]
col1 = DruidColumn(column_name="col1")
col2 = DruidColumn(column_name="col2")
ds.columns = [col1, col2]
aggs = []
post_aggs = ["some_agg"]
ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
groupby = []
metrics = ["metric1"]
ds.get_having_filters = Mock(return_value=[])
client.query_builder = Mock()
client.query_builder.last_query = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
filter=[],
row_limit=100,
)
self.assertEqual(0, len(client.topn.call_args_list))
self.assertEqual(0, len(client.groupby.call_args_list))
self.assertEqual(1, len(client.timeseries.call_args_list))
called_args = client.timeseries.call_args_list[0][1]
self.assertNotIn("dimensions", called_args)
self.assertIn("post_aggregations", called_args)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_with_adhoc_metric(self):
client = Mock()
from_dttm = Mock()
to_dttm = Mock()
from_dttm.replace = Mock(return_value=from_dttm)
to_dttm.replace = Mock(return_value=to_dttm)
from_dttm.isoformat = Mock(return_value="from")
to_dttm.isoformat = Mock(return_value="to")
timezone = "timezone"
from_dttm.tzname = Mock(return_value=timezone)
ds = DruidDatasource(datasource_name="datasource")
metric1 = DruidMetric(metric_name="metric1")
metric2 = DruidMetric(metric_name="metric2")
ds.metrics = [metric1, metric2]
col1 = DruidColumn(column_name="col1")
col2 = DruidColumn(column_name="col2")
ds.columns = [col1, col2]
all_metrics = []
post_aggs = ["some_agg"]
ds._metrics_and_post_aggs = Mock(return_value=(all_metrics, post_aggs))
groupby = []
metrics = [
{
"expressionType": "SIMPLE",
"column": {"type": "DOUBLE", "column_name": "col1"},
"aggregate": "SUM",
"label": "My Adhoc Metric",
}
]
ds.get_having_filters = Mock(return_value=[])
client.query_builder = Mock()
client.query_builder.last_query = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
filter=[],
row_limit=100,
)
self.assertEqual(0, len(client.topn.call_args_list))
self.assertEqual(0, len(client.groupby.call_args_list))
self.assertEqual(1, len(client.timeseries.call_args_list))
called_args = client.timeseries.call_args_list[0][1]
self.assertNotIn("dimensions", called_args)
self.assertIn("post_aggregations", called_args)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_single_groupby(self):
client = Mock()
from_dttm = Mock()
to_dttm = Mock()
from_dttm.replace = Mock(return_value=from_dttm)
to_dttm.replace = Mock(return_value=to_dttm)
from_dttm.isoformat = Mock(return_value="from")
to_dttm.isoformat = Mock(return_value="to")
timezone = "timezone"
from_dttm.tzname = Mock(return_value=timezone)
ds = DruidDatasource(datasource_name="datasource")
metric1 = DruidMetric(metric_name="metric1")
metric2 = DruidMetric(metric_name="metric2")
ds.metrics = [metric1, metric2]
col1 = DruidColumn(column_name="col1")
col2 = DruidColumn(column_name="col2")
ds.columns = [col1, col2]
aggs = ["metric1"]
post_aggs = ["some_agg"]
ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
groupby = ["col1"]
metrics = ["metric1"]
ds.get_having_filters = Mock(return_value=[])
client.query_builder.last_query.query_dict = {"mock": 0}
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
timeseries_limit=100,
client=client,
order_desc=True,
filter=[],
)
self.assertEqual(2, len(client.topn.call_args_list))
self.assertEqual(0, len(client.groupby.call_args_list))
self.assertEqual(0, len(client.timeseries.call_args_list))
called_args_pre = client.topn.call_args_list[0][1]
self.assertNotIn("dimensions", called_args_pre)
self.assertIn("dimension", called_args_pre)
called_args = client.topn.call_args_list[1][1]
self.assertIn("dimension", called_args)
self.assertEqual("col1", called_args["dimension"])
client = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
order_desc=False,
filter=[],
row_limit=100,
)
self.assertEqual(0, len(client.topn.call_args_list))
self.assertEqual(1, len(client.groupby.call_args_list))
self.assertEqual(0, len(client.timeseries.call_args_list))
self.assertIn("dimensions", client.groupby.call_args_list[0][1])
self.assertEqual(["col1"], client.groupby.call_args_list[0][1]["dimensions"])
spec = {"outputName": "hello", "dimension": "matcho"}
spec_json = json.dumps(spec)
col3 = DruidColumn(column_name="col3", dimension_spec_json=spec_json)
ds.columns.append(col3)
groupby = ["col3"]
client = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
order_desc=True,
timeseries_limit=5,
filter=[],
row_limit=100,
)
self.assertEqual(2, len(client.topn.call_args_list))
self.assertEqual(0, len(client.groupby.call_args_list))
self.assertEqual(0, len(client.timeseries.call_args_list))
self.assertIn("dimension", client.topn.call_args_list[0][1])
self.assertIn("dimension", client.topn.call_args_list[1][1])
self.assertEqual("matcho", client.topn.call_args_list[0][1]["dimension"])
self.assertEqual(spec, client.topn.call_args_list[1][1]["dimension"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_multiple_groupby(self):
client = Mock()
from_dttm = Mock()
to_dttm = Mock()
from_dttm.replace = Mock(return_value=from_dttm)
to_dttm.replace = Mock(return_value=to_dttm)
from_dttm.isoformat = Mock(return_value="from")
to_dttm.isoformat = Mock(return_value="to")
timezone = "timezone"
from_dttm.tzname = Mock(return_value=timezone)
ds = DruidDatasource(datasource_name="datasource")
metric1 = DruidMetric(metric_name="metric1")
metric2 = DruidMetric(metric_name="metric2")
ds.metrics = [metric1, metric2]
col1 = DruidColumn(column_name="col1")
col2 = DruidColumn(column_name="col2")
ds.columns = [col1, col2]
aggs = []
post_aggs = ["some_agg"]
ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
groupby = ["col1", "col2"]
metrics = ["metric1"]
ds.get_having_filters = Mock(return_value=[])
client.query_builder = Mock()
client.query_builder.last_query = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
row_limit=100,
filter=[],
)
self.assertEqual(0, len(client.topn.call_args_list))
self.assertEqual(1, len(client.groupby.call_args_list))
self.assertEqual(0, len(client.timeseries.call_args_list))
called_args = client.groupby.call_args_list[0][1]
self.assertIn("dimensions", called_args)
self.assertEqual(["col1", "col2"], called_args["dimensions"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_post_agg_returns_correct_agg_type(self):
get_post_agg = DruidDatasource.get_post_agg
function = "function(field1, field2) { return field1 + field2; }"
conf = {
"type": "javascript",
"name": "postagg_name",
"fieldNames": ["field1", "field2"],
"function": function,
}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, models.JavascriptPostAggregator))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["type"], "javascript")
self.assertEqual(postagg.post_aggregator["fieldNames"], ["field1", "field2"])
self.assertEqual(postagg.post_aggregator["name"], "postagg_name")
self.assertEqual(postagg.post_aggregator["function"], function)
conf = {"type": "quantile", "name": "postagg_name", "probability": "0.5"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Quantile))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["probability"], "0.5")
conf = {
"type": "quantiles",
"name": "postagg_name",
"probabilities": "0.4,0.5,0.6",
}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Quantiles))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["probabilities"], "0.4,0.5,0.6")
conf = {"type": "fieldAccess", "name": "field_name"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Field))
self.assertEqual(postagg.name, "field_name")
conf = {"type": "constant", "value": 1234, "name": "postagg_name"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Const))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["value"], 1234)
conf = {"type": "hyperUniqueCardinality", "name": "unique_name"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.HyperUniqueCardinality))
self.assertEqual(postagg.name, "unique_name")
conf = {
"type": "arithmetic",
"fn": "+",
"fields": ["field1", "field2"],
"name": "postagg_name",
}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Postaggregator))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["fn"], "+")
self.assertEqual(postagg.post_aggregator["fields"], ["field1", "field2"])
conf = {"type": "custom", "name": "custom_name", "stuff": "more_stuff"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, models.CustomPostAggregator))
self.assertEqual(postagg.name, "custom_name")
self.assertEqual(postagg.post_aggregator["stuff"], "more_stuff")
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_find_postaggs_for_returns_postaggs_and_removes(self):
find_postaggs_for = DruidDatasource.find_postaggs_for
postagg_names = set(["pa2", "pa3", "pa4", "m1", "m2", "m3", "m4"])
metrics = {}
for i in range(1, 6):
emplace(metrics, "pa" + str(i), True)
emplace(metrics, "m" + str(i), False)
postagg_list = find_postaggs_for(postagg_names, metrics)
self.assertEqual(3, len(postagg_list))
self.assertEqual(4, len(postagg_names))
expected_metrics = ["m1", "m2", "m3", "m4"]
expected_postaggs = set(["pa2", "pa3", "pa4"])
for postagg in postagg_list:
expected_postaggs.remove(postagg.metric_name)
for metric in expected_metrics:
postagg_names.remove(metric)
self.assertEqual(0, len(expected_postaggs))
self.assertEqual(0, len(postagg_names))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_recursive_get_fields(self):
conf = {
"type": "quantile",
"fieldName": "f1",
"field": {
"type": "custom",
"fields": [
{"type": "fieldAccess", "fieldName": "f2"},
{"type": "fieldAccess", "fieldName": "f3"},
{
"type": "quantiles",
"fieldName": "f4",
"field": {"type": "custom"},
},
{
"type": "custom",
"fields": [
{"type": "fieldAccess", "fieldName": "f5"},
{
"type": "fieldAccess",
"fieldName": "f2",
"fields": [
{"type": "fieldAccess", "fieldName": "f3"},
{"type": "fieldIgnoreMe", "fieldName": "f6"},
],
},
],
},
],
},
}
fields = DruidDatasource.recursive_get_fields(conf)
expected = set(["f1", "f2", "f3", "f4", "f5"])
self.assertEqual(5, len(fields))
for field in fields:
expected.remove(field)
self.assertEqual(0, len(expected))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_metrics_and_post_aggs_tree(self):
metrics = ["A", "B", "m1", "m2"]
metrics_dict = {}
for i in range(ord("A"), ord("K") + 1):
emplace(metrics_dict, chr(i), True)
for i in range(1, 10):
emplace(metrics_dict, "m" + str(i), False)
def depends_on(index, fields):
dependents = fields if isinstance(fields, list) else [fields]
metrics_dict[index].json_obj = {"fieldNames": dependents}
depends_on("A", ["m1", "D", "C"])
depends_on("B", ["B", "C", "E", "F", "m3"])
depends_on("C", ["H", "I"])
depends_on("D", ["m2", "m5", "G", "C"])
depends_on("E", ["H", "I", "J"])
depends_on("F", ["J", "m5"])
depends_on("G", ["m4", "m7", "m6", "A"])
depends_on("H", ["A", "m4", "I"])
depends_on("I", ["H", "K"])
depends_on("J", "K")
depends_on("K", ["m8", "m9"])
aggs, postaggs = DruidDatasource.metrics_and_post_aggs(metrics, metrics_dict)
expected_metrics = set(aggs.keys())
self.assertEqual(9, len(aggs))
for i in range(1, 10):
expected_metrics.remove("m" + str(i))
self.assertEqual(0, len(expected_metrics))
self.assertEqual(11, len(postaggs))
for i in range(ord("A"), ord("K") + 1):
del postaggs[chr(i)]
self.assertEqual(0, len(postaggs))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_metrics_and_post_aggs(self):
metrics_dict = {
"unused_count": DruidMetric(
metric_name="unused_count",
verbose_name="COUNT(*)",
metric_type="count",
json=json.dumps({"type": "count", "name": "unused_count"}),
),
"some_sum": DruidMetric(
metric_name="some_sum",
verbose_name="SUM(*)",
metric_type="sum",
json=json.dumps({"type": "sum", "name": "sum"}),
),
"a_histogram": DruidMetric(
metric_name="a_histogram",
verbose_name="APPROXIMATE_HISTOGRAM(*)",
metric_type="approxHistogramFold",
json=json.dumps({"type": "approxHistogramFold", "name": "a_histogram"}),
),
"aCustomMetric": DruidMetric(
metric_name="aCustomMetric",
verbose_name="MY_AWESOME_METRIC(*)",
metric_type="aCustomType",
json=json.dumps({"type": "customMetric", "name": "aCustomMetric"}),
),
"quantile_p95": DruidMetric(
metric_name="quantile_p95",
verbose_name="P95(*)",
metric_type="postagg",
json=json.dumps(
{
"type": "quantile",
"probability": 0.95,
"name": "p95",
"fieldName": "a_histogram",
}
),
),
"aCustomPostAgg": DruidMetric(
metric_name="aCustomPostAgg",
verbose_name="CUSTOM_POST_AGG(*)",
metric_type="postagg",
json=json.dumps(
{
"type": "customPostAgg",
"name": "aCustomPostAgg",
"field": {"type": "fieldAccess", "fieldName": "aCustomMetric"},
}
),
),
}
adhoc_metric = {
"expressionType": "SIMPLE",
"column": {"type": "DOUBLE", "column_name": "value"},
"aggregate": "SUM",
"label": "My Adhoc Metric",
}
metrics = ["some_sum"]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
assert set(saved_metrics.keys()) == {"some_sum"}
assert post_aggs == {}
metrics = [adhoc_metric]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
assert set(saved_metrics.keys()) == set([adhoc_metric["label"]])
assert post_aggs == {}
metrics = ["some_sum", adhoc_metric]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
assert set(saved_metrics.keys()) == {"some_sum", adhoc_metric["label"]}
assert post_aggs == {}
metrics = ["quantile_p95"]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
result_postaggs = set(["quantile_p95"])
assert set(saved_metrics.keys()) == {"a_histogram"}
assert set(post_aggs.keys()) == result_postaggs
metrics = ["aCustomPostAgg"]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
result_postaggs = set(["aCustomPostAgg"])
assert set(saved_metrics.keys()) == {"aCustomMetric"}
assert set(post_aggs.keys()) == result_postaggs
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_druid_type_from_adhoc_metric(self):
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "DOUBLE", "column_name": "value"},
"aggregate": "SUM",
"label": "My Adhoc Metric",
}
)
assert druid_type == "doubleSum"
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "LONG", "column_name": "value"},
"aggregate": "MAX",
"label": "My Adhoc Metric",
}
)
assert druid_type == "longMax"
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "VARCHAR(255)", "column_name": "value"},
"aggregate": "COUNT",
"label": "My Adhoc Metric",
}
)
assert druid_type == "count"
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "VARCHAR(255)", "column_name": "value"},
"aggregate": "COUNT_DISTINCT",
"label": "My Adhoc Metric",
}
)
assert druid_type == "cardinality"
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "hyperUnique", "column_name": "value"},
"aggregate": "COUNT_DISTINCT",
"label": "My Adhoc Metric",
}
)
assert druid_type == "hyperUnique"
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_order_by_metrics(self):
client = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
from_dttm = Mock()
to_dttm = Mock()
ds = DruidDatasource(datasource_name="datasource")
ds.get_having_filters = Mock(return_value=[])
dim1 = DruidColumn(column_name="dim1")
dim2 = DruidColumn(column_name="dim2")
metrics_dict = {
"count1": DruidMetric(
metric_name="count1",
metric_type="count",
json=json.dumps({"type": "count", "name": "count1"}),
),
"sum1": DruidMetric(
metric_name="sum1",
metric_type="doubleSum",
json=json.dumps({"type": "doubleSum", "name": "sum1"}),
),
"sum2": DruidMetric(
metric_name="sum2",
metric_type="doubleSum",
json=json.dumps({"type": "doubleSum", "name": "sum2"}),
),
"div1": DruidMetric(
metric_name="div1",
metric_type="postagg",
json=json.dumps(
{
"fn": "/",
"type": "arithmetic",
"name": "div1",
"fields": [
{"fieldName": "sum1", "type": "fieldAccess"},
{"fieldName": "sum2", "type": "fieldAccess"},
],
}
),
),
}
ds.columns = [dim1, dim2]
ds.metrics = list(metrics_dict.values())
groupby = ["dim1"]
metrics = ["count1"]
granularity = "all"
ds.run_query(
groupby,
metrics,
granularity,
from_dttm,
to_dttm,
timeseries_limit=5,
timeseries_limit_metric="sum1",
client=client,
order_desc=True,
filter=[],
)
qry_obj = client.topn.call_args_list[0][1]
self.assertEqual("dim1", qry_obj["dimension"])
self.assertEqual("sum1", qry_obj["metric"])
aggregations = qry_obj["aggregations"]
post_aggregations = qry_obj["post_aggregations"]
self.assertEqual({"count1", "sum1"}, set(aggregations.keys()))
self.assertEqual(set(), set(post_aggregations.keys()))
ds.run_query(
groupby,
metrics,
granularity,
from_dttm,
to_dttm,
timeseries_limit=5,
timeseries_limit_metric="div1",
client=client,
order_desc=True,
filter=[],
)
qry_obj = client.topn.call_args_list[1][1]
self.assertEqual("dim1", qry_obj["dimension"])
self.assertEqual("div1", qry_obj["metric"])
aggregations = qry_obj["aggregations"]
post_aggregations = qry_obj["post_aggregations"]
self.assertEqual({"count1", "sum1", "sum2"}, set(aggregations.keys()))
self.assertEqual({"div1"}, set(post_aggregations.keys()))
groupby = ["dim1", "dim2"]
ds.run_query(
groupby,
metrics,
granularity,
from_dttm,
to_dttm,
timeseries_limit=5,
timeseries_limit_metric="sum1",
client=client,
order_desc=True,
filter=[],
)
qry_obj = client.groupby.call_args_list[0][1]
self.assertEqual({"dim1", "dim2"}, set(qry_obj["dimensions"]))
self.assertEqual("sum1", qry_obj["limit_spec"]["columns"][0]["dimension"])
aggregations = qry_obj["aggregations"]
post_aggregations = qry_obj["post_aggregations"]
self.assertEqual({"count1", "sum1"}, set(aggregations.keys()))
self.assertEqual(set(), set(post_aggregations.keys()))
ds.run_query(
groupby,
metrics,
granularity,
from_dttm,
to_dttm,
timeseries_limit=5,
timeseries_limit_metric="div1",
client=client,
order_desc=True,
filter=[],
)
qry_obj = client.groupby.call_args_list[1][1]
self.assertEqual({"dim1", "dim2"}, set(qry_obj["dimensions"]))
self.assertEqual("div1", qry_obj["limit_spec"]["columns"][0]["dimension"])
aggregations = qry_obj["aggregations"]
post_aggregations = qry_obj["post_aggregations"]
self.assertEqual({"count1", "sum1", "sum2"}, set(aggregations.keys()))
self.assertEqual({"div1"}, set(post_aggregations.keys()))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_aggregations(self):
ds = DruidDatasource(datasource_name="datasource")
metrics_dict = {
"sum1": DruidMetric(
metric_name="sum1",
metric_type="doubleSum",
json=json.dumps({"type": "doubleSum", "name": "sum1"}),
),
"sum2": DruidMetric(
metric_name="sum2",
metric_type="doubleSum",
json=json.dumps({"type": "doubleSum", "name": "sum2"}),
),
"div1": DruidMetric(
metric_name="div1",
metric_type="postagg",
json=json.dumps(
{
"fn": "/",
"type": "arithmetic",
"name": "div1",
"fields": [
{"fieldName": "sum1", "type": "fieldAccess"},
{"fieldName": "sum2", "type": "fieldAccess"},
],
}
),
),
}
metric_names = ["sum1", "sum2"]
aggs = ds.get_aggregations(metrics_dict, metric_names)
expected_agg = {name: metrics_dict[name].json_obj for name in metric_names}
self.assertEqual(expected_agg, aggs)
metric_names = ["sum1", "col1"]
self.assertRaises(
SupersetException, ds.get_aggregations, metrics_dict, metric_names
)
metric_names = ["sum1", "div1"]
self.assertRaises(
SupersetException, ds.get_aggregations, metrics_dict, metric_names
)
| true
| true
|
f70b14387afbfb856a02ada0d56f10e597f6b54c
| 668
|
py
|
Python
|
esuits/index/views.py
|
junkhp/esuites_database_modification
|
ac2b706a7cc8488cbe83a77d7ce062f5b8228463
|
[
"MIT"
] | 4
|
2020-11-02T18:25:13.000Z
|
2021-03-15T07:56:41.000Z
|
esuits/index/views.py
|
junkhp/esuites_database_modification
|
ac2b706a7cc8488cbe83a77d7ce062f5b8228463
|
[
"MIT"
] | 9
|
2021-02-01T03:20:59.000Z
|
2021-03-06T08:15:04.000Z
|
esuits/index/views.py
|
junkhp/esuites_database_modification
|
ac2b706a7cc8488cbe83a77d7ce062f5b8228463
|
[
"MIT"
] | 4
|
2020-11-03T16:52:37.000Z
|
2020-11-11T16:31:26.000Z
|
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import ListView, DetailView, DeleteView, UpdateView
from django import forms
from django.urls import reverse_lazy, reverse
from django.views import View
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from pprint import pprint
from django.db.models import Q
# Create your views here.
class IndexView(View):
    '''Display the top page'''
def get(self, request):
template_name = 'esuits/index.html'
return render(request, template_name)
| 31.809524
| 77
| 0.791916
|
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import ListView, DetailView, DeleteView, UpdateView
from django import forms
from django.urls import reverse_lazy, reverse
from django.views import View
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from pprint import pprint
from django.db.models import Q
class IndexView(View):
def get(self, request):
template_name = 'esuits/index.html'
return render(request, template_name)
| true
| true
|
f70b15354c78daddad253c8e050db6e8e7e66e50
| 2,094
|
py
|
Python
|
tests/test_local.py
|
gaolichuang/py-essential
|
9e2b803f878f1cb3686dd365a16b943594a1cd82
|
[
"Apache-2.0"
] | 1
|
2015-01-11T06:43:02.000Z
|
2015-01-11T06:43:02.000Z
|
tests/test_local.py
|
gaolichuang/py-essential
|
9e2b803f878f1cb3686dd365a16b943594a1cd82
|
[
"Apache-2.0"
] | null | null | null |
tests/test_local.py
|
gaolichuang/py-essential
|
9e2b803f878f1cb3686dd365a16b943594a1cd82
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from six import moves
from essential import local
from essential import test
class Dict(dict):
"""Make weak referencable object."""
pass
class LocalStoreTestCase(test.BaseTestCase):
v1 = Dict(a='1')
v2 = Dict(a='2')
v3 = Dict(a='3')
def setUp(self):
super(LocalStoreTestCase, self).setUp()
# NOTE(mrodden): we need to make sure that local store
# gets imported in the current python context we are
# testing in (eventlet vs normal python threading) so
# we test the correct type of local store for the current
# threading model
moves.reload_module(local)
def test_thread_unique_storage(self):
"""Make sure local store holds thread specific values."""
expected_set = []
local.store.a = self.v1
def do_something():
local.store.a = self.v2
expected_set.append(getattr(local.store, 'a'))
def do_something2():
local.store.a = self.v3
expected_set.append(getattr(local.store, 'a'))
t1 = threading.Thread(target=do_something)
t2 = threading.Thread(target=do_something2)
t1.start()
t2.start()
t1.join()
t2.join()
expected_set.append(getattr(local.store, 'a'))
self.assertTrue(self.v1 in expected_set)
self.assertTrue(self.v2 in expected_set)
self.assertTrue(self.v3 in expected_set)
| 30.794118
| 78
| 0.658548
|
import threading
from six import moves
from essential import local
from essential import test
class Dict(dict):
pass
class LocalStoreTestCase(test.BaseTestCase):
v1 = Dict(a='1')
v2 = Dict(a='2')
v3 = Dict(a='3')
def setUp(self):
super(LocalStoreTestCase, self).setUp()
moves.reload_module(local)
def test_thread_unique_storage(self):
expected_set = []
local.store.a = self.v1
def do_something():
local.store.a = self.v2
expected_set.append(getattr(local.store, 'a'))
def do_something2():
local.store.a = self.v3
expected_set.append(getattr(local.store, 'a'))
t1 = threading.Thread(target=do_something)
t2 = threading.Thread(target=do_something2)
t1.start()
t2.start()
t1.join()
t2.join()
expected_set.append(getattr(local.store, 'a'))
self.assertTrue(self.v1 in expected_set)
self.assertTrue(self.v2 in expected_set)
self.assertTrue(self.v3 in expected_set)
| true
| true
|
f70b15ad06c667a6017f75785dfe700e2698982c
| 1,310
|
py
|
Python
|
tests/nn.py
|
maikka39/Toy-Neural-Network-Py
|
a76b763e05fb9361a09fc825cdd0dc3606a3cb03
|
[
"MIT"
] | null | null | null |
tests/nn.py
|
maikka39/Toy-Neural-Network-Py
|
a76b763e05fb9361a09fc825cdd0dc3606a3cb03
|
[
"MIT"
] | null | null | null |
tests/nn.py
|
maikka39/Toy-Neural-Network-Py
|
a76b763e05fb9361a09fc825cdd0dc3606a3cb03
|
[
"MIT"
] | null | null | null |
from random import randint
from tnnp import nn as tnnp
nn = tnnp.NeuralNetwork(2, 2, 1)
if nn is None:
raise Exception("Initialization failed!", m.matrix)
nn = tnnp.NeuralNetwork(2, 2, 1)
input = [1, 0]
output = nn.feedforward(input)
if output < [-1] or output > [1]:
raise Exception(".feedforward function failed!", m.matrix)
def formula(x):
    # XOR-style target: returns 1 when exactly one of the two inputs is 1, otherwise -1
if x == [0, 0]:
return [-1]
if x == [0, 1]:
return [1]
if x == [1, 0]:
return [1]
if x == [1, 1]:
return [-1]
nn = tnnp.NeuralNetwork(2, 2, 1)
for i in range(50000):
data = [randint(0, 1), randint(0, 1)]
nn.train(data, formula(data))
values = []
for data in [[0, 0], [0, 1], [1, 0], [1, 1]]:
output = nn.feedforward(data)
values.append(round(output[0]))
if not values == [-1, 1, 1, -1]:
raise Exception(
".train function failed! You might want to try running this script again.", values)
nn = tnnp.NeuralNetwork(2, 2, 1)
cp = nn.copy()
if not cp:
raise Exception(".copy function failed!", cp)
nn = tnnp.NeuralNetwork(2, 2, 1)
nn.mutate(lambda n: n * 2)
nn = tnnp.NeuralNetwork(2, 2, 1)
nn.save("test.pkl")
nn2 = tnnp.load("test.pkl")
if nn2.hidden_nodes != 2:
raise Exception(".save/.load function failed!", nn2)
print("No errors were found!")
| 23.818182
| 91
| 0.6
|
from random import randint
from tnnp import nn as tnnp
nn = tnnp.NeuralNetwork(2, 2, 1)
if nn is None:
raise Exception("Initialization failed!", m.matrix)
nn = tnnp.NeuralNetwork(2, 2, 1)
input = [1, 0]
output = nn.feedforward(input)
if output < [-1] or output > [1]:
raise Exception(".feedforward function failed!", m.matrix)
def formula(x):
if x == [0, 0]:
return [-1]
if x == [0, 1]:
return [1]
if x == [1, 0]:
return [1]
if x == [1, 1]:
return [-1]
nn = tnnp.NeuralNetwork(2, 2, 1)
for i in range(50000):
data = [randint(0, 1), randint(0, 1)]
nn.train(data, formula(data))
values = []
for data in [[0, 0], [0, 1], [1, 0], [1, 1]]:
output = nn.feedforward(data)
values.append(round(output[0]))
if not values == [-1, 1, 1, -1]:
raise Exception(
".train function failed! You might want to try running this script again.", values)
nn = tnnp.NeuralNetwork(2, 2, 1)
cp = nn.copy()
if not cp:
raise Exception(".copy function failed!", cp)
nn = tnnp.NeuralNetwork(2, 2, 1)
nn.mutate(lambda n: n * 2)
nn = tnnp.NeuralNetwork(2, 2, 1)
nn.save("test.pkl")
nn2 = tnnp.load("test.pkl")
if nn2.hidden_nodes != 2:
raise Exception(".save/.load function failed!", nn2)
print("No errors were found!")
| true
| true
|
f70b16ab99a5af27e7a27a4a42a400263f5c72af
| 1,759
|
py
|
Python
|
superpoint/models/simple_classifier.py
|
SwagJ/SuperPoint
|
ecbf1d6e809ea8c7c832078ad26d2a74ed2fae29
|
[
"MIT"
] | null | null | null |
superpoint/models/simple_classifier.py
|
SwagJ/SuperPoint
|
ecbf1d6e809ea8c7c832078ad26d2a74ed2fae29
|
[
"MIT"
] | null | null | null |
superpoint/models/simple_classifier.py
|
SwagJ/SuperPoint
|
ecbf1d6e809ea8c7c832078ad26d2a74ed2fae29
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow import layers as tfl
from .base_model import BaseModel, Mode
class SimpleClassifier(BaseModel):
input_spec = {
'image': {'shape': [None, None, None, 1], 'type': tf.float32}
}
required_config_keys = []
default_config = {'data_format': 'channels_first'}
def _model(self, inputs, mode, **config):
x = inputs['image']
if config['data_format'] == 'channels_first':
x = tf.transpose(x, [0, 3, 1, 2])
params = {'padding': 'SAME', 'data_format': config['data_format']}
x = tfl.conv2d(x, 32, 5, activation=tf.nn.relu, name='conv1', **params)
x = tfl.max_pooling2d(x, 2, 2, name='pool1', **params)
x = tfl.conv2d(x, 64, 5, activation=tf.nn.relu, name='conv2', **params)
x = tfl.max_pooling2d(x, 2, 2, name='pool2', **params)
x = tfl.flatten(x)
x = tfl.dense(x, 1024, activation=tf.nn.relu, name='fc1')
x = tfl.dense(x, 10, name='fc2')
if mode == Mode.TRAIN:
return {'logits': x}
else:
return {'logits': x, 'prob': tf.nn.softmax(x), 'pred': tf.argmax(x, axis=-1)}
def _loss(self, outputs, inputs, **config):
with tf.name_scope('loss'):
loss = tf.reduce_mean(tf.compat.v1.losses.sparse_softmax_cross_entropy(
labels=inputs['label'], logits=outputs['logits']))
return loss
def _metrics(self, outputs, inputs, **config):
metrics = {}
with tf.name_scope('metrics'):
correct_count = tf.equal(outputs['pred'], inputs['label'])
correct_count = tf.cast(correct_count, tf.float32)
metrics['accuracy'] = tf.reduce_mean(correct_count)
return metrics
| 35.897959
| 89
| 0.583854
|
import tensorflow as tf
from tensorflow import layers as tfl
from .base_model import BaseModel, Mode
class SimpleClassifier(BaseModel):
input_spec = {
'image': {'shape': [None, None, None, 1], 'type': tf.float32}
}
required_config_keys = []
default_config = {'data_format': 'channels_first'}
def _model(self, inputs, mode, **config):
x = inputs['image']
if config['data_format'] == 'channels_first':
x = tf.transpose(x, [0, 3, 1, 2])
params = {'padding': 'SAME', 'data_format': config['data_format']}
x = tfl.conv2d(x, 32, 5, activation=tf.nn.relu, name='conv1', **params)
x = tfl.max_pooling2d(x, 2, 2, name='pool1', **params)
x = tfl.conv2d(x, 64, 5, activation=tf.nn.relu, name='conv2', **params)
x = tfl.max_pooling2d(x, 2, 2, name='pool2', **params)
x = tfl.flatten(x)
x = tfl.dense(x, 1024, activation=tf.nn.relu, name='fc1')
x = tfl.dense(x, 10, name='fc2')
if mode == Mode.TRAIN:
return {'logits': x}
else:
return {'logits': x, 'prob': tf.nn.softmax(x), 'pred': tf.argmax(x, axis=-1)}
def _loss(self, outputs, inputs, **config):
with tf.name_scope('loss'):
loss = tf.reduce_mean(tf.compat.v1.losses.sparse_softmax_cross_entropy(
labels=inputs['label'], logits=outputs['logits']))
return loss
def _metrics(self, outputs, inputs, **config):
metrics = {}
with tf.name_scope('metrics'):
correct_count = tf.equal(outputs['pred'], inputs['label'])
correct_count = tf.cast(correct_count, tf.float32)
metrics['accuracy'] = tf.reduce_mean(correct_count)
return metrics
| true
| true
|
f70b187b54382fd85b3a73c0c1ad86ac689ae9dc
| 3,164
|
py
|
Python
|
src/python/pipelines/xchem/split_fragnet_candidates.py
|
Waztom/pipelines
|
63ac14d05446ced622fd2acb86c9b84dcc5feae8
|
[
"Apache-2.0"
] | 24
|
2017-04-04T19:12:34.000Z
|
2022-03-09T16:29:06.000Z
|
src/python/pipelines/xchem/split_fragnet_candidates.py
|
Waztom/pipelines
|
63ac14d05446ced622fd2acb86c9b84dcc5feae8
|
[
"Apache-2.0"
] | 22
|
2017-06-02T07:03:52.000Z
|
2021-03-27T09:44:08.000Z
|
src/python/pipelines/xchem/split_fragnet_candidates.py
|
Waztom/pipelines
|
63ac14d05446ced622fd2acb86c9b84dcc5feae8
|
[
"Apache-2.0"
] | 19
|
2017-05-18T10:27:58.000Z
|
2021-08-02T10:44:01.000Z
|
#!/usr/bin/env python
# Copyright 2020 Informatics Matters Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse, os, sys, json, traceback
from pipelines_utils import utils
def gen_filename(id, generate_filenames):
if generate_filenames:
return str(count)
else:
return id
def execute(candidates_json, generate_filenames):
with open(candidates_json, 'r') as f:
candidates = json.load(f)
queries = candidates['queries']['molecules']
results = candidates['results']
hitCounts = candidates['hitCounts']
utils.log('Processing', len(queries), 'queries and', len(results), 'results')
num_mols = 0
num_hits = 0
count = 0
ids2Filenames = {}
for query in queries:
id = query['id']
if id in hitCounts:
molfile = query['originalMol']
if generate_filenames:
                fname = str(count).zfill(3)
else:
fname = id
utils.log('Using file name of', fname)
with open(fname + '.mol', 'w') as f:
f.write(molfile)
num_hits += 1
ids2Filenames[id] = fname
count += 1
writers = {}
for result in results:
num_mols += 1
for id in result['sourceMols']:
if id in writers:
writer = writers[id]
else:
fname = ids2Filenames[id]
writer = open(fname + '.smi', 'w')
writers[id] = writer
smiles = result['smiles']
#utils.log('Processing', smiles)
writer.write(smiles + '\n')
for w in writers.values():
w.close()
utils.log('Totals - hits:', num_hits, 'outputs:', num_mols)
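# A minimal sketch of the JSON shape that execute() expects, derived from the
# keys read above (identifiers and values here are hypothetical):
# {
#   "queries": {"molecules": [{"id": "MOL1", "originalMol": "<molfile text>"}]},
#   "hitCounts": {"MOL1": 2},
#   "results": [{"smiles": "c1ccccc1", "sourceMols": ["MOL1"]}]
# }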
def main():
"""
Example usage:
python -m pipelines.xchem.split-fragnet-candidates -i ../../data/mpro/expanded-17.json
:return:
"""
parser = argparse.ArgumentParser(description='Split fragnet candidates - Split fragment network expansion into individual sets')
parser.add_argument('-i', '--input', help='JSON containing the expanded candidates)')
parser.add_argument('-g', '--generate-filenames', action='store_true', help='Use automatically generated file names instead of the title field)')
args = parser.parse_args()
utils.log("Split fragnet candidates args: ", args)
infile = args.input
execute(infile, args.generate_filenames)
if __name__ == "__main__":
main()
| 30.423077
| 149
| 0.596081
|
import argparse, os, sys, json, traceback
from pipelines_utils import utils
def gen_filename(id, generate_filenames):
if generate_filenames:
return str(count)
else:
return id
def execute(candidates_json, generate_filenames):
with open(candidates_json, 'r') as f:
candidates = json.load(f)
queries = candidates['queries']['molecules']
results = candidates['results']
hitCounts = candidates['hitCounts']
utils.log('Processing', len(queries), 'queries and', len(results), 'results')
num_mols = 0
num_hits = 0
count = 0
ids2Filenames = {}
for query in queries:
id = query['id']
if id in hitCounts:
molfile = query['originalMol']
if generate_filenames:
                fname = str(count).zfill(3)
else:
fname = id
utils.log('Using file name of', fname)
with open(fname + '.mol', 'w') as f:
f.write(molfile)
num_hits += 1
ids2Filenames[id] = fname
count += 1
writers = {}
for result in results:
num_mols += 1
for id in result['sourceMols']:
if id in writers:
writer = writers[id]
else:
fname = ids2Filenames[id]
writer = open(fname + '.smi', 'w')
writers[id] = writer
smiles = result['smiles']
writer.write(smiles + '\n')
for w in writers.values():
w.close()
utils.log('Totals - hits:', num_hits, 'outputs:', num_mols)
def main():
parser = argparse.ArgumentParser(description='Split fragnet candidates - Split fragment network expansion into individual sets')
parser.add_argument('-i', '--input', help='JSON containing the expanded candidates)')
parser.add_argument('-g', '--generate-filenames', action='store_true', help='Use automatically generated file names instead of the title field)')
args = parser.parse_args()
utils.log("Split fragnet candidates args: ", args)
infile = args.input
execute(infile, args.generate_filenames)
if __name__ == "__main__":
main()
| true
| true
|
f70b18a4e556bb5a038129fb8aad566e50ed8df6
| 1,008
|
py
|
Python
|
flarestack/core/astro.py
|
robertdstein/flarestack
|
2ce7e67da336514f6f38f06126a1fbd82131e441
|
[
"MIT"
] | null | null | null |
flarestack/core/astro.py
|
robertdstein/flarestack
|
2ce7e67da336514f6f38f06126a1fbd82131e441
|
[
"MIT"
] | 25
|
2019-11-14T15:46:24.000Z
|
2020-11-27T11:14:22.000Z
|
flarestack/core/astro.py
|
robertdstein/flarestack
|
2ce7e67da336514f6f38f06126a1fbd82131e441
|
[
"MIT"
] | 2
|
2020-01-06T19:39:27.000Z
|
2020-07-16T20:32:29.000Z
|
"""
Function taken from IceCube astro package.
"""
import numpy as np
def angular_distance(lon1, lat1, lon2, lat2):
"""
calculate the angular distince along the great circle
on the surface of a shpere between the points
(`lon1`,`lat1`) and (`lon2`,`lat2`)
This function Works for equatorial coordinates
with right ascension as longitude and declination
as latitude. This function uses the Vincenty formula
for calculating the distance.
Parameters
----------
lon1 : array_like
longitude of first point in radians
lat1 : array_like
latitude of the first point in radians
lon2 : array_like
longitude of second point in radians
lat2 : array_like
latitude of the second point in radians
"""
c1 = np.cos(lat1)
c2 = np.cos(lat2)
s1 = np.sin(lat1)
s2 = np.sin(lat2)
sd = np.sin(lon2 - lon1)
cd = np.cos(lon2 - lon1)
return np.arctan2(np.hypot(c2 * sd, c1 * s2 - s1 * c2 * cd), s1 * s2 + c1 * c2 * cd)
| 28.8
| 88
| 0.647817
|
import numpy as np
def angular_distance(lon1, lat1, lon2, lat2):
c1 = np.cos(lat1)
c2 = np.cos(lat2)
s1 = np.sin(lat1)
s2 = np.sin(lat2)
sd = np.sin(lon2 - lon1)
cd = np.cos(lon2 - lon1)
return np.arctan2(np.hypot(c2 * sd, c1 * s2 - s1 * c2 * cd), s1 * s2 + c1 * c2 * cd)
| true
| true
|
f70b18b4b2bf16ceeb39c12757922047f07bde3e
| 241
|
py
|
Python
|
Chapter_04/actions/admin.py
|
codingEzio/code_py_book_django2_by_example
|
d215d0c87a557685824286822186966b06fa8d59
|
[
"Unlicense"
] | 1
|
2021-04-23T16:35:45.000Z
|
2021-04-23T16:35:45.000Z
|
Chapter_04/actions/admin.py
|
codingEzio/code_py_book_django2_by_example
|
d215d0c87a557685824286822186966b06fa8d59
|
[
"Unlicense"
] | null | null | null |
Chapter_04/actions/admin.py
|
codingEzio/code_py_book_django2_by_example
|
d215d0c87a557685824286822186966b06fa8d59
|
[
"Unlicense"
] | null | null | null |
from django.contrib import admin
from .models import Action
@admin.register(Action)
class ActionAdmin(admin.ModelAdmin):
list_display = ('user', 'verb', 'target', 'created')
list_filter = ('created',)
search_fields = ('verb',)
| 24.1
| 56
| 0.697095
|
from django.contrib import admin
from .models import Action
@admin.register(Action)
class ActionAdmin(admin.ModelAdmin):
list_display = ('user', 'verb', 'target', 'created')
list_filter = ('created',)
search_fields = ('verb',)
| true
| true
|
f70b19e8b33df4c0fab1ab2a6d898931dffda3c0
| 4,205
|
py
|
Python
|
azury/asynczury/utils.py
|
citharus/azury.py
|
7079f8f98c68028d17114c830e749254cd483ef2
|
[
"Apache-2.0"
] | null | null | null |
azury/asynczury/utils.py
|
citharus/azury.py
|
7079f8f98c68028d17114c830e749254cd483ef2
|
[
"Apache-2.0"
] | null | null | null |
azury/asynczury/utils.py
|
citharus/azury.py
|
7079f8f98c68028d17114c830e749254cd483ef2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021-present citharus
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use utils.py except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Union, Dict
import azury.asynczury as asynczury
from azury.utils import parse_iso
__all__: list[str] = ['to_file', 'to_user', 'to_team']
async def to_file(
client: asynczury.Client,
service: str,
data: Dict[str, Union[str, bool, int, list]],
team: str = '',
) -> asynczury.File:
"""A function to convert the files' data to a :class:`File` object.
Parameters
----------
client: Client
        The :class:`Client` used to initialize the :class:`File`.
service: str
The service the file is bound to e.g. teams or users.
data: Dict[str, Union[str, bool, int, list]]
The files' data.
team: str
The team id, if the file belongs to a team.
Defaults to an empty string.
    Returns
    -------
File
The converted :class:`File` object.
"""
return asynczury.File(
client,
service,
team,
flags=data['flags'] if 'flags' in data else None,
id=data['_id'] if '_id' in data else data['id'],
archived='archived' in data['flags'] if 'flags' in data else None,
trashed='trashed' in data['flags'] if 'flags' in data else None,
favorite='favorite' in data['flags'] if 'flags' in data else None,
downloads=data['downloads'] if 'downloads' in data else None,
views=data['views'] if 'views' in data else None,
user=int(data['user']) if 'user' in data else int(data['author']),
name=data['name'],
size=data['size'],
type=data['type'],
created_at=parse_iso(data['createdAt'])
if 'createdAt' in data else parse_iso(data['uploadedAt']),
updated_at=parse_iso(data['updatedAt']),
)
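# A minimal usage sketch for to_file; the payload keys mirror those read above,
# and all values here are hypothetical:
# data = {'_id': 'abc123', 'flags': ['favorite'], 'downloads': 3, 'views': 10,
#         'user': '42', 'name': 'notes.txt', 'size': 128, 'type': 'text/plain',
#         'createdAt': '2021-01-01T00:00:00.000Z',
#         'updatedAt': '2021-01-02T00:00:00.000Z'}
# file = await to_file(client, 'users', data)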
async def to_user(
client: asynczury.Client,
data: dict,
) -> asynczury.User:
"""A function to convert the user's data to a :class:`User` object.
Parameters
----------
client: Client
        The :class:`Client` used to initialize the :class:`User`.
data: Dict[str, Union[str, list]]
The user's data.
Returns
-------
User
The converted :class:`User` object.
"""
return asynczury.User(
client,
avatar=data['avatar'],
flags=data['flags'],
connections=data['connections'],
access=data['access'],
id=int(data['_id']),
ip=data['ip'],
token=data['token'],
created_at=parse_iso(data['createdAt']),
updated_at=parse_iso(data['updatedAt']),
username=data['username'],
)
async def to_team(
client: asynczury.Client,
data: Dict[str, Union[str, list]],
) -> asynczury.Team:
"""A function to convert the teams's data to a :class:`Team` object.
Parameters
----------
client: Client
        The :class:`Client` used to initialize the :class:`Team`.
data: Dict[str, Union[str, list]]
        The team's data.
Returns
-------
Team
The converted :class:`Team` object.
"""
return asynczury.Team(
client,
members=[int(user) for user in data['members']],
icon=data['icon'],
flags=data['flags'],
id=data['_id'],
name=data['name'],
owner=int(data['owner']),
created_at=parse_iso(data['createdAt']),
updated_at=parse_iso(data['updatedAt']),
)
| 31.616541
| 75
| 0.572889
|
from __future__ import annotations
from typing import Union, Dict
import azury.asynczury as asynczury
from azury.utils import parse_iso
__all__: list[str] = ['to_file', 'to_user', 'to_team']
async def to_file(
client: asynczury.Client,
service: str,
data: Dict[str, Union[str, bool, int, list]],
team: str = '',
) -> asynczury.File:
return asynczury.File(
client,
service,
team,
flags=data['flags'] if 'flags' in data else None,
id=data['_id'] if '_id' in data else data['id'],
archived='archived' in data['flags'] if 'flags' in data else None,
trashed='trashed' in data['flags'] if 'flags' in data else None,
favorite='favorite' in data['flags'] if 'flags' in data else None,
downloads=data['downloads'] if 'downloads' in data else None,
views=data['views'] if 'views' in data else None,
user=int(data['user']) if 'user' in data else int(data['author']),
name=data['name'],
size=data['size'],
type=data['type'],
created_at=parse_iso(data['createdAt'])
if 'createdAt' in data else parse_iso(data['uploadedAt']),
updated_at=parse_iso(data['updatedAt']),
)
async def to_user(
client: asynczury.Client,
data: dict,
) -> asynczury.User:
return asynczury.User(
client,
avatar=data['avatar'],
flags=data['flags'],
connections=data['connections'],
access=data['access'],
id=int(data['_id']),
ip=data['ip'],
token=data['token'],
created_at=parse_iso(data['createdAt']),
updated_at=parse_iso(data['updatedAt']),
username=data['username'],
)
async def to_team(
client: asynczury.Client,
data: Dict[str, Union[str, list]],
) -> asynczury.Team:
return asynczury.Team(
client,
members=[int(user) for user in data['members']],
icon=data['icon'],
flags=data['flags'],
id=data['_id'],
name=data['name'],
owner=int(data['owner']),
created_at=parse_iso(data['createdAt']),
updated_at=parse_iso(data['updatedAt']),
)
| true
| true
|
f70b1b0b16bd605c6b6c84e932a247ada270dac4
| 6,493
|
py
|
Python
|
pipeline.py
|
tanynova99/2021-2-level-ctlr
|
c8a1456c1d719b974f06193e1b7ab4ba0a607229
|
[
"MIT"
] | null | null | null |
pipeline.py
|
tanynova99/2021-2-level-ctlr
|
c8a1456c1d719b974f06193e1b7ab4ba0a607229
|
[
"MIT"
] | null | null | null |
pipeline.py
|
tanynova99/2021-2-level-ctlr
|
c8a1456c1d719b974f06193e1b7ab4ba0a607229
|
[
"MIT"
] | null | null | null |
"""
Pipeline for text processing implementation
"""
from pathlib import Path
import re
import pymorphy2
from pymystem3 import Mystem
from constants import ASSETS_PATH
from core_utils.article import Article, ArtifactType
class EmptyDirectoryError(Exception):
"""
No data to process
"""
class InconsistentDatasetError(Exception):
"""
Corrupt data:
- numeration is expected to start from 1 and to be continuous
- a number of text files must be equal to the number of meta files
- text files must not be empty
"""
class MorphologicalToken:
"""
Stores language params for each processed token
"""
def __init__(self, original_word):
self.original_word = original_word
self.normalized_form = ''
self.tags_mystem = ''
self.tags_pymorphy = ''
def get_cleaned(self):
"""
Returns lowercased original form of a token
"""
return self.original_word.lower()
def get_single_tagged(self):
"""
Returns normalized lemma with MyStem tags
"""
return f'{self.normalized_form}<{self.tags_mystem}>'
def get_multiple_tagged(self):
"""
Returns normalized lemma with PyMorphy tags
"""
return f'{self.normalized_form}<{self.tags_mystem}>({self.tags_pymorphy})'
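# A minimal sketch of the three output formats (the tag strings below are
# hypothetical placeholders, not real MyStem/pymorphy2 output):
# token = MorphologicalToken('Words')
# token.normalized_form, token.tags_mystem, token.tags_pymorphy = 'word', 'S,mn', 'NOUN plur'
# token.get_cleaned()          # -> 'words'
# token.get_single_tagged()    # -> 'word<S,mn>'
# token.get_multiple_tagged()  # -> 'word<S,mn>(NOUN plur)'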
class CorpusManager:
"""
Works with articles and stores them
"""
def __init__(self, path_to_raw_txt_data: str):
self.path = Path(path_to_raw_txt_data)
self._storage = {}
self._scan_dataset()
def _scan_dataset(self):
"""
Register each dataset entry
"""
files = self.path.glob('*_raw.txt')
pattern = re.compile(r'(\d+)')
for file in files:
if re.match(pattern, file.name) is not None:
article_id = int(re.match(pattern, file.name).group(0))
self._storage[article_id] = Article(url=None, article_id=article_id)
else:
print("Unsuccessful article id extraction")
def get_articles(self):
"""
Returns storage params
"""
return self._storage
class TextProcessingPipeline:
"""
Process articles from corpus manager
"""
def __init__(self, corpus_manager: CorpusManager):
self.corpus_manager = corpus_manager
def run(self):
"""
Runs pipeline process scenario
"""
articles = self.corpus_manager.get_articles().values()
for article in articles:
raw_text = article.get_raw_text()
processed_tokens = self._process(raw_text)
cleaned_tokens = []
single_tagged_tokens = []
multiple_tagged_tokens = []
for processed_token in processed_tokens:
cleaned_tokens.append(processed_token.get_cleaned())
single_tagged_tokens.append(processed_token.get_single_tagged())
multiple_tagged_tokens.append(processed_token.get_multiple_tagged())
article.save_as(' '.join(cleaned_tokens), ArtifactType.cleaned)
article.save_as(' '.join(single_tagged_tokens), ArtifactType.single_tagged)
article.save_as(' '.join(multiple_tagged_tokens), ArtifactType.multiple_tagged)
def _process(self, raw_text: str):
"""
Processes each token and creates MorphToken class instance
"""
# txt from pdf comes with words like след-ующий
# this replace deals with them
text = raw_text.replace('-\n', '').replace('\n', ' ')
result = Mystem().analyze(text)
# launching morph_tokens list which then is appended with MorphologicalToken class instances
morph_tokens = []
# pymorphy analyzer which will be used for filling pymorphy tags
morph = pymorphy2.MorphAnalyzer()
for token in result:
# pre requisites for the token to be usable
if "analysis" not in token:
continue
if not token.get('analysis'):
continue
if not (token['analysis'][0].get("gr") or token['analysis'][0].get("lex")):
continue
original_word = token["text"]
morph_token = MorphologicalToken(original_word=original_word)
# mystem tags
morph_token.normalized_form = token['analysis'][0]['lex']
morph_token.tags_mystem = token['analysis'][0]['gr']
# pymorphy tags
one_word = morph.parse(original_word)[0]
morph_token.tags_pymorphy = one_word.tag
morph_tokens.append(morph_token)
return morph_tokens
def validate_dataset(path_to_validate):
"""
Validates folder with assets
"""
path = Path(path_to_validate)
if not path.exists():
raise FileNotFoundError
if not path.is_dir():
raise NotADirectoryError
if not any(path.iterdir()):
raise EmptyDirectoryError
file_formats = [".json", ".txt", ".pdf", ".png"]
checker = {}
# creating a dictionary of file indexes
# and checking the formats
pattern = re.compile(r'\d+')
for file in path.iterdir():
match_to = re.match(pattern, file.name)
if not match_to:
raise InconsistentDatasetError("There is a file with incorrect name pattern.")
if file.stat().st_size == 0:
raise InconsistentDatasetError("File is empty.")
file_index = file.name.split("_")[0]
if file_index not in checker.keys():
checker[file_index] = 1
else:
checker[file_index] += 1
if file.suffix not in file_formats:
raise FileNotFoundError("File with incorrect format.")
# checking that there are necessary files with said index
if not all(value >= 2 for value in checker.values()):
raise InconsistentDatasetError("There are files missing.")
# checking whether keys are consistent from 1 to N (max in files indices)
current_i = list(int(x) for x in checker)
ideal_i = range(1, max(current_i) + 1)
if not set(current_i) & set(ideal_i) == set(ideal_i):
raise InconsistentDatasetError("The numbering is inconsistent.")
def main():
validate_dataset(ASSETS_PATH)
corpus_manager = CorpusManager(ASSETS_PATH)
pipeline = TextProcessingPipeline(corpus_manager)
pipeline.run()
if __name__ == "__main__":
main()
| 28.108225
| 100
| 0.624365
|
from pathlib import Path
import re
import pymorphy2
from pymystem3 import Mystem
from constants import ASSETS_PATH
from core_utils.article import Article, ArtifactType
class EmptyDirectoryError(Exception):
class InconsistentDatasetError(Exception):
class MorphologicalToken:
def __init__(self, original_word):
self.original_word = original_word
self.normalized_form = ''
self.tags_mystem = ''
self.tags_pymorphy = ''
def get_cleaned(self):
return self.original_word.lower()
def get_single_tagged(self):
return f'{self.normalized_form}<{self.tags_mystem}>'
def get_multiple_tagged(self):
return f'{self.normalized_form}<{self.tags_mystem}>({self.tags_pymorphy})'
class CorpusManager:
def __init__(self, path_to_raw_txt_data: str):
self.path = Path(path_to_raw_txt_data)
self._storage = {}
self._scan_dataset()
def _scan_dataset(self):
files = self.path.glob('*_raw.txt')
pattern = re.compile(r'(\d+)')
for file in files:
if re.match(pattern, file.name) is not None:
article_id = int(re.match(pattern, file.name).group(0))
self._storage[article_id] = Article(url=None, article_id=article_id)
else:
print("Unsuccessful article id extraction")
def get_articles(self):
return self._storage
class TextProcessingPipeline:
def __init__(self, corpus_manager: CorpusManager):
self.corpus_manager = corpus_manager
def run(self):
articles = self.corpus_manager.get_articles().values()
for article in articles:
raw_text = article.get_raw_text()
processed_tokens = self._process(raw_text)
cleaned_tokens = []
single_tagged_tokens = []
multiple_tagged_tokens = []
for processed_token in processed_tokens:
cleaned_tokens.append(processed_token.get_cleaned())
single_tagged_tokens.append(processed_token.get_single_tagged())
multiple_tagged_tokens.append(processed_token.get_multiple_tagged())
article.save_as(' '.join(cleaned_tokens), ArtifactType.cleaned)
article.save_as(' '.join(single_tagged_tokens), ArtifactType.single_tagged)
article.save_as(' '.join(multiple_tagged_tokens), ArtifactType.multiple_tagged)
def _process(self, raw_text: str):
text = raw_text.replace('-\n', '').replace('\n', ' ')
result = Mystem().analyze(text)
morph_tokens = []
morph = pymorphy2.MorphAnalyzer()
for token in result:
if "analysis" not in token:
continue
if not token.get('analysis'):
continue
if not (token['analysis'][0].get("gr") or token['analysis'][0].get("lex")):
continue
original_word = token["text"]
morph_token = MorphologicalToken(original_word=original_word)
morph_token.normalized_form = token['analysis'][0]['lex']
morph_token.tags_mystem = token['analysis'][0]['gr']
one_word = morph.parse(original_word)[0]
morph_token.tags_pymorphy = one_word.tag
morph_tokens.append(morph_token)
return morph_tokens
def validate_dataset(path_to_validate):
path = Path(path_to_validate)
if not path.exists():
raise FileNotFoundError
if not path.is_dir():
raise NotADirectoryError
if not any(path.iterdir()):
raise EmptyDirectoryError
file_formats = [".json", ".txt", ".pdf", ".png"]
checker = {}
pattern = re.compile(r'\d+')
for file in path.iterdir():
match_to = re.match(pattern, file.name)
if not match_to:
raise InconsistentDatasetError("There is a file with incorrect name pattern.")
if file.stat().st_size == 0:
raise InconsistentDatasetError("File is empty.")
file_index = file.name.split("_")[0]
if file_index not in checker.keys():
checker[file_index] = 1
else:
checker[file_index] += 1
if file.suffix not in file_formats:
raise FileNotFoundError("File with incorrect format.")
if not all(value >= 2 for value in checker.values()):
raise InconsistentDatasetError("There are files missing.")
current_i = list(int(x) for x in checker)
ideal_i = range(1, max(current_i) + 1)
if not set(current_i) & set(ideal_i) == set(ideal_i):
raise InconsistentDatasetError("The numbering is inconsistent.")
def main():
validate_dataset(ASSETS_PATH)
corpus_manager = CorpusManager(ASSETS_PATH)
pipeline = TextProcessingPipeline(corpus_manager)
pipeline.run()
if __name__ == "__main__":
main()
| true
| true
|
f70b1b503b4ddb49f9d18776b11905b96556d553
| 1,458
|
py
|
Python
|
setup.py
|
dmitrii-sim/ninjin
|
6c3edb46ec873f28ed0b1fcbe20193445e3107e9
|
[
"MIT"
] | 2
|
2020-06-03T07:44:46.000Z
|
2020-06-05T11:30:46.000Z
|
setup.py
|
dmitrii-sim/ninjin
|
6c3edb46ec873f28ed0b1fcbe20193445e3107e9
|
[
"MIT"
] | null | null | null |
setup.py
|
dmitrii-sim/ninjin
|
6c3edb46ec873f28ed0b1fcbe20193445e3107e9
|
[
"MIT"
] | 1
|
2020-06-18T15:59:18.000Z
|
2020-06-18T15:59:18.000Z
|
import os
from setuptools import (
find_packages,
setup
)
__version__ = open("VERSION", 'r').read().strip()
REQUIREMENTS_FOLDER = os.getenv('REQUIREMENTS_PATH', '')
requirements = [line.strip() for line in open(os.path.join(REQUIREMENTS_FOLDER, "requirements.txt"), 'r')]
setup(
name='ninjin',
version=__version__,
keywords="ninjin",
packages=find_packages(exclude=['tests']),
install_requires=requirements,
extras_require={
'dev': [
'mock',
'async-generator==1.10',
'faker',
'flake8',
'flake8-builtins',
'flake8-coding',
'flake8-commas',
'flake8-comprehensions',
'flake8-debugger',
'flake8-docstrings',
'flake8-pep3101',
'flake8-quotes',
'flake8-string-format',
'flake8-super-call',
'flake8-eradicate',
'flake8-print',
'flake8-isort',
'pytest',
'pytest-factoryboy',
'pytest-pep8',
'pytest-mock==3.1.0',
'pytest-asyncio==0.11.0',
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
]
)
| 26.509091
| 106
| 0.526749
|
import os
from setuptools import (
find_packages,
setup
)
__version__ = open("VERSION", 'r').read().strip()
REQUIREMENTS_FOLDER = os.getenv('REQUIREMENTS_PATH', '')
requirements = [line.strip() for line in open(os.path.join(REQUIREMENTS_FOLDER, "requirements.txt"), 'r')]
setup(
name='ninjin',
version=__version__,
keywords="ninjin",
packages=find_packages(exclude=['tests']),
install_requires=requirements,
extras_require={
'dev': [
'mock',
'async-generator==1.10',
'faker',
'flake8',
'flake8-builtins',
'flake8-coding',
'flake8-commas',
'flake8-comprehensions',
'flake8-debugger',
'flake8-docstrings',
'flake8-pep3101',
'flake8-quotes',
'flake8-string-format',
'flake8-super-call',
'flake8-eradicate',
'flake8-print',
'flake8-isort',
'pytest',
'pytest-factoryboy',
'pytest-pep8',
'pytest-mock==3.1.0',
'pytest-asyncio==0.11.0',
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
]
)
| true
| true
|
f70b1b67040779aa3fec10d949f0b6edaadebcce
| 4,918
|
py
|
Python
|
src/prism-fruit/Games-DQL/examples/games/car/networkx/readwrite/sparsegraph6.py
|
kushgrover/apt-vs-dift
|
250f64e6c442f6018cab65ec6979d9568a842f57
|
[
"MIT"
] | null | null | null |
src/prism-fruit/Games-DQL/examples/games/car/networkx/readwrite/sparsegraph6.py
|
kushgrover/apt-vs-dift
|
250f64e6c442f6018cab65ec6979d9568a842f57
|
[
"MIT"
] | null | null | null |
src/prism-fruit/Games-DQL/examples/games/car/networkx/readwrite/sparsegraph6.py
|
kushgrover/apt-vs-dift
|
250f64e6c442f6018cab65ec6979d9568a842f57
|
[
"MIT"
] | null | null | null |
"""
**************
SparseGraph 6
**************
Read graphs in graph6 and sparse6 format.
Format
------
"graph6 and sparse6 are formats for storing undirected graphs in a
compact manner, using only printable ASCII characters. Files in these
formats have text type and contain one line per graph."
http://cs.anu.edu.au/~bdm/data/formats.html
See http://cs.anu.edu.au/~bdm/data/formats.txt for details.
"""
# Original author: D. Eppstein, UC Irvine, August 12, 2003.
# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain.
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2004-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['read_graph6', 'parse_graph6', 'read_graph6_list',
'read_sparse6', 'parse_sparse6', 'read_sparse6_list']
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import open_file
# graph6
def read_graph6(path):
"""Read simple undirected graphs in graph6 format from path.
Returns a single Graph.
"""
return read_graph6_list(path)[0]
def parse_graph6(str):
"""Read a simple undirected graph in graph6 format from string.
Returns a single Graph.
"""
def bits():
"""Return sequence of individual bits from 6-bit-per-value
list of data values."""
for d in data:
for i in [5,4,3,2,1,0]:
yield (d>>i)&1
if str.startswith('>>graph6<<'):
str = str[10:]
data = graph6data(str)
n, data = graph6n(data)
nd = (n*(n-1)//2 + 5) // 6
if len(data) != nd:
raise NetworkXError(\
'Expected %d bits but got %d in graph6' % (n*(n-1)//2, len(data)*6))
G=nx.Graph()
G.add_nodes_from(range(n))
for (i,j),b in zip([(i,j) for j in range(1,n) for i in range(j)], bits()):
if b: G.add_edge(i,j)
return G
@open_file(0,mode='rt')
def read_graph6_list(path):
"""Read simple undirected graphs in graph6 format from path.
Returns a list of Graphs, one for each line in file.
"""
glist=[]
for line in path:
line = line.strip()
if not len(line): continue
glist.append(parse_graph6(line))
return glist
# sparse6
def read_sparse6(path):
"""Read simple undirected graphs in sparse6 format from path.
Returns a single MultiGraph."""
return read_sparse6_list(path)[0]
@open_file(0,mode='rt')
def read_sparse6_list(path):
"""Read undirected graphs in sparse6 format from path.
Returns a list of MultiGraphs, one for each line in file."""
glist=[]
for line in path:
line = line.strip()
if not len(line): continue
glist.append(parse_sparse6(line))
return glist
def parse_sparse6(string):
"""Read undirected graph in sparse6 format from string.
Returns a MultiGraph.
"""
if string.startswith('>>sparse6<<'):
        string = string[11:]  # strip the '>>sparse6<<' header (11 characters)
if not string.startswith(':'):
raise NetworkXError('Expected colon in sparse6')
n, data = graph6n(graph6data(string[1:]))
k = 1
while 1<<k < n:
k += 1
def parseData():
"""Return stream of pairs b[i], x[i] for sparse6 format."""
chunks = iter(data)
d = None # partial data word
dLen = 0 # how many unparsed bits are left in d
while 1:
if dLen < 1:
d = next(chunks)
dLen = 6
dLen -= 1
b = (d>>dLen) & 1 # grab top remaining bit
x = d & ((1<<dLen)-1) # partially built up value of x
xLen = dLen # how many bits included so far in x
while xLen < k: # now grab full chunks until we have enough
d = next(chunks)
dLen = 6
x = (x<<6) + d
xLen += 6
x = (x >> (xLen - k)) # shift back the extra bits
dLen = xLen - k
yield b,x
v = 0
G=nx.MultiGraph()
G.add_nodes_from(range(n))
for b,x in parseData():
if b: v += 1
if x >= n: break # padding with ones can cause overlarge number here
elif x > v: v = x
else:
G.add_edge(x,v)
return G
# helper functions
def graph6data(str):
"""Convert graph6 character sequence to 6-bit integers."""
v = [ord(c)-63 for c in str]
if min(v) < 0 or max(v) > 63:
return None
return v
def graph6n(data):
"""Read initial one or four-unit value from graph6 sequence.
Return value, rest of seq."""
if data[0] <= 62:
return data[0], data[1:]
return (data[1]<<12) + (data[2]<<6) + data[3], data[4:]
| 28.929412
| 81
| 0.568117
|
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['read_graph6', 'parse_graph6', 'read_graph6_list',
'read_sparse6', 'parse_sparse6', 'read_sparse6_list']
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import open_file
def read_graph6(path):
return read_graph6_list(path)[0]
def parse_graph6(str):
def bits():
for d in data:
for i in [5,4,3,2,1,0]:
yield (d>>i)&1
if str.startswith('>>graph6<<'):
str = str[10:]
data = graph6data(str)
n, data = graph6n(data)
nd = (n*(n-1)//2 + 5) // 6
if len(data) != nd:
raise NetworkXError(\
'Expected %d bits but got %d in graph6' % (n*(n-1)//2, len(data)*6))
G=nx.Graph()
G.add_nodes_from(range(n))
for (i,j),b in zip([(i,j) for j in range(1,n) for i in range(j)], bits()):
if b: G.add_edge(i,j)
return G
@open_file(0,mode='rt')
def read_graph6_list(path):
glist=[]
for line in path:
line = line.strip()
if not len(line): continue
glist.append(parse_graph6(line))
return glist
def read_sparse6(path):
return read_sparse6_list(path)[0]
@open_file(0,mode='rt')
def read_sparse6_list(path):
glist=[]
for line in path:
line = line.strip()
if not len(line): continue
glist.append(parse_sparse6(line))
return glist
def parse_sparse6(string):
if string.startswith('>>sparse6<<'):
        string = string[11:]
if not string.startswith(':'):
raise NetworkXError('Expected colon in sparse6')
n, data = graph6n(graph6data(string[1:]))
k = 1
while 1<<k < n:
k += 1
def parseData():
chunks = iter(data)
d = None
dLen = 0
while 1:
if dLen < 1:
d = next(chunks)
dLen = 6
dLen -= 1
b = (d>>dLen) & 1
x = d & ((1<<dLen)-1)
xLen = dLen
while xLen < k:
d = next(chunks)
dLen = 6
x = (x<<6) + d
xLen += 6
x = (x >> (xLen - k))
dLen = xLen - k
yield b,x
v = 0
G=nx.MultiGraph()
G.add_nodes_from(range(n))
for b,x in parseData():
if b: v += 1
if x >= n: break
elif x > v: v = x
else:
G.add_edge(x,v)
return G
def graph6data(str):
v = [ord(c)-63 for c in str]
if min(v) < 0 or max(v) > 63:
return None
return v
def graph6n(data):
if data[0] <= 62:
return data[0], data[1:]
return (data[1]<<12) + (data[2]<<6) + data[3], data[4:]
| true
| true
|
f70b1bf7e41ca49a3802c244cb6df05ffb1e5edd
| 3,203
|
py
|
Python
|
mars/dataframe/fetch/core.py
|
sighingnow/mars
|
c7897fbd144d230fff5edabc1494fb3ff44aa0d2
|
[
"Apache-2.0"
] | null | null | null |
mars/dataframe/fetch/core.py
|
sighingnow/mars
|
c7897fbd144d230fff5edabc1494fb3ff44aa0d2
|
[
"Apache-2.0"
] | null | null | null |
mars/dataframe/fetch/core.py
|
sighingnow/mars
|
c7897fbd144d230fff5edabc1494fb3ff44aa0d2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from ...serialize.core import TupleField, ValueType, Int8Field
from ...operands import Fetch, FetchShuffle
from ...utils import on_serialize_shape, on_deserialize_shape
from ..operands import DataFrameOperandMixin, ObjectType
class DataFrameFetchMixin(DataFrameOperandMixin):
def check_inputs(self, inputs):
# no inputs
if inputs and len(inputs) > 0:
raise ValueError("%s has no inputs" % type(self).__name__)
@classmethod
def tile(cls, op):
raise NotImplementedError('Fetch tile cannot be handled by operand itself')
@classmethod
def execute(cls, ctx, op):
# fetch op need to do nothing
pass
class DataFrameFetch(Fetch, DataFrameFetchMixin):
# required fields
_shape = TupleField('shape', ValueType.int64,
on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)
_object_type = Int8Field('object_type', on_serialize=operator.attrgetter('value'),
on_deserialize=ObjectType)
def __init__(self, to_fetch_key=None, sparse=False, object_type=None, **kw):
super(DataFrameFetch, self).__init__(
_to_fetch_key=to_fetch_key, _sparse=sparse, _object_type=object_type, **kw)
@property
def object_type(self):
return self._object_type
def _new_chunks(self, inputs, kws=None, **kw):
if '_key' in kw and self._to_fetch_key is None:
self._to_fetch_key = kw['_key']
if '_shape' in kw and self._shape is None:
self._shape = kw['_shape']
return super(DataFrameFetch, self)._new_chunks(inputs, kws=kws, **kw)
def _new_tileables(self, inputs, kws=None, **kw):
if '_key' in kw and self._to_fetch_key is None:
self._to_fetch_key = kw['_key']
return super(DataFrameFetch, self)._new_tileables(inputs, kws=kws, **kw)
class DataFrameFetchShuffle(FetchShuffle, DataFrameFetchMixin):
# required fields
_shape = TupleField('shape', ValueType.int64,
on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)
_object_type = Int8Field('object_type', on_serialize=operator.attrgetter('value'),
on_deserialize=ObjectType)
def __init__(self, to_fetch_keys=None, to_fetch_idxes=None, object_type=None, **kw):
super(DataFrameFetchShuffle, self).__init__(
_to_fetch_keys=to_fetch_keys, _to_fetch_idxes=to_fetch_idxes,
_object_type=object_type, **kw)
@property
def object_type(self):
return self._object_type
| 38.590361
| 93
| 0.696222
|
import operator
from ...serialize.core import TupleField, ValueType, Int8Field
from ...operands import Fetch, FetchShuffle
from ...utils import on_serialize_shape, on_deserialize_shape
from ..operands import DataFrameOperandMixin, ObjectType
class DataFrameFetchMixin(DataFrameOperandMixin):
def check_inputs(self, inputs):
if inputs and len(inputs) > 0:
raise ValueError("%s has no inputs" % type(self).__name__)
@classmethod
def tile(cls, op):
raise NotImplementedError('Fetch tile cannot be handled by operand itself')
@classmethod
def execute(cls, ctx, op):
pass
class DataFrameFetch(Fetch, DataFrameFetchMixin):
_shape = TupleField('shape', ValueType.int64,
on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)
_object_type = Int8Field('object_type', on_serialize=operator.attrgetter('value'),
on_deserialize=ObjectType)
def __init__(self, to_fetch_key=None, sparse=False, object_type=None, **kw):
super(DataFrameFetch, self).__init__(
_to_fetch_key=to_fetch_key, _sparse=sparse, _object_type=object_type, **kw)
@property
def object_type(self):
return self._object_type
def _new_chunks(self, inputs, kws=None, **kw):
if '_key' in kw and self._to_fetch_key is None:
self._to_fetch_key = kw['_key']
if '_shape' in kw and self._shape is None:
self._shape = kw['_shape']
return super(DataFrameFetch, self)._new_chunks(inputs, kws=kws, **kw)
def _new_tileables(self, inputs, kws=None, **kw):
if '_key' in kw and self._to_fetch_key is None:
self._to_fetch_key = kw['_key']
return super(DataFrameFetch, self)._new_tileables(inputs, kws=kws, **kw)
class DataFrameFetchShuffle(FetchShuffle, DataFrameFetchMixin):
_shape = TupleField('shape', ValueType.int64,
on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)
_object_type = Int8Field('object_type', on_serialize=operator.attrgetter('value'),
on_deserialize=ObjectType)
def __init__(self, to_fetch_keys=None, to_fetch_idxes=None, object_type=None, **kw):
super(DataFrameFetchShuffle, self).__init__(
_to_fetch_keys=to_fetch_keys, _to_fetch_idxes=to_fetch_idxes,
_object_type=object_type, **kw)
@property
def object_type(self):
return self._object_type
| true
| true
|
f70b1ca4a8dd551f3d5221559de70f07c52b4a6d
| 1,206
|
py
|
Python
|
ssseg/cfgs/memorynet/cfgs_cocostuff_resnet101os8.py
|
zhizhangxian/sssegmentation
|
90613f6e0abf4cdd729cf382ab2a915e106d8649
|
[
"MIT"
] | 41
|
2021-08-28T01:29:19.000Z
|
2022-03-30T11:28:37.000Z
|
ssseg/cfgs/memorynet/cfgs_cocostuff_resnet101os8.py
|
zhizhangxian/sssegmentation
|
90613f6e0abf4cdd729cf382ab2a915e106d8649
|
[
"MIT"
] | 6
|
2021-08-31T08:54:39.000Z
|
2021-11-02T10:45:47.000Z
|
ssseg/cfgs/memorynet/cfgs_cocostuff_resnet101os8.py
|
zhizhangxian/sssegmentation
|
90613f6e0abf4cdd729cf382ab2a915e106d8649
|
[
"MIT"
] | 1
|
2021-09-08T01:41:10.000Z
|
2021-09-08T01:41:10.000Z
|
'''define the config file for cocostuff and resnet101os8'''
import os
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
'type': 'cocostuff',
'rootdir': os.path.join(os.getcwd(), 'COCO'),
})
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 30
}
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify segmentor config
SEGMENTOR_CFG = SEGMENTOR_CFG.copy()
SEGMENTOR_CFG.update(
{
'num_classes': 182,
}
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'memorynet_resnet101os8_cocostuff_train',
'logfilepath': 'memorynet_resnet101os8_cocostuff_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'memorynet_resnet101os8_cocostuff_test',
'logfilepath': 'memorynet_resnet101os8_cocostuff_test/test.log',
'resultsavepath': 'memorynet_resnet101os8_cocostuff_test/memorynet_resnet101os8_cocostuff_results.pkl'
}
)
| 26.217391
| 110
| 0.722222
|
import os
from .base_cfg import *
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
'type': 'cocostuff',
'rootdir': os.path.join(os.getcwd(), 'COCO'),
})
DATALOADER_CFG = DATALOADER_CFG.copy()
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 30
}
)
LOSSES_CFG = LOSSES_CFG.copy()
SEGMENTOR_CFG = SEGMENTOR_CFG.copy()
SEGMENTOR_CFG.update(
{
'num_classes': 182,
}
)
INFERENCE_CFG = INFERENCE_CFG.copy()
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'memorynet_resnet101os8_cocostuff_train',
'logfilepath': 'memorynet_resnet101os8_cocostuff_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'memorynet_resnet101os8_cocostuff_test',
'logfilepath': 'memorynet_resnet101os8_cocostuff_test/test.log',
'resultsavepath': 'memorynet_resnet101os8_cocostuff_test/memorynet_resnet101os8_cocostuff_results.pkl'
}
)
| true
| true
|
f70b1daf8d65cc9109c42a04aba4fff0fcbd1f13
| 5,875
|
py
|
Python
|
bgp/simglucose/controller/basal_bolus_ctrller.py
|
aypan17/value_learning
|
240a67ecf99b178fe0c4ced2bfd1dd50453fbdfe
|
[
"MIT"
] | null | null | null |
bgp/simglucose/controller/basal_bolus_ctrller.py
|
aypan17/value_learning
|
240a67ecf99b178fe0c4ced2bfd1dd50453fbdfe
|
[
"MIT"
] | null | null | null |
bgp/simglucose/controller/basal_bolus_ctrller.py
|
aypan17/value_learning
|
240a67ecf99b178fe0c4ced2bfd1dd50453fbdfe
|
[
"MIT"
] | null | null | null |
from .base import Controller
from .base import Action
import numpy as np
import pandas as pd
import pkg_resources
import logging
from collections import namedtuple
logger = logging.getLogger(__name__)
CONTROL_QUEST = '/source/dir/simglucose/params/Quest.csv'
PATIENT_PARA_FILE = '/source/dir/simglucose/params/vpatient_params.csv'
ParamTup = namedtuple('ParamTup', ['basal', 'cf', 'cr'])
class BBController(Controller):
def __init__(self, target=140):
self.quest = pd.read_csv(CONTROL_QUEST)
self.patient_params = pd.read_csv(
PATIENT_PARA_FILE)
self.target = target
def policy(self, observation, reward, done, **kwargs):
sample_time = kwargs.get('sample_time', 1)
pname = kwargs.get('patient_name')
meal = kwargs.get('meal')
action = self._bb_policy(
pname,
meal,
observation.CGM,
sample_time)
return action
def _bb_policy(self, name, meal, glucose, env_sample_time):
if any(self.quest.Name.str.match(name)):
q = self.quest[self.quest.Name.str.match(name)]
params = self.patient_params[self.patient_params.Name.str.match(
name)]
u2ss = np.asscalar(params.u2ss.values)
BW = np.asscalar(params.BW.values)
else:
q = pd.DataFrame([['Average', 13.5, 23.52, 50, 30]],
columns=['Name', 'CR', 'CF', 'TDI', 'Age'])
u2ss = 1.43
BW = 57.0
basal = u2ss * BW / 6000
if meal > 0:
logger.info('Calculating bolus ...')
logger.debug('glucose = {}'.format(glucose))
bolus = np.asscalar(meal / q.CR.values + (glucose > 150)
* (glucose - self.target) / q.CF.values)
else:
bolus = 0
bolus = bolus / env_sample_time
action = Action(basal=basal, bolus=bolus)
return action
def reset(self):
pass
class ManualBBController(Controller):
def __init__(self, target, cr, cf, basal, sample_rate=5, use_cf=True, use_bol=True, cooldown=0,
corrected=True, use_low_lim=False, low_lim=70):
super().__init__(self)
self.target = target
self.orig_cr = self.cr = cr
self.orig_cf = self.cf = cf
self.orig_basal = self.basal = basal
self.sample_rate = sample_rate
self.use_cf = use_cf
self.use_bol = use_bol
self.cooldown = cooldown
self.last_cf = np.inf
self.corrected = corrected
        self.use_low_lim = use_low_lim
self.low_lim = low_lim
def increment(self, cr_incr=0, cf_incr=0, basal_incr=0):
self.cr += cr_incr
self.cf += cf_incr
self.basal += basal_incr
def policy(self, observation, reward, done, **kwargs):
carbs = kwargs.get('carbs')
glucose = kwargs.get('glucose')
action = self.manual_bb_policy(carbs, glucose)
return action
def manual_bb_policy(self, carbs, glucose, log=False):
if carbs > 0:
if self.corrected:
carb_correct = carbs / self.cr
else:
# assuming carbs are already multiplied by sampling rate
carb_correct = (carbs/self.sample_rate) / self.cr # TODO: not sure about this
hyper_correct = (glucose > self.target) * (glucose - self.target) / self.cf
hypo_correct = (glucose < self.low_lim) * (self.low_lim - glucose) / self.cf
bolus = 0
if self.use_low_lim:
bolus -= hypo_correct
if self.use_cf:
if self.last_cf > self.cooldown and hyper_correct > 0:
bolus += hyper_correct
self.last_cf = 0
if self.use_bol:
bolus += carb_correct
bolus = bolus / self.sample_rate
else:
bolus = 0
carb_correct = 0
hyper_correct = 0
hypo_correct = 0
self.last_cf += self.sample_rate
if log:
return Action(basal=self.basal, bolus=bolus), hyper_correct, hypo_correct, carb_correct
else:
return Action(basal=self.basal, bolus=bolus)
def get_params(self):
return ParamTup(basal=self.basal, cf=self.cf, cr=self.cr)
def adjust(self, basal_adj, cr_adj):
self.basal += self.orig_basal * basal_adj
self.cr += self.orig_cr * cr_adj
def reset(self):
self.cr = self.orig_cr
self.cf = self.orig_cf
self.basal = self.orig_basal
self.last_cf = np.inf
class MyController(Controller):
def __init__(self, init_state):
self.init_state = init_state
self.state = init_state
def policy(self, observation, reward, done, **info):
'''
Every controller must have this implementation!
----
Inputs:
observation - a namedtuple defined in simglucose.simulation.env. For
now, it only has one entry: blood glucose level measured
by CGM sensor.
reward - current reward returned by environment
done - True, game over. False, game continues
info - additional information as key word arguments,
simglucose.simulation.env.T1DSimEnv returns patient_name
and sample_time
----
Output:
action - a namedtuple defined at the beginning of this file. The
controller action contains two entries: basal, bolus
'''
self.state = observation
action = Action(basal=0, bolus=0)
return action
def reset(self):
'''
        Reset the controller state to the initial state; must be implemented.
'''
self.state = self.init_state
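# Illustrative usage sketch, not part of the original module (left commented out
# because the relative imports above prevent running this file directly).
# ManualBBController needs no parameter CSV files, so it is the easiest class to
# exercise; the target, carb ratio, correction factor and basal below are made up.
# ctrller = ManualBBController(target=140, cr=10.0, cf=40.0, basal=0.02)
# act = ctrller.manual_bb_policy(carbs=45, glucose=180)
# print(act.basal, act.bolus)   # Action(basal=0.02, bolus=(45/10 + 40/40)/5 = 1.1)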
| 35.179641
| 99
| 0.580766
|
from .base import Controller
from .base import Action
import numpy as np
import pandas as pd
import pkg_resources
import logging
from collections import namedtuple
logger = logging.getLogger(__name__)
CONTROL_QUEST = '/source/dir/simglucose/params/Quest.csv'
PATIENT_PARA_FILE = '/source/dir/simglucose/params/vpatient_params.csv'
ParamTup = namedtuple('ParamTup', ['basal', 'cf', 'cr'])
class BBController(Controller):
def __init__(self, target=140):
self.quest = pd.read_csv(CONTROL_QUEST)
self.patient_params = pd.read_csv(
PATIENT_PARA_FILE)
self.target = target
def policy(self, observation, reward, done, **kwargs):
sample_time = kwargs.get('sample_time', 1)
pname = kwargs.get('patient_name')
meal = kwargs.get('meal')
action = self._bb_policy(
pname,
meal,
observation.CGM,
sample_time)
return action
def _bb_policy(self, name, meal, glucose, env_sample_time):
if any(self.quest.Name.str.match(name)):
q = self.quest[self.quest.Name.str.match(name)]
params = self.patient_params[self.patient_params.Name.str.match(
name)]
u2ss = np.asscalar(params.u2ss.values)
BW = np.asscalar(params.BW.values)
else:
q = pd.DataFrame([['Average', 13.5, 23.52, 50, 30]],
columns=['Name', 'CR', 'CF', 'TDI', 'Age'])
u2ss = 1.43
BW = 57.0
basal = u2ss * BW / 6000
if meal > 0:
logger.info('Calculating bolus ...')
logger.debug('glucose = {}'.format(glucose))
bolus = np.asscalar(meal / q.CR.values + (glucose > 150)
* (glucose - self.target) / q.CF.values)
else:
bolus = 0
bolus = bolus / env_sample_time
action = Action(basal=basal, bolus=bolus)
return action
def reset(self):
pass
class ManualBBController(Controller):
def __init__(self, target, cr, cf, basal, sample_rate=5, use_cf=True, use_bol=True, cooldown=0,
corrected=True, use_low_lim=False, low_lim=70):
super().__init__(self)
self.target = target
self.orig_cr = self.cr = cr
self.orig_cf = self.cf = cf
self.orig_basal = self.basal = basal
self.sample_rate = sample_rate
self.use_cf = use_cf
self.use_bol = use_bol
self.cooldown = cooldown
self.last_cf = np.inf
self.corrected = corrected
        self.use_low_lim = use_low_lim
self.low_lim = low_lim
def increment(self, cr_incr=0, cf_incr=0, basal_incr=0):
self.cr += cr_incr
self.cf += cf_incr
self.basal += basal_incr
def policy(self, observation, reward, done, **kwargs):
carbs = kwargs.get('carbs')
glucose = kwargs.get('glucose')
action = self.manual_bb_policy(carbs, glucose)
return action
def manual_bb_policy(self, carbs, glucose, log=False):
if carbs > 0:
if self.corrected:
carb_correct = carbs / self.cr
else:
carb_correct = (carbs/self.sample_rate) / self.cr
hyper_correct = (glucose > self.target) * (glucose - self.target) / self.cf
hypo_correct = (glucose < self.low_lim) * (self.low_lim - glucose) / self.cf
bolus = 0
if self.use_low_lim:
bolus -= hypo_correct
if self.use_cf:
if self.last_cf > self.cooldown and hyper_correct > 0:
bolus += hyper_correct
self.last_cf = 0
if self.use_bol:
bolus += carb_correct
bolus = bolus / self.sample_rate
else:
bolus = 0
carb_correct = 0
hyper_correct = 0
hypo_correct = 0
self.last_cf += self.sample_rate
if log:
return Action(basal=self.basal, bolus=bolus), hyper_correct, hypo_correct, carb_correct
else:
return Action(basal=self.basal, bolus=bolus)
def get_params(self):
return ParamTup(basal=self.basal, cf=self.cf, cr=self.cr)
def adjust(self, basal_adj, cr_adj):
self.basal += self.orig_basal * basal_adj
self.cr += self.orig_cr * cr_adj
def reset(self):
self.cr = self.orig_cr
self.cf = self.orig_cf
self.basal = self.orig_basal
self.last_cf = np.inf
class MyController(Controller):
def __init__(self, init_state):
self.init_state = init_state
self.state = init_state
def policy(self, observation, reward, done, **info):
self.state = observation
action = Action(basal=0, bolus=0)
return action
def reset(self):
self.state = self.init_state
| true
| true
|
f70b1e2720f8ee99979dca1f565540a31b3627d9
| 11,404
|
py
|
Python
|
gcloud/connection.py
|
grapefruit623/gcloud-python
|
83d130e2cfb0bf867d7ba165ff157d31d52f1b35
|
[
"Apache-2.0"
] | null | null | null |
gcloud/connection.py
|
grapefruit623/gcloud-python
|
83d130e2cfb0bf867d7ba165ff157d31d52f1b35
|
[
"Apache-2.0"
] | null | null | null |
gcloud/connection.py
|
grapefruit623/gcloud-python
|
83d130e2cfb0bf867d7ba165ff157d31d52f1b35
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared implementation of connections to API servers."""
import json
from pkg_resources import get_distribution
import six
from six.moves.urllib.parse import urlencode # pylint: disable=F0401
import httplib2
from gcloud.credentials import get_credentials
from gcloud.exceptions import make_exception
API_BASE_URL = 'https://www.googleapis.com'
"""The base of the API call URL."""
class Connection(object):
"""A generic connection to Google Cloud Platform.
Subclasses should understand only the basic types in method arguments,
however they should be capable of returning advanced types.
If no value is passed in for ``http``, a :class:`httplib2.Http` object
will be created and authorized with the ``credentials``. If not, the
``credentials`` and ``http`` need not be related.
Subclasses may seek to use the private key from ``credentials`` to sign
data.
A custom (non-``httplib2``) HTTP object must have a ``request`` method
which accepts the following arguments:
* ``uri``
* ``method``
* ``body``
* ``headers``
In addition, ``redirections`` and ``connection_type`` may be used.
Without the use of ``credentials.authorize(http)``, a custom ``http``
object will also need to be able to add a bearer token to API
requests and handle token refresh on 401 errors.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: The OAuth2 Credentials to use for this connection.
:type http: :class:`httplib2.Http` or class that defines ``request()``.
:param http: An optional HTTP object to make requests.
"""
USER_AGENT = "gcloud-python/{0}".format(get_distribution('gcloud').version)
"""The user agent for gcloud-python requests."""
def __init__(self, credentials=None, http=None):
self._http = http
self._credentials = credentials
@property
def credentials(self):
"""Getter for current credentials.
:rtype: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:returns: The credentials object associated with this connection.
"""
return self._credentials
@property
def http(self):
"""A getter for the HTTP transport used in talking to the API.
:rtype: :class:`httplib2.Http`
:returns: A Http object used to transport data.
"""
if self._http is None:
self._http = httplib2.Http()
if self._credentials:
self._http = self._credentials.authorize(self._http)
return self._http
class JSONConnection(Connection):
"""A connection to a Google JSON-based API.
These APIs are discovery based. For reference:
https://developers.google.com/discovery/
This defines :meth:`Connection.api_request` for making a generic JSON
API request and API requests are created elsewhere.
The class constants
* ``API_BASE_URL``
* ``API_VERSION``
* ``API_URL_TEMPLATE``
must be updated by subclasses.
"""
API_BASE_URL = None
"""The base of the API call URL."""
API_VERSION = None
"""The version of the API, used in building the API call's URL."""
API_URL_TEMPLATE = None
"""A template for the URL of a particular API call."""
@classmethod
def build_api_url(cls, path, query_params=None,
api_base_url=None, api_version=None):
"""Construct an API url given a few components, some optional.
Typically, you shouldn't need to use this method.
:type path: string
:param path: The path to the resource (ie, ``'/b/bucket-name'``).
:type query_params: dict
:param query_params: A dictionary of keys and values to insert into
the query string of the URL.
:type api_base_url: string
:param api_base_url: The base URL for the API endpoint.
Typically you won't have to provide this.
:type api_version: string
:param api_version: The version of the API to call.
Typically you shouldn't provide this and instead
use the default for the library.
:rtype: string
:returns: The URL assembled from the pieces provided.
"""
api_base_url = api_base_url or cls.API_BASE_URL
url = cls.API_URL_TEMPLATE.format(
api_base_url=(api_base_url or cls.API_BASE_URL),
api_version=(api_version or cls.API_VERSION),
path=path)
query_params = query_params or {}
if query_params:
url += '?' + urlencode(query_params)
return url
def _make_request(self, method, url, data=None, content_type=None,
headers=None):
"""A low level method to send a request to the API.
Typically, you shouldn't need to use this method.
:type method: string
:param method: The HTTP method to use in the request.
:type url: string
:param url: The URL to send the request to.
:type data: string
:param data: The data to send as the body of the request.
:type content_type: string
:param content_type: The proper MIME type of the data provided.
:type headers: dict
:param headers: A dictionary of HTTP headers to send with the request.
:rtype: tuple of ``response`` (a dictionary of sorts)
and ``content`` (a string).
:returns: The HTTP response object and the content of the response,
returned by :meth:`_do_request`.
"""
headers = headers or {}
headers['Accept-Encoding'] = 'gzip'
if data:
content_length = len(str(data))
else:
content_length = 0
headers['Content-Length'] = content_length
if content_type:
headers['Content-Type'] = content_type
headers['User-Agent'] = self.USER_AGENT
return self._do_request(method, url, headers, data)
def _do_request(self, method, url, headers, data):
"""Low-level helper: perform the actual API request over HTTP.
Allows batch context managers to override and defer a request.
:type method: string
:param method: The HTTP method to use in the request.
:type url: string
:param url: The URL to send the request to.
:type headers: dict
:param headers: A dictionary of HTTP headers to send with the request.
:type data: string
:param data: The data to send as the body of the request.
:rtype: tuple of ``response`` (a dictionary of sorts)
and ``content`` (a string).
:returns: The HTTP response object and the content of the response.
"""
return self.http.request(uri=url, method=method, headers=headers,
body=data)
def api_request(self, method, path, query_params=None,
data=None, content_type=None,
api_base_url=None, api_version=None,
expect_json=True):
"""Make a request over the HTTP transport to the API.
You shouldn't need to use this method, but if you plan to
interact with the API using these primitives, this is the
correct one to use.
:type method: string
:param method: The HTTP method name (ie, ``GET``, ``POST``, etc).
Required.
:type path: string
:param path: The path to the resource (ie, ``'/b/bucket-name'``).
Required.
:type query_params: dict
:param query_params: A dictionary of keys and values to insert into
the query string of the URL. Default is
empty dict.
:type data: string
:param data: The data to send as the body of the request. Default is
the empty string.
:type content_type: string
:param content_type: The proper MIME type of the data provided. Default
is None.
:type api_base_url: string
:param api_base_url: The base URL for the API endpoint.
Typically you won't have to provide this.
Default is the standard API base URL.
:type api_version: string
:param api_version: The version of the API to call. Typically
you shouldn't provide this and instead use
the default for the library. Default is the
latest API version supported by
gcloud-python.
:type expect_json: boolean
:param expect_json: If True, this method will try to parse the
response as JSON and raise an exception if
that cannot be done. Default is True.
:raises: Exception if the response code is not 200 OK.
"""
url = self.build_api_url(path=path, query_params=query_params,
api_base_url=api_base_url,
api_version=api_version)
# Making the executive decision that any dictionary
# data will be sent properly as JSON.
if data and isinstance(data, dict):
data = json.dumps(data)
content_type = 'application/json'
response, content = self._make_request(
method=method, url=url, data=data, content_type=content_type)
if not 200 <= response.status < 300:
raise make_exception(response, content)
if content and expect_json:
content_type = response.get('content-type', '')
if not content_type.startswith('application/json'):
raise TypeError('Expected JSON, got %s' % content_type)
if isinstance(content, six.binary_type):
content = content.decode('utf-8')
return json.loads(content)
return content
def get_scoped_connection(klass, scopes):
"""Create a scoped connection to GCloud.
:type klass: subclass of :class:`gcloud.connection.Connection`
:param klass: the specific ``Connection`` class to instantiate.
:type scopes: list of URLs
:param scopes: the effective service auth scopes for the connection.
:rtype: instance of ``klass``
:returns: A connection defined with the proper credentials.
"""
implicit_credentials = get_credentials()
scoped_credentials = implicit_credentials.create_scoped(scopes)
return klass(credentials=scoped_credentials)
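# Illustrative sketch, not part of the original module (commented out because it
# needs real credentials): the minimal subclass a JSON service would define. The
# 'example' service name, 'v1' version, '/items' path and the scope URL are
# hypothetical placeholders, not a real Google API.
# class ExampleConnection(JSONConnection):
#     API_BASE_URL = API_BASE_URL
#     API_VERSION = 'v1'
#     API_URL_TEMPLATE = '{api_base_url}/example/{api_version}{path}'
#
# conn = get_scoped_connection(
#     ExampleConnection, ('https://www.googleapis.com/auth/userinfo.email',))
# items = conn.api_request(method='GET', path='/items',
#                          query_params={'maxResults': 10})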
| 35.52648
| 79
| 0.623202
|
import json
from pkg_resources import get_distribution
import six
from six.moves.urllib.parse import urlencode
import httplib2
from gcloud.credentials import get_credentials
from gcloud.exceptions import make_exception
API_BASE_URL = 'https://www.googleapis.com'
class Connection(object):
USER_AGENT = "gcloud-python/{0}".format(get_distribution('gcloud').version)
def __init__(self, credentials=None, http=None):
self._http = http
self._credentials = credentials
@property
def credentials(self):
return self._credentials
@property
def http(self):
if self._http is None:
self._http = httplib2.Http()
if self._credentials:
self._http = self._credentials.authorize(self._http)
return self._http
class JSONConnection(Connection):
API_BASE_URL = None
API_VERSION = None
API_URL_TEMPLATE = None
@classmethod
def build_api_url(cls, path, query_params=None,
api_base_url=None, api_version=None):
api_base_url = api_base_url or cls.API_BASE_URL
url = cls.API_URL_TEMPLATE.format(
api_base_url=(api_base_url or cls.API_BASE_URL),
api_version=(api_version or cls.API_VERSION),
path=path)
query_params = query_params or {}
if query_params:
url += '?' + urlencode(query_params)
return url
def _make_request(self, method, url, data=None, content_type=None,
headers=None):
headers = headers or {}
headers['Accept-Encoding'] = 'gzip'
if data:
content_length = len(str(data))
else:
content_length = 0
headers['Content-Length'] = content_length
if content_type:
headers['Content-Type'] = content_type
headers['User-Agent'] = self.USER_AGENT
return self._do_request(method, url, headers, data)
def _do_request(self, method, url, headers, data):
return self.http.request(uri=url, method=method, headers=headers,
body=data)
def api_request(self, method, path, query_params=None,
data=None, content_type=None,
api_base_url=None, api_version=None,
expect_json=True):
url = self.build_api_url(path=path, query_params=query_params,
api_base_url=api_base_url,
api_version=api_version)
if data and isinstance(data, dict):
data = json.dumps(data)
content_type = 'application/json'
response, content = self._make_request(
method=method, url=url, data=data, content_type=content_type)
if not 200 <= response.status < 300:
raise make_exception(response, content)
if content and expect_json:
content_type = response.get('content-type', '')
if not content_type.startswith('application/json'):
raise TypeError('Expected JSON, got %s' % content_type)
if isinstance(content, six.binary_type):
content = content.decode('utf-8')
return json.loads(content)
return content
def get_scoped_connection(klass, scopes):
implicit_credentials = get_credentials()
scoped_credentials = implicit_credentials.create_scoped(scopes)
return klass(credentials=scoped_credentials)
| true
| true
|
f70b1e86c28d848a3ed36c803e303c1039a3b3d1
| 2,642
|
py
|
Python
|
thorpy/elements/text.py
|
YannThorimbert/ThorPy-1.0
|
2855491e7d5016e9cbefb71784d169bb57cf8c73
|
[
"MIT"
] | 1
|
2020-02-23T13:06:02.000Z
|
2020-02-23T13:06:02.000Z
|
thorpy/elements/text.py
|
YannThorimbert/ThorPy-1.0
|
2855491e7d5016e9cbefb71784d169bb57cf8c73
|
[
"MIT"
] | null | null | null |
thorpy/elements/text.py
|
YannThorimbert/ThorPy-1.0
|
2855491e7d5016e9cbefb71784d169bb57cf8c73
|
[
"MIT"
] | null | null | null |
from __future__ import division
from thorpy.elements.element import Element
from thorpy.miscgui.constants import STATE_NORMAL
class OneLineText(Element):
def __init__(self, text="", elements=None, normal_params=None):
Element.__init__(self, text, elements, normal_params)
def finish(self):
self.set_style("text")
Element.finish(self)
class MultilineText(Element):
def __init__(self, text="", size=None, elements=None, normal_params=None):
Element.__init__(self, text, elements, normal_params)
self._size = size
self.visible = False
def finish(self):
Element.finish(self)
if not self._size:
self._size = self.get_fus_rect()
self.set_size(self._size)
for line in self.get_lines(STATE_NORMAL):
e = OneLineText(line)
e.finish()
e.set_writer(self.current_state.fusionner.title._writer)
self.add_elements([e])
self.format_txt()
def build_elements(self):
for e in self._elements:
e.father = None
self._elements = []
self._blit_before = []
self._blit_after = []
self.set_size(self._size)
for line in self.get_lines(STATE_NORMAL):
e = OneLineText(line)
e.finish()
e.set_writer(self.current_state.fusionner.title._writer)
self.add_elements([e])
self.format_txt()
def format_txt(self):
title = self._states[STATE_NORMAL].fusionner.title
(x, y) = title._pos
r = title.get_rect()
for i in self._elements:
(w, h) = i.get_fus_size()
            if title._align == "left":
                x = title._pos[0]
            elif title._align == "center":
                x = (r.width - w) // 2
            elif title._align == "right":
x = r.width - w
i.set_topleft((x, y))
y += title._space + h
def set_font_color(self, color, state=None, center_title=True):
"""set font color for a given state"""
Element.set_font_color(self, color, state, center_title)
self.build_elements()
        # restore the correct color, etc.
def set_font_size(self, size, state=None, center_title=True):
"""set font color for a given state"""
Element.set_font_size(self, size, state, center_title)
self.build_elements()
def set_font_effects(self, biu, state=None, center=True, preserve=False):
"""biu = tuple : (bold, italic, underline)"""
Element.set_font_effects(self, biu, state, center, preserve)
self.build_elements()
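# Illustrative usage sketch, not part of the original module (commented out; it
# assumes a ThorPy application/menu has already been set up elsewhere, so only
# the MultilineText calls defined above are shown).
# text = MultilineText("First line\nSecond line", size=(200, 60))
# text.finish()                      # builds one OneLineText child per line
# text.set_font_color((255, 0, 0))   # rebuilds the children with the new color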
| 33.025
| 78
| 0.604845
|
from __future__ import division
from thorpy.elements.element import Element
from thorpy.miscgui.constants import STATE_NORMAL
class OneLineText(Element):
def __init__(self, text="", elements=None, normal_params=None):
Element.__init__(self, text, elements, normal_params)
def finish(self):
self.set_style("text")
Element.finish(self)
class MultilineText(Element):
def __init__(self, text="", size=None, elements=None, normal_params=None):
Element.__init__(self, text, elements, normal_params)
self._size = size
self.visible = False
def finish(self):
Element.finish(self)
if not self._size:
self._size = self.get_fus_rect()
self.set_size(self._size)
for line in self.get_lines(STATE_NORMAL):
e = OneLineText(line)
e.finish()
e.set_writer(self.current_state.fusionner.title._writer)
self.add_elements([e])
self.format_txt()
def build_elements(self):
for e in self._elements:
e.father = None
self._elements = []
self._blit_before = []
self._blit_after = []
self.set_size(self._size)
for line in self.get_lines(STATE_NORMAL):
e = OneLineText(line)
e.finish()
e.set_writer(self.current_state.fusionner.title._writer)
self.add_elements([e])
self.format_txt()
def format_txt(self):
title = self._states[STATE_NORMAL].fusionner.title
(x, y) = title._pos
r = title.get_rect()
for i in self._elements:
(w, h) = i.get_fus_size()
            if title._align == "left":
                x = title._pos[0]
            elif title._align == "center":
                x = (r.width - w) // 2
            elif title._align == "right":
x = r.width - w
i.set_topleft((x, y))
y += title._space + h
def set_font_color(self, color, state=None, center_title=True):
Element.set_font_color(self, color, state, center_title)
self.build_elements()
def set_font_size(self, size, state=None, center_title=True):
Element.set_font_size(self, size, state, center_title)
self.build_elements()
def set_font_effects(self, biu, state=None, center=True, preserve=False):
Element.set_font_effects(self, biu, state, center, preserve)
self.build_elements()
| true
| true
|
f70b1f86cf5fd83b8b23b2fcca78763698db8f0f
| 114
|
py
|
Python
|
src/vm/__init__.py
|
mingz2013/lang-py
|
1788bae92cbc8b5f3f99d9ae1c45ea116d870d91
|
[
"Apache-2.0"
] | null | null | null |
src/vm/__init__.py
|
mingz2013/lang-py
|
1788bae92cbc8b5f3f99d9ae1c45ea116d870d91
|
[
"Apache-2.0"
] | null | null | null |
src/vm/__init__.py
|
mingz2013/lang-py
|
1788bae92cbc8b5f3f99d9ae1c45ea116d870d91
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@FileName: __init__.py
@Time: 2020/2/7 20:11
@Author: zhaojm
Module Description
"""
| 11.4
| 23
| 0.622807
| true
| true
|
|
f70b206f88d8d3a4cabcf553b9de5db1cefe513c
| 228
|
py
|
Python
|
sitepackages/djangae/models.py
|
bitcpf/djangoage
|
f116860cbfa799eb6c47306a72d742b63c970dce
|
[
"Apache-2.0"
] | null | null | null |
sitepackages/djangae/models.py
|
bitcpf/djangoage
|
f116860cbfa799eb6c47306a72d742b63c970dce
|
[
"Apache-2.0"
] | null | null | null |
sitepackages/djangae/models.py
|
bitcpf/djangoage
|
f116860cbfa799eb6c47306a72d742b63c970dce
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from djangae import patches
class CounterShard(models.Model):
count = models.PositiveIntegerField()
label = models.CharField(max_length=500)
class Meta:
app_label = "djangae"
| 19
| 44
| 0.723684
|
from django.db import models
from djangae import patches
class CounterShard(models.Model):
count = models.PositiveIntegerField()
label = models.CharField(max_length=500)
class Meta:
app_label = "djangae"
| true
| true
|
f70b2195d3e92beb097b41bf27615ee7cb7b8faa
| 489
|
py
|
Python
|
galeria/migrations/0006_alter_post_published.py
|
JoseDevApps/Pets
|
280e193c5bb293893a2baa547fcde0141f5db010
|
[
"MIT"
] | null | null | null |
galeria/migrations/0006_alter_post_published.py
|
JoseDevApps/Pets
|
280e193c5bb293893a2baa547fcde0141f5db010
|
[
"MIT"
] | null | null | null |
galeria/migrations/0006_alter_post_published.py
|
JoseDevApps/Pets
|
280e193c5bb293893a2baa547fcde0141f5db010
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-11-11 05:59
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('galeria', '0005_auto_20211111_0052'),
]
operations = [
migrations.AlterField(
model_name='post',
name='published',
field=models.DateTimeField(default=datetime.datetime(2021, 11, 11, 5, 59, 15, 363915), verbose_name='Fecha de publicación'),
),
]
| 24.45
| 136
| 0.633947
|
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('galeria', '0005_auto_20211111_0052'),
]
operations = [
migrations.AlterField(
model_name='post',
name='published',
field=models.DateTimeField(default=datetime.datetime(2021, 11, 11, 5, 59, 15, 363915), verbose_name='Fecha de publicación'),
),
]
| true
| true
|
f70b22555f264ff3a1b0984a03ecb595e0901e16
| 865
|
py
|
Python
|
practice/practice/spiders/authors.py
|
Soulzerz/py_web_crawler
|
13f66611703ce253ac85f914cabe3b851138f966
|
[
"MIT"
] | null | null | null |
practice/practice/spiders/authors.py
|
Soulzerz/py_web_crawler
|
13f66611703ce253ac85f914cabe3b851138f966
|
[
"MIT"
] | null | null | null |
practice/practice/spiders/authors.py
|
Soulzerz/py_web_crawler
|
13f66611703ce253ac85f914cabe3b851138f966
|
[
"MIT"
] | null | null | null |
from scrapy import Spider
class AuthorSpider(Spider):
name = 'author'
start_urls = [
'http://quotes.toscrape.com/',
]
def parse(self, response):
#follow links to author pages
for href in response.css('.author + a::attr(href)'):
yield response.follow(href, callback=self.parse_author)
#follow pagination links
for href in response.css('li.next a::attr(href)'):
yield response.follow(href, callback=self.parse)
def parse_author(self, response):
def extract_with_css(query):
return response.css(query).extract_first().strip()
yield{
'name': extract_with_css('h3.author-title::text'),
'birthdate': extract_with_css('.author-born-date::text'),
'bio': extract_with_css('.author-description::text')
}
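# Illustrative run sketch, not part of the original spider: runs the crawl without
# the `scrapy crawl author` CLI. The FEEDS setting assumes Scrapy >= 2.1, and the
# output file name is an arbitrary choice.
if __name__ == "__main__":
    from scrapy.crawler import CrawlerProcess

    process = CrawlerProcess(settings={"FEEDS": {"authors.json": {"format": "json"}}})
    process.crawl(AuthorSpider)
    process.start()  # blocks until the crawl finishes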
| 34.6
| 69
| 0.60578
|
from scrapy import Spider
class AuthorSpider(Spider):
name = 'author'
start_urls = [
'http://quotes.toscrape.com/',
]
def parse(self, response):
for href in response.css('.author + a::attr(href)'):
yield response.follow(href, callback=self.parse_author)
for href in response.css('li.next a::attr(href)'):
yield response.follow(href, callback=self.parse)
def parse_author(self, response):
def extract_with_css(query):
return response.css(query).extract_first().strip()
yield{
'name': extract_with_css('h3.author-title::text'),
'birthdate': extract_with_css('.author-born-date::text'),
'bio': extract_with_css('.author-description::text')
}
| true
| true
|
f70b22fe0f0e714035cf9a82676dd1c359a9668f
| 6,912
|
py
|
Python
|
tests/use_cases/test_environments.py
|
namuan/orkestra
|
83b67f7e816c94b75232691c14d91fd9d62213ed
|
[
"MIT"
] | null | null | null |
tests/use_cases/test_environments.py
|
namuan/orkestra
|
83b67f7e816c94b75232691c14d91fd9d62213ed
|
[
"MIT"
] | 11
|
2020-06-07T12:29:21.000Z
|
2020-06-24T19:44:36.000Z
|
tests/use_cases/test_environments.py
|
namuan/orkestra
|
83b67f7e816c94b75232691c14d91fd9d62213ed
|
[
"MIT"
] | null | null | null |
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialogButtonBox
from . import get_main_window, close_application
NO_OF_ENVIRONMENTS = 5
NO_OF_ENVIRONMENTS_TO_DELETE = 3
NO_OF_ENVIRONMENTS_TO_RE_ADD = 1
def get_toolbar_environments_combo(window):
return window.environment_list_view.get_environment_list_combo()
def show_window(qtbot, clear_environments=True):
window = get_main_window()
qtbot.addWidget(window)
if clear_environments:
window.world.environment_store.clear_environments()
window.environment_view.show_dialog()
return window
def add_environments(qtbot, window, number):
for i in range(number):
qtbot.mouseClick(window.environment_view.btn_add_environment, QtCore.Qt.LeftButton)
def remove_environments(qtbot, window, number):
for i in range(number):
qtbot.mouseClick(window.environment_view.btn_remove_environment, QtCore.Qt.LeftButton)
def close_and_save_environments(qtbot, window):
ok_button = window.environment_view.btn_dialog_close.button(QDialogButtonBox.Ok)
qtbot.mouseClick(ok_button, QtCore.Qt.LeftButton)
def close_and_discard_changes(qtbot, window):
cancel_button = window.environment_view.btn_dialog_close.button(QDialogButtonBox.Cancel)
qtbot.mouseClick(cancel_button, QtCore.Qt.LeftButton)
def test_adding_removing_env(qtbot):
# given
window = show_window(qtbot)
# when
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# then
assert window.environment_view.lst_environments.count() == NO_OF_ENVIRONMENTS
# remove
remove_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# and close dialog
close_and_save_environments(qtbot, window)
# and re-open
window.environment_view.show_dialog()
# check environments in toolbar
assert get_toolbar_environments_combo(window).count() == 0
# then
assert window.environment_view.lst_environments.count() == 0
def test_renaming_environment(qtbot):
# given a window
window = show_window(qtbot)
# add a few environments
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# select an environment from list
window.environment_view.lst_environments.setCurrentRow(2)
currently_selected = window.environment_view.lst_environments.currentItem()
# edit list item
new_environment_name = "Development"
currently_selected.setText(new_environment_name)
# save and close application
close_and_save_environments(qtbot, window)
# get environments from controller
environments = [e.name for e in window.environment_list_view.world.environment_store.get_environments()]
assert new_environment_name in environments
def test_saving_envs(qtbot):
# given
window = show_window(qtbot)
# and (adding a few environments)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# when
close_and_save_environments(qtbot, window)
# then
environments = window.world.environment_store.get_environments()
assert len(environments) == NO_OF_ENVIRONMENTS, "Environments not being saved in database"
# and (re-opening the dialog box after close)
window.environment_view.show_dialog()
# then
assert window.environment_view.lst_environments.count() == NO_OF_ENVIRONMENTS, \
"Seems like the dialog box is reloading environments"
def test_loading_envs(qtbot):
# given
window = show_window(qtbot)
# and (adding a few environments)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# and (save)
close_and_save_environments(qtbot, window)
# and (close app)
close_application(window)
# when
window = show_window(qtbot, clear_environments=False)
# then
env_list_combo = get_toolbar_environments_combo(window)
assert env_list_combo.count() == NO_OF_ENVIRONMENTS, \
"Environments not loaded in toolbar on fresh re-start"
# and
assert window.environment_view.lst_environments.count() == NO_OF_ENVIRONMENTS, \
"Environments not being loaded from database on a fresh re-start"
def test_discard_envs_changes_on_cancel(qtbot):
# given
window = show_window(qtbot)
# when
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# then
close_and_discard_changes(qtbot, window)
# then
environments = window.world.environment_store.get_environments()
assert len(environments) == 0
def test_discard_envs_changes_on_esc(qtbot):
# given
window = show_window(qtbot)
# when
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# then
qtbot.keyClick(window.environment_view.lst_environments, Qt.Key_Escape)
# then
environments = window.world.environment_store.get_environments()
assert len(environments) == 0
def test_refresh_toolbar_after_adding_deleting_envs(qtbot):
# given
window = show_window(qtbot)
# and (adding a few environments)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# when (click ok to save environments)
close_and_save_environments(qtbot, window)
# then (check toolbar environments)
assert get_toolbar_environments_combo(window).count() == NO_OF_ENVIRONMENTS, \
"Environments not loaded in toolbar on after Environments Dialog close"
# and (re-opening the dialog box after close)
window.environment_view.show_dialog()
# and (delete 3 and add 1 environment(s))
remove_environments(qtbot, window, NO_OF_ENVIRONMENTS_TO_DELETE)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS_TO_RE_ADD)
# and (click ok to save environments)
close_and_save_environments(qtbot, window)
# then (check toolbar environments)
remaining_environments = NO_OF_ENVIRONMENTS - NO_OF_ENVIRONMENTS_TO_DELETE + NO_OF_ENVIRONMENTS_TO_RE_ADD
assert get_toolbar_environments_combo(window).count() == remaining_environments, \
"Environments not loaded in toolbar on (deleting/re-adding) after Environments Dialog close"
def test_update_currently_selected_environment(qtbot):
# given (a window with few environments)
window = show_window(qtbot)
# and
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# and
close_and_save_environments(qtbot, window)
# when (a new environment is selected from toolbar)
toolbar_environments = get_toolbar_environments_combo(window)
toolbar_environments.setCurrentIndex(3)
selected_environment = toolbar_environments.currentText()
# and application is closed
window.toolbar_controller.trigger_quit_application()
# and window is re-opened
window = show_window(qtbot)
# then the selected environment should be same as before
toolbar_environments = get_toolbar_environments_combo(window)
selected_environment_after_restart = toolbar_environments.currentText()
assert selected_environment == selected_environment_after_restart
| 30.183406
| 109
| 0.757813
|
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialogButtonBox
from . import get_main_window, close_application
NO_OF_ENVIRONMENTS = 5
NO_OF_ENVIRONMENTS_TO_DELETE = 3
NO_OF_ENVIRONMENTS_TO_RE_ADD = 1
def get_toolbar_environments_combo(window):
return window.environment_list_view.get_environment_list_combo()
def show_window(qtbot, clear_environments=True):
window = get_main_window()
qtbot.addWidget(window)
if clear_environments:
window.world.environment_store.clear_environments()
window.environment_view.show_dialog()
return window
def add_environments(qtbot, window, number):
for i in range(number):
qtbot.mouseClick(window.environment_view.btn_add_environment, QtCore.Qt.LeftButton)
def remove_environments(qtbot, window, number):
for i in range(number):
qtbot.mouseClick(window.environment_view.btn_remove_environment, QtCore.Qt.LeftButton)
def close_and_save_environments(qtbot, window):
ok_button = window.environment_view.btn_dialog_close.button(QDialogButtonBox.Ok)
qtbot.mouseClick(ok_button, QtCore.Qt.LeftButton)
def close_and_discard_changes(qtbot, window):
cancel_button = window.environment_view.btn_dialog_close.button(QDialogButtonBox.Cancel)
qtbot.mouseClick(cancel_button, QtCore.Qt.LeftButton)
def test_adding_removing_env(qtbot):
window = show_window(qtbot)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
assert window.environment_view.lst_environments.count() == NO_OF_ENVIRONMENTS
remove_environments(qtbot, window, NO_OF_ENVIRONMENTS)
close_and_save_environments(qtbot, window)
window.environment_view.show_dialog()
assert get_toolbar_environments_combo(window).count() == 0
assert window.environment_view.lst_environments.count() == 0
def test_renaming_environment(qtbot):
window = show_window(qtbot)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
window.environment_view.lst_environments.setCurrentRow(2)
currently_selected = window.environment_view.lst_environments.currentItem()
new_environment_name = "Development"
currently_selected.setText(new_environment_name)
close_and_save_environments(qtbot, window)
environments = [e.name for e in window.environment_list_view.world.environment_store.get_environments()]
assert new_environment_name in environments
def test_saving_envs(qtbot):
window = show_window(qtbot)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
close_and_save_environments(qtbot, window)
environments = window.world.environment_store.get_environments()
assert len(environments) == NO_OF_ENVIRONMENTS, "Environments not being saved in database"
window.environment_view.show_dialog()
assert window.environment_view.lst_environments.count() == NO_OF_ENVIRONMENTS, \
"Seems like the dialog box is reloading environments"
def test_loading_envs(qtbot):
window = show_window(qtbot)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
close_and_save_environments(qtbot, window)
close_application(window)
window = show_window(qtbot, clear_environments=False)
env_list_combo = get_toolbar_environments_combo(window)
assert env_list_combo.count() == NO_OF_ENVIRONMENTS, \
"Environments not loaded in toolbar on fresh re-start"
assert window.environment_view.lst_environments.count() == NO_OF_ENVIRONMENTS, \
"Environments not being loaded from database on a fresh re-start"
def test_discard_envs_changes_on_cancel(qtbot):
window = show_window(qtbot)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
close_and_discard_changes(qtbot, window)
environments = window.world.environment_store.get_environments()
assert len(environments) == 0
def test_discard_envs_changes_on_esc(qtbot):
window = show_window(qtbot)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
qtbot.keyClick(window.environment_view.lst_environments, Qt.Key_Escape)
environments = window.world.environment_store.get_environments()
assert len(environments) == 0
def test_refresh_toolbar_after_adding_deleting_envs(qtbot):
window = show_window(qtbot)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
close_and_save_environments(qtbot, window)
assert get_toolbar_environments_combo(window).count() == NO_OF_ENVIRONMENTS, \
"Environments not loaded in toolbar on after Environments Dialog close"
window.environment_view.show_dialog()
remove_environments(qtbot, window, NO_OF_ENVIRONMENTS_TO_DELETE)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS_TO_RE_ADD)
close_and_save_environments(qtbot, window)
remaining_environments = NO_OF_ENVIRONMENTS - NO_OF_ENVIRONMENTS_TO_DELETE + NO_OF_ENVIRONMENTS_TO_RE_ADD
assert get_toolbar_environments_combo(window).count() == remaining_environments, \
"Environments not loaded in toolbar on (deleting/re-adding) after Environments Dialog close"
def test_update_currently_selected_environment(qtbot):
window = show_window(qtbot)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
close_and_save_environments(qtbot, window)
toolbar_environments = get_toolbar_environments_combo(window)
toolbar_environments.setCurrentIndex(3)
selected_environment = toolbar_environments.currentText()
window.toolbar_controller.trigger_quit_application()
window = show_window(qtbot)
toolbar_environments = get_toolbar_environments_combo(window)
selected_environment_after_restart = toolbar_environments.currentText()
assert selected_environment == selected_environment_after_restart
| true
| true
|
f70b230a8610bab237b7c392f0f4b340a61d5e65
| 1,343
|
py
|
Python
|
tests/integration/test_main.py
|
benjaminkaplanphd/traveling-salesperson
|
5c788554fe90eeb81b6351aeec96f1d64caa7591
|
[
"MIT"
] | null | null | null |
tests/integration/test_main.py
|
benjaminkaplanphd/traveling-salesperson
|
5c788554fe90eeb81b6351aeec96f1d64caa7591
|
[
"MIT"
] | null | null | null |
tests/integration/test_main.py
|
benjaminkaplanphd/traveling-salesperson
|
5c788554fe90eeb81b6351aeec96f1d64caa7591
|
[
"MIT"
] | null | null | null |
"""
Integration tests for __main__.py
"""
# pragma pylint: disable=redefined-outer-name
from click.testing import CliRunner
import pytest
from traveling_salesperson import __main__ as main
def test_main_runs(mocker, filename_fixture):
"""Ensures that main() runs smoothly over a test file."""
mock_etl = mocker.spy(main, 'etl')
mock_distance = mocker.spy(main, 'distance_matrix')
mock_path = mocker.spy(main, 'determine_path')
mock_plot = mocker.spy(main, 'plot_path')
# Test cli interface
runner = CliRunner()
result = runner.invoke(main.main, ['-f', filename_fixture])
assert result.exit_code == 0
mock_etl.assert_called_once_with(filename_fixture)
mock_distance.assert_called_once()
mock_path.assert_called_once()
mock_plot.assert_called_once()
@pytest.mark.parametrize('arg_list,error_code',
[(['-x', 'bad_arg'], 2), # Command line error
(['-m', 'de-sitter'], 2), # Command line error
(['-f', 'bad_file'], 1)]) # File not found error
def test_main_fails_with_bad_argument(arg_list, error_code):
"""Ensures that main() has an error (code -1) when run with unsupported arguments."""
runner = CliRunner()
result = runner.invoke(main.main, arg_list)
assert result.exit_code == error_code
| 35.342105
| 89
| 0.673864
|
from click.testing import CliRunner
import pytest
from traveling_salesperson import __main__ as main
def test_main_runs(mocker, filename_fixture):
mock_etl = mocker.spy(main, 'etl')
mock_distance = mocker.spy(main, 'distance_matrix')
mock_path = mocker.spy(main, 'determine_path')
mock_plot = mocker.spy(main, 'plot_path')
runner = CliRunner()
result = runner.invoke(main.main, ['-f', filename_fixture])
assert result.exit_code == 0
mock_etl.assert_called_once_with(filename_fixture)
mock_distance.assert_called_once()
mock_path.assert_called_once()
mock_plot.assert_called_once()
@pytest.mark.parametrize('arg_list,error_code',
[(['-x', 'bad_arg'], 2),
(['-m', 'de-sitter'], 2),
(['-f', 'bad_file'], 1)])
def test_main_fails_with_bad_argument(arg_list, error_code):
runner = CliRunner()
result = runner.invoke(main.main, arg_list)
assert result.exit_code == error_code
| true
| true
|
f70b236aca7e96af4bd08a8c9e8e52cae3f487e5
| 544
|
py
|
Python
|
src/astrolib/util/constants.py
|
space-geek/integrationutils
|
384375702a6c053aa2e5aaca6b9d5c43d86a16ad
|
[
"MIT"
] | null | null | null |
src/astrolib/util/constants.py
|
space-geek/integrationutils
|
384375702a6c053aa2e5aaca6b9d5c43d86a16ad
|
[
"MIT"
] | null | null | null |
src/astrolib/util/constants.py
|
space-geek/integrationutils
|
384375702a6c053aa2e5aaca6b9d5c43d86a16ad
|
[
"MIT"
] | null | null | null |
""" TODO Module docstring
"""
# Threshold value under which a float will be treated as zero
MAX_ZERO_THRESHOLD_VALUE = 1.0e-14
# Minimum integration step size, in seconds
MINIMUM_STEP_SIZE_IN_SECONDS = 1.0e-9
# Number of whole nanoseconds per second
NANOSECONDS_PER_SECOND = int(1e9)
# Number of seconds per mean solar day
SECONDS_PER_SOLAR_DAY = 86400.0
# Number of seconds per minute
SECONDS_PER_MINUTE = 60.0
# Number of seconds per hour
SECONDS_PER_HOUR = 3600.0
# Earth gravitational constant, km^3 / s^2
EARTH_MU = 3.986004418e5
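# Illustrative sketch, not part of the original module (commented out to keep the
# constants module free of runtime work): the constants above are enough to compute
# a circular-orbit period; the 6878.0 km radius (~500 km altitude) is an assumed
# example value.
# import math
# period_s = 2.0 * math.pi * math.sqrt(6878.0 ** 3 / EARTH_MU)   # ~5.68e3 s (~94.6 min)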
| 21.76
| 61
| 0.773897
|
MAX_ZERO_THRESHOLD_VALUE = 1.0e-14
MINIMUM_STEP_SIZE_IN_SECONDS = 1.0e-9
NANOSECONDS_PER_SECOND = int(1e9)
SECONDS_PER_SOLAR_DAY = 86400.0
SECONDS_PER_MINUTE = 60.0
SECONDS_PER_HOUR = 3600.0
EARTH_MU = 3.986004418e5
| true
| true
|
f70b23f1200f4265cbd2958a15e879a5f263f877
| 10,005
|
py
|
Python
|
src/dataload/__init__.py
|
karawallace/mygene
|
35bf066eb50bc929b4bb4e2423d47b4c98797526
|
[
"Apache-2.0"
] | null | null | null |
src/dataload/__init__.py
|
karawallace/mygene
|
35bf066eb50bc929b4bb4e2423d47b4c98797526
|
[
"Apache-2.0"
] | null | null | null |
src/dataload/__init__.py
|
karawallace/mygene
|
35bf066eb50bc929b4bb4e2423d47b4c98797526
|
[
"Apache-2.0"
] | null | null | null |
'''data_load module is for loading individual genedocs from various data sources.'''
from __future__ import print_function
import sys
import copy
import types
import time
import datetime
import importlib
from biothings.utils.mongo import get_src_conn, get_src_dump, get_data_folder
from biothings.utils.common import get_timestamp, get_random_string, timesofar, dump2gridfs, iter_n
from config import DATA_SRC_DATABASE, DATA_SRC_MASTER_COLLECTION
__sources_dict__ = {
'entrez': [
'entrez.entrez_gene',
'entrez.entrez_homologene',
'entrez.entrez_genesummary',
'entrez.entrez_accession',
'entrez.entrez_refseq',
'entrez.entrez_unigene',
'entrez.entrez_go',
'entrez.entrez_ec',
'entrez.entrez_retired',
'entrez.entrez_generif',
'entrez.entrez_genomic_pos',
],
'ensembl': [
'ensembl.ensembl_gene',
'ensembl.ensembl_acc',
'ensembl.ensembl_genomic_pos',
'ensembl.ensembl_prosite',
'ensembl.ensembl_interpro',
'ensembl.ensembl_pfam'
],
'uniprot': [
'uniprot',
'uniprot.uniprot_pdb',
# 'uniprot.uniprot_ipi', # IPI is now discontinued, last update is still in the db, but won't be updated.
'uniprot.uniprot_pir'
],
'pharmgkb': ['pharmgkb'],
'reporter': ['reporter'],
'ucsc': ['ucsc.ucsc_exons'],
'exac': ['exac.broadinstitute_exac'],
'cpdb': ['cpdb'],
'reagent': ['reagent'],
}
__sources__ = None # should be a list defined at runtime
conn = get_src_conn()
doc_register = {}
class GeneDocSourceMaster(dict):
'''A class to manage various genedoc data sources.'''
__collection__ = DATA_SRC_MASTER_COLLECTION
__database__ = DATA_SRC_DATABASE
use_dot_notation = True
use_schemaless = True
structure = {
'name': str,
'timestamp': datetime.datetime,
}
class GeneDocSource(dict):
'''A base class for all source data.'''
__collection__ = None # should be specified individually
__database__ = DATA_SRC_DATABASE
use_dot_notation = True
use_schemaless = True
DEFAULT_FIELDTYPE = str
temp_collection = None # temp collection is for dataloading
def make_temp_collection(self):
'''Create a temp collection for dataloading, e.g., entrez_geneinfo_INEMO.'''
new_collection = None
while 1:
new_collection = self.__collection__ + '_temp_' + get_random_string()
if new_collection not in self.db.collection_names():
break
self.temp_collection = self.db[new_collection]
return new_collection
def doc_iterator(self, genedoc_d, batch=True, step=10000):
if isinstance(genedoc_d, types.GeneratorType) and batch:
for doc_li in iter_n(genedoc_d, n=step):
yield doc_li
else:
if batch:
doc_li = []
i = 0
for _id, doc in genedoc_d.items():
doc['_id'] = _id
_doc = copy.copy(self)
_doc.clear()
_doc.update(doc)
#if validate:
# _doc.validate()
if batch:
doc_li.append(_doc)
i += 1
if i % step == 0:
yield doc_li
doc_li = []
else:
yield _doc
if batch:
yield doc_li
def load(self, genedoc_d=None, update_data=True, update_master=True, test=False, step=10000):
if not self.temp_collection:
self.make_temp_collection()
self.temp_collection.drop() # drop all existing records just in case.
if update_data:
genedoc_d = genedoc_d or self.load_genedoc()
print("genedoc_d mem: %s" % sys.getsizeof(genedoc_d))
print("Uploading to the DB...", end='')
t0 = time.time()
# for doc in self.doc_iterator(genedoc_d, batch=False):
# if not test:
# doc.save()
for doc_li in self.doc_iterator(genedoc_d, batch=True, step=step):
if not test:
self.temp_collection.insert(doc_li, manipulate=False, check_keys=False)
print('Done[%s]' % timesofar(t0))
self.switch_collection()
if getattr(self, 'ENTREZ_GENEDOC_ROOT', False):
print('Uploading "geneid_d" to GridFS...', end='')
t0 = time.time()
geneid_d = self.get_geneid_d()
dump2gridfs(geneid_d, self.__collection__ + '__geneid_d.pyobj', self.db)
print('Done[%s]' % timesofar(t0))
if getattr(self, 'ENSEMBL_GENEDOC_ROOT', False):
print('Uploading "mapping2entrezgene" to GridFS...', end='')
t0 = time.time()
x2entrezgene_list = self.get_mapping_to_entrez()
dump2gridfs(x2entrezgene_list, self.__collection__ + '__2entrezgene_list.pyobj', self.db)
print('Done[%s]' % timesofar(t0))
if update_master:
# update src_master collection
if not test:
_doc = {"_id": str(self.__collection__),
"name": str(self.__collection__),
"timestamp": datetime.datetime.now()}
for attr in ['ENTREZ_GENEDOC_ROOT', 'ENSEMBL_GENEDOC_ROOT', 'id_type']:
if hasattr(self, attr):
_doc[attr] = getattr(self, attr)
if hasattr(self, 'get_mapping'):
_doc['mapping'] = getattr(self, 'get_mapping')()
coll = conn[GeneDocSourceMaster.__database__][GeneDocSourceMaster.__collection__]
dkey = {"_id": _doc["_id"]}
prev = coll.find_one(dkey)
if prev:
coll.replace_one(dkey, _doc)
else:
coll.insert_one(_doc)
def switch_collection(self):
        '''After a successful load, rename temp_collection to the regular collection name,
        and rename the existing collection to a temp name for archiving purposes.
'''
if self.temp_collection and self.temp_collection.count() > 0:
if self.collection.count() > 0:
# renaming existing collections
new_name = '_'.join([self.__collection__, 'archive', get_timestamp(), get_random_string()])
self.collection.rename(new_name, dropTarget=True)
self.temp_collection.rename(self.__collection__)
else:
print("Error: load data first.")
@property
def collection(self):
return self.db[self.__collection__]
#def validate_all(self, genedoc_d=None):
# """validate all genedoc_d."""
# genedoc_d = genedoc_d or self.load_genedoc()
# for doc in self.doc_iterator(genedoc_d, batch=False, validate=True):
# pass
def register_sources():
for src in __sources__:
src_m = importlib.import_module('dataload.sources.' + src)
metadata = src_m.__metadata__
name = src + '_doc'
metadata['load_genedoc'] = src_m.load_genedoc
metadata['get_mapping'] = src_m.get_mapping
if metadata.get('ENTREZ_GENEDOC_ROOT', False):
metadata['get_geneid_d'] = src_m.get_geneid_d
if metadata.get('ENSEMBL_GENEDOC_ROOT', False):
metadata['get_mapping_to_entrez'] = src_m.get_mapping_to_entrez
src_cls = type(name, (GeneDocSource,), metadata)
# manually propagate db attr
src_cls.db = conn[src_cls.__database__]
doc_register[name] = src_cls
conn.register(src_cls)
# register_sources()
def get_src(src):
_src = conn[src + '_doc']()
return _src
def load_src(src, **kwargs):
_src = doc_register[src + '_doc']()
_src.load(**kwargs)
def update_mapping(src):
_src = conn[src + '_doc']()
_src.load(update_data=False, update_master=True)
def load_all(**kwargs):
for src in __sources__:
load_src(src, **kwargs)
def get_mapping():
mapping = {}
properties = {}
for src in __sources__:
print("Loading mapping from %s..." % src)
_src = conn[src + '_doc']()
_field_properties = _src.get_mapping()
properties.update(_field_properties)
mapping["properties"] = properties
# enable _source compression
mapping["_source"] = {"enabled": True,
"compress": True,
"compression_threshold": "1kb"}
return mapping
def update_mapping():
for src in __sources__:
colname = src.split(".")[-1]
col = conn[colname]
regdoc = doc_register[src + '_doc']
mastercol = conn[GeneDocSourceMaster.__database__][GeneDocSourceMaster.__collection__]
_doc = {"_id": str(colname),
"name": str(colname),
"timestamp": datetime.datetime.now(),
"mapping" : regdoc.get_mapping(regdoc)}
print("Updating mapping for source: %s" % repr(colname))
dkey = {"_id": _doc["_id"]}
prev = mastercol.find_one(dkey)
if prev:
mastercol.replace_one(dkey, _doc)
else:
mastercol.insert_one(_doc)
def main():
'''
Example:
python -m dataload ensembl.ensembl_gene ensembl.ensembl_acc ensembl.ensembl_genomic_pos ensembl.ensembl_prosite ensembl.ensembl_interpro
python -m dataload/__init__ entrez.entrez_gene entrez.entrez_homologene entrez.entrez_genesummary
entrez.entrez_accession entrez.entrez_refseq entrez.entrez_unigene entrez.entrez_go
entrez.entrez_ec entrez.entrez_retired
'''
global __sources__
__sources__ = sys.argv[1:]
register_sources()
load_all()
if __name__ == '__main__':
main()
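# Illustrative programmatic sketch, not part of the original module (commented out;
# it assumes a running MongoDB source database and the data files the uploader
# expects). The source name is one of the keys listed in __sources_dict__ above.
# import dataload
# dataload.__sources__ = ['entrez.entrez_gene']
# dataload.register_sources()
# dataload.load_src('entrez.entrez_gene', update_data=True, update_master=True)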
| 35.105263
| 144
| 0.593303
|
from __future__ import print_function
import sys
import copy
import types
import time
import datetime
import importlib
from biothings.utils.mongo import get_src_conn, get_src_dump, get_data_folder
from biothings.utils.common import get_timestamp, get_random_string, timesofar, dump2gridfs, iter_n
from config import DATA_SRC_DATABASE, DATA_SRC_MASTER_COLLECTION
__sources_dict__ = {
'entrez': [
'entrez.entrez_gene',
'entrez.entrez_homologene',
'entrez.entrez_genesummary',
'entrez.entrez_accession',
'entrez.entrez_refseq',
'entrez.entrez_unigene',
'entrez.entrez_go',
'entrez.entrez_ec',
'entrez.entrez_retired',
'entrez.entrez_generif',
'entrez.entrez_genomic_pos',
],
'ensembl': [
'ensembl.ensembl_gene',
'ensembl.ensembl_acc',
'ensembl.ensembl_genomic_pos',
'ensembl.ensembl_prosite',
'ensembl.ensembl_interpro',
'ensembl.ensembl_pfam'
],
'uniprot': [
'uniprot',
'uniprot.uniprot_pdb',
        'uniprot.uniprot_pir'
    ],
    'pharmgkb': ['pharmgkb'],
    'reporter': ['reporter'],
'ucsc': ['ucsc.ucsc_exons'],
'exac': ['exac.broadinstitute_exac'],
'cpdb': ['cpdb'],
'reagent': ['reagent'],
}
__sources__ = None # should be a list defined at runtime
conn = get_src_conn()
doc_register = {}
class GeneDocSourceMaster(dict):
__collection__ = DATA_SRC_MASTER_COLLECTION
__database__ = DATA_SRC_DATABASE
use_dot_notation = True
use_schemaless = True
structure = {
'name': str,
'timestamp': datetime.datetime,
}
class GeneDocSource(dict):
__collection__ = None # should be specified individually
__database__ = DATA_SRC_DATABASE
use_dot_notation = True
use_schemaless = True
DEFAULT_FIELDTYPE = str
temp_collection = None # temp collection is for dataloading
def make_temp_collection(self):
new_collection = None
while 1:
new_collection = self.__collection__ + '_temp_' + get_random_string()
if new_collection not in self.db.collection_names():
break
self.temp_collection = self.db[new_collection]
return new_collection
def doc_iterator(self, genedoc_d, batch=True, step=10000):
if isinstance(genedoc_d, types.GeneratorType) and batch:
for doc_li in iter_n(genedoc_d, n=step):
yield doc_li
else:
if batch:
doc_li = []
i = 0
for _id, doc in genedoc_d.items():
doc['_id'] = _id
_doc = copy.copy(self)
_doc.clear()
_doc.update(doc)
#if validate:
# _doc.validate()
if batch:
doc_li.append(_doc)
i += 1
if i % step == 0:
yield doc_li
doc_li = []
else:
yield _doc
if batch:
yield doc_li
def load(self, genedoc_d=None, update_data=True, update_master=True, test=False, step=10000):
if not self.temp_collection:
self.make_temp_collection()
self.temp_collection.drop() # drop all existing records just in case.
if update_data:
genedoc_d = genedoc_d or self.load_genedoc()
print("genedoc_d mem: %s" % sys.getsizeof(genedoc_d))
print("Uploading to the DB...", end='')
t0 = time.time()
# for doc in self.doc_iterator(genedoc_d, batch=False):
# if not test:
# doc.save()
for doc_li in self.doc_iterator(genedoc_d, batch=True, step=step):
if not test:
self.temp_collection.insert(doc_li, manipulate=False, check_keys=False)
print('Done[%s]' % timesofar(t0))
self.switch_collection()
if getattr(self, 'ENTREZ_GENEDOC_ROOT', False):
print('Uploading "geneid_d" to GridFS...', end='')
t0 = time.time()
geneid_d = self.get_geneid_d()
dump2gridfs(geneid_d, self.__collection__ + '__geneid_d.pyobj', self.db)
print('Done[%s]' % timesofar(t0))
if getattr(self, 'ENSEMBL_GENEDOC_ROOT', False):
print('Uploading "mapping2entrezgene" to GridFS...', end='')
t0 = time.time()
x2entrezgene_list = self.get_mapping_to_entrez()
dump2gridfs(x2entrezgene_list, self.__collection__ + '__2entrezgene_list.pyobj', self.db)
print('Done[%s]' % timesofar(t0))
if update_master:
# update src_master collection
if not test:
_doc = {"_id": str(self.__collection__),
"name": str(self.__collection__),
"timestamp": datetime.datetime.now()}
for attr in ['ENTREZ_GENEDOC_ROOT', 'ENSEMBL_GENEDOC_ROOT', 'id_type']:
if hasattr(self, attr):
_doc[attr] = getattr(self, attr)
if hasattr(self, 'get_mapping'):
_doc['mapping'] = getattr(self, 'get_mapping')()
coll = conn[GeneDocSourceMaster.__database__][GeneDocSourceMaster.__collection__]
dkey = {"_id": _doc["_id"]}
prev = coll.find_one(dkey)
if prev:
coll.replace_one(dkey, _doc)
else:
coll.insert_one(_doc)
def switch_collection(self):
if self.temp_collection and self.temp_collection.count() > 0:
if self.collection.count() > 0:
# renaming existing collections
new_name = '_'.join([self.__collection__, 'archive', get_timestamp(), get_random_string()])
self.collection.rename(new_name, dropTarget=True)
self.temp_collection.rename(self.__collection__)
else:
print("Error: load data first.")
@property
def collection(self):
return self.db[self.__collection__]
#def validate_all(self, genedoc_d=None):
# """validate all genedoc_d."""
# genedoc_d = genedoc_d or self.load_genedoc()
# for doc in self.doc_iterator(genedoc_d, batch=False, validate=True):
# pass
def register_sources():
for src in __sources__:
src_m = importlib.import_module('dataload.sources.' + src)
metadata = src_m.__metadata__
name = src + '_doc'
metadata['load_genedoc'] = src_m.load_genedoc
metadata['get_mapping'] = src_m.get_mapping
if metadata.get('ENTREZ_GENEDOC_ROOT', False):
metadata['get_geneid_d'] = src_m.get_geneid_d
if metadata.get('ENSEMBL_GENEDOC_ROOT', False):
metadata['get_mapping_to_entrez'] = src_m.get_mapping_to_entrez
src_cls = type(name, (GeneDocSource,), metadata)
# manually propagate db attr
src_cls.db = conn[src_cls.__database__]
doc_register[name] = src_cls
conn.register(src_cls)
# register_sources()
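# Illustrative sketch (hypothetical source name): for a source package "mysource"
# exposing load_genedoc/get_mapping, the dynamic type() call in register_sources()
# builds a class roughly equivalent to
#
#     class mysource_doc(GeneDocSource):
#         __collection__ = 'mysource'          # from the package's __metadata__
#         load_genedoc = mysource.load_genedoc
#         get_mapping = mysource.get_mapping
#
# stored in doc_register['mysource_doc'] and registered with conn.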
def get_src(src):
_src = conn[src + '_doc']()
return _src
def load_src(src, **kwargs):
_src = doc_register[src + '_doc']()
_src.load(**kwargs)
def update_mapping(src):
_src = conn[src + '_doc']()
_src.load(update_data=False, update_master=True)
def load_all(**kwargs):
for src in __sources__:
load_src(src, **kwargs)
def get_mapping():
mapping = {}
properties = {}
for src in __sources__:
print("Loading mapping from %s..." % src)
_src = conn[src + '_doc']()
_field_properties = _src.get_mapping()
properties.update(_field_properties)
mapping["properties"] = properties
# enable _source compression
mapping["_source"] = {"enabled": True,
"compress": True,
"compression_threshold": "1kb"}
return mapping
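# Illustrative result shape (hypothetical source fields): get_mapping() above
# returns something like
#
#     {'properties': {'entrez_gene': {...}, 'pharmgkb': {...}},
#      '_source': {'enabled': True, 'compress': True,
#                  'compression_threshold': '1kb'}}
#
# ready to be passed to an Elasticsearch index/mapping creation call.
# NOTE: the zero-argument update_mapping() defined next rebinds the name and
# shadows the single-source update_mapping(src) defined earlier in this module.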
def update_mapping():
for src in __sources__:
colname = src.split(".")[-1]
col = conn[colname]
regdoc = doc_register[src + '_doc']
mastercol = conn[GeneDocSourceMaster.__database__][GeneDocSourceMaster.__collection__]
_doc = {"_id": str(colname),
"name": str(colname),
"timestamp": datetime.datetime.now(),
"mapping" : regdoc.get_mapping(regdoc)}
print("Updating mapping for source: %s" % repr(colname))
dkey = {"_id": _doc["_id"]}
prev = mastercol.find_one(dkey)
if prev:
mastercol.replace_one(dkey, _doc)
else:
mastercol.insert_one(_doc)
def main():
global __sources__
__sources__ = sys.argv[1:]
register_sources()
load_all()
if __name__ == '__main__':
main()
| true
| true
|
f70b274505cb775f5dfe8ee0c0eddac1fc9d3788
| 798
|
py
|
Python
|
rendering/tasks.py
|
everyvoter/everyvoter
|
65d9b8bdf9b5c64057135c279f6e03b6c207e0fa
|
[
"MIT"
] | 5
|
2019-07-01T17:50:44.000Z
|
2022-02-20T02:44:42.000Z
|
rendering/tasks.py
|
everyvoter/everyvoter
|
65d9b8bdf9b5c64057135c279f6e03b6c207e0fa
|
[
"MIT"
] | 3
|
2020-06-05T21:44:33.000Z
|
2021-06-10T21:39:26.000Z
|
rendering/tasks.py
|
everyvoter/everyvoter
|
65d9b8bdf9b5c64057135c279f6e03b6c207e0fa
|
[
"MIT"
] | 1
|
2021-12-09T06:32:40.000Z
|
2021-12-09T06:32:40.000Z
|
"""Rendering Related Tasks"""
from celery import shared_task
import newrelic.agent
from rendering.render_email import compose_email
from mailer.mailserver import deliver
@shared_task
def sample_email(to_address, user_id, email_id, election_id, district_ids):
"""Sample an email to an end user"""
result = compose_email(
user_id,
email_id,
election_id,
district_ids)
newrelic.agent.add_custom_parameter(
'organization_id', result['organization_id'])
newrelic.agent.add_custom_parameter(
'email_id', result['email_id'])
final_subject = u'[sample] {}'.format(result['subject'])
deliver(
to_address=to_address,
from_address=result['from_address'],
subject=final_subject,
html=result['body'])
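# Illustrative usage sketch (hypothetical IDs and address): as a Celery shared
# task this is normally queued rather than called inline, e.g.
#
#     sample_email.delay('[email protected]', user_id=1, email_id=2,
#                        election_id=3, district_ids=[4, 5])
#
# which serializes the arguments and lets a worker render and deliver the sample.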
| 26.6
| 75
| 0.692982
|
from celery import shared_task
import newrelic.agent
from rendering.render_email import compose_email
from mailer.mailserver import deliver
@shared_task
def sample_email(to_address, user_id, email_id, election_id, district_ids):
result = compose_email(
user_id,
email_id,
election_id,
district_ids)
newrelic.agent.add_custom_parameter(
'organization_id', result['organization_id'])
newrelic.agent.add_custom_parameter(
'email_id', result['email_id'])
final_subject = u'[sample] {}'.format(result['subject'])
deliver(
to_address=to_address,
from_address=result['from_address'],
subject=final_subject,
html=result['body'])
| true
| true
|
f70b27fea3ce5edeff7e9b072b5f43440d39c19d
| 3,763
|
py
|
Python
|
staff_manage_sdk/model/cmdb_extend/idcrack_unit_info_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
webshell_sdk/model/cmdb_extend/idcrack_unit_info_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
webshell_sdk/model/cmdb_extend/idcrack_unit_info_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: idcrack_unit_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='idcrack_unit_info.proto',
package='cmdb_extend',
syntax='proto3',
serialized_options=_b('ZEgo.easyops.local/contracts/protorepo-models/easyops/model/cmdb_extend'),
serialized_pb=_b('\n\x17idcrack_unit_info.proto\x12\x0b\x63mdb_extend\x1a\x1cgoogle/protobuf/struct.proto\"m\n\x0fIdcrackUnitInfo\x12\x13\n\x0binstance_id\x18\x01 \x01(\t\x12\x0c\n\x04unum\x18\x02 \x01(\x05\x12\x0c\n\x04name\x18\x03 \x01(\t\x12)\n\x08unitInfo\x18\x04 \x01(\x0b\x32\x17.google.protobuf.StructBGZEgo.easyops.local/contracts/protorepo-models/easyops/model/cmdb_extendb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_IDCRACKUNITINFO = _descriptor.Descriptor(
name='IdcrackUnitInfo',
full_name='cmdb_extend.IdcrackUnitInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instance_id', full_name='cmdb_extend.IdcrackUnitInfo.instance_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unum', full_name='cmdb_extend.IdcrackUnitInfo.unum', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='cmdb_extend.IdcrackUnitInfo.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unitInfo', full_name='cmdb_extend.IdcrackUnitInfo.unitInfo', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=70,
serialized_end=179,
)
_IDCRACKUNITINFO.fields_by_name['unitInfo'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
DESCRIPTOR.message_types_by_name['IdcrackUnitInfo'] = _IDCRACKUNITINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IdcrackUnitInfo = _reflection.GeneratedProtocolMessageType('IdcrackUnitInfo', (_message.Message,), {
'DESCRIPTOR' : _IDCRACKUNITINFO,
'__module__' : 'idcrack_unit_info_pb2'
# @@protoc_insertion_point(class_scope:cmdb_extend.IdcrackUnitInfo)
})
_sym_db.RegisterMessage(IdcrackUnitInfo)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 39.197917
| 396
| 0.766144
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='idcrack_unit_info.proto',
package='cmdb_extend',
syntax='proto3',
serialized_options=_b('ZEgo.easyops.local/contracts/protorepo-models/easyops/model/cmdb_extend'),
serialized_pb=_b('\n\x17idcrack_unit_info.proto\x12\x0b\x63mdb_extend\x1a\x1cgoogle/protobuf/struct.proto\"m\n\x0fIdcrackUnitInfo\x12\x13\n\x0binstance_id\x18\x01 \x01(\t\x12\x0c\n\x04unum\x18\x02 \x01(\x05\x12\x0c\n\x04name\x18\x03 \x01(\t\x12)\n\x08unitInfo\x18\x04 \x01(\x0b\x32\x17.google.protobuf.StructBGZEgo.easyops.local/contracts/protorepo-models/easyops/model/cmdb_extendb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_IDCRACKUNITINFO = _descriptor.Descriptor(
name='IdcrackUnitInfo',
full_name='cmdb_extend.IdcrackUnitInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instance_id', full_name='cmdb_extend.IdcrackUnitInfo.instance_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unum', full_name='cmdb_extend.IdcrackUnitInfo.unum', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='cmdb_extend.IdcrackUnitInfo.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unitInfo', full_name='cmdb_extend.IdcrackUnitInfo.unitInfo', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=70,
serialized_end=179,
)
_IDCRACKUNITINFO.fields_by_name['unitInfo'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
DESCRIPTOR.message_types_by_name['IdcrackUnitInfo'] = _IDCRACKUNITINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IdcrackUnitInfo = _reflection.GeneratedProtocolMessageType('IdcrackUnitInfo', (_message.Message,), {
'DESCRIPTOR' : _IDCRACKUNITINFO,
'__module__' : 'idcrack_unit_info_pb2'
# @@protoc_insertion_point(class_scope:cmdb_extend.IdcrackUnitInfo)
})
_sym_db.RegisterMessage(IdcrackUnitInfo)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true
| true
|
f70b2818b2e1e54a65dde52029d6950bf731af54
| 1,296
|
py
|
Python
|
ThreeBotPackages/threebot/capacity/package.py
|
grimpy/jumpscaleX_threebot
|
81aab3f049b2b353c247cd2c9eecd759a34a64c3
|
[
"Apache-2.0"
] | null | null | null |
ThreeBotPackages/threebot/capacity/package.py
|
grimpy/jumpscaleX_threebot
|
81aab3f049b2b353c247cd2c9eecd759a34a64c3
|
[
"Apache-2.0"
] | null | null | null |
ThreeBotPackages/threebot/capacity/package.py
|
grimpy/jumpscaleX_threebot
|
81aab3f049b2b353c247cd2c9eecd759a34a64c3
|
[
"Apache-2.0"
] | null | null | null |
from Jumpscale import j
class Package(j.baseclasses.threebot_package):
def prepare(self):
"""
is called at install time
:return:
"""
pass
def start(self):
"""
called when the 3bot starts
:return:
"""
## TODO: BAD
# self.db.models_add(path=self.package_root + "/models")
# self.gedis_server.actors_add(j.sal.fs.joinPaths(self.package_root, "actors"))
server = self.openresty
website = server.get_from_port(443)
locations = website.locations.get("threebotapp_locations")
website_location = locations.locations_spa.new()
website_location.name = "capacity"
website_location.path_url = "/capacity"
# website_location.use_jumpscale_weblibs = False
fullpath = j.sal.fs.joinPaths(self.package_root, "html/")
website_location.path_location = fullpath
locations.configure()
website.configure()
def stop(self):
"""
called when the 3bot stops
:return:
"""
pass
def uninstall(self):
"""
called when the package is no longer needed and will be removed from the threebot
:return:
"""
# TODO: clean up bcdb ?
pass
| 25.411765
| 89
| 0.588735
|
from Jumpscale import j
class Package(j.baseclasses.threebot_package):
def prepare(self):
pass
def start(self):
server = self.openresty
website = server.get_from_port(443)
locations = website.locations.get("threebotapp_locations")
website_location = locations.locations_spa.new()
website_location.name = "capacity"
website_location.path_url = "/capacity"
fullpath = j.sal.fs.joinPaths(self.package_root, "html/")
website_location.path_location = fullpath
locations.configure()
website.configure()
def stop(self):
pass
def uninstall(self):
pass
| true
| true
|
f70b281ecb804bd367a615bc4a4bbf8209ed8eb9
| 101
|
py
|
Python
|
classwork1/classworkApp1/apps.py
|
cs-fullstack-2019-spring/django-intro1-cw-itayanna
|
5c4d577f890991ef78c2f98203c8deda65c04357
|
[
"Apache-2.0"
] | null | null | null |
classwork1/classworkApp1/apps.py
|
cs-fullstack-2019-spring/django-intro1-cw-itayanna
|
5c4d577f890991ef78c2f98203c8deda65c04357
|
[
"Apache-2.0"
] | null | null | null |
classwork1/classworkApp1/apps.py
|
cs-fullstack-2019-spring/django-intro1-cw-itayanna
|
5c4d577f890991ef78c2f98203c8deda65c04357
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class Classworkapp1Config(AppConfig):
name = 'classworkApp1'
| 16.833333
| 37
| 0.782178
|
from django.apps import AppConfig
class Classworkapp1Config(AppConfig):
name = 'classworkApp1'
| true
| true
|
f70b29e2ae59baf04fbe095ef1fe4e2a9c27ec3a
| 7,212
|
py
|
Python
|
plyse/term_parser.py
|
arcodergh/plyse
|
bb44543f9c812401489ceba68b24b8618d263830
|
[
"MIT"
] | 26
|
2016-05-31T14:45:24.000Z
|
2021-04-27T01:54:52.000Z
|
plyse/term_parser.py
|
arcodergh/plyse
|
bb44543f9c812401489ceba68b24b8618d263830
|
[
"MIT"
] | 11
|
2016-05-31T20:09:57.000Z
|
2022-02-18T11:43:50.000Z
|
plyse/term_parser.py
|
arcodergh/plyse
|
bb44543f9c812401489ceba68b24b8618d263830
|
[
"MIT"
] | 13
|
2016-05-31T19:41:36.000Z
|
2021-03-01T15:22:38.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from .util import load_module
class TermParserFactory(object):
@staticmethod
def build_from_conf(conf):
args = {k: conf[k] for k in ['default_fields', 'aliases', 'integer_as_string'] if k in conf}
return TermParser(**args) if not 'class' in conf else load_module(conf['class'])(**args)
@staticmethod
def build_default():
return TermParser()
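# Illustrative configuration sketch (hypothetical values): build_from_conf() picks
# only the keys it recognises, e.g.
#
#     TermParserFactory.build_from_conf({'default_fields': ['name', 'description'],
#                                        'aliases': {'desc': 'description'},
#                                        'integer_as_string': True})
#
# and, if a 'class' key is present, loads that dotted path and instantiates it
# with the same keyword arguments instead of the stock TermParser.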
class TermParser(object):
"""
    Parse and build a term from the grammar matches. A Term represents a query component with a field to look for
    (either a specific one or the default), a field type, the value required for that field and the type of that value.
TermParser defines methods to be used in combination with :class:Grammar as the callbacks for the pyparsing
setParseAction method.
Callback parameters are always:
- matched string from query string
- position of the match
- pyparsing token list
"""
def __init__(self, default_fields=['default'], aliases=None, integer_as_string=False):
self._default_fields = default_fields
self._field_name_aliases = aliases if aliases else {}
self._integers_as_string = integer_as_string
def _build_field_data(self, field_values, field_type):
return {Term.FIELD: field_values, Term.FIELD_TYPE: field_type}
def _build_value_data(self, value, value_type):
return {Term.VAL: value, Term.VAL_TYPE: value_type}
def _build_term_with_default_fields(self, value_dict):
default_fields = self._default_fields[0] if len(self._default_fields) == 1 else self._default_fields
r = self._build_field_data(default_fields, Term.DEFAULT)
r.update(value_dict)
return r
@property
def aliases(self):
return self._field_name_aliases
def term_parse(self, string, location, tokens):
"""
        Term parse receives a list with the components of a query term: the fields to look for and the desired value.
        Those components are expanded by field_parse and integer_parse (or whichever value parser matched) into a
        dictionary carrying the field/field_type and val/val_type entries. Thus, tokens[0] contains one element for
        the field data and another for the value data. If there is only one item, it means no field was specified,
        only a value, so it is treated as the default field, which can be configured to expand to several fields.
If tokens[0] has 2 elements:
> tokens[0][0]: field dict
> tokens[0][1]: value dict
If tokens[0] has 1 element:
> tokens[0][0]: value dict
"""
if tokens:
if len(tokens[0]) == 1: # If there was no field specified, use the default
r = self._build_term_with_default_fields(tokens[0][0])
else:
r = tokens[0][0]
r.update(tokens[0][1])
return Term(**r)
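    # Illustrative example (hypothetical grammar match): with default_fields=['name'],
    # a bare value such as "foo" arrives as a single value dict, so term_parse would
    # return roughly
    #
    #     Term(field='name', field_type='default', val='foo', val_type='partial_string')
    #
    # whereas "title:foo" carries both a field dict and a value dict to merge.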
def keyword_parse(self, string=None, location=None, tokens=None):
"""
        Keywords are defined externally, so values are restricted to the ones accepted/defined. They are always treated
        as strings, so the parsing method receives a token list with <keyword>, <separator>, <value>
            > e.g.: has:notification => token list would be ['has', ':', 'notification']
"""
if tokens:
fields = [f for f in "".join(tokens).split(":") if f]
output = self._build_field_data(fields[0], Term.KEYWORD)
output.update(self._build_value_data(fields[1], Term.KEYWORD_VALUE))
return output
def field_parse(self, string, location, tokens):
"""
        Fields are whatever comes before a separator and they are usually used for attribute/property matching. The value
        of a field is parsed separately from the field name and it depends on the definition of the grammar and the
accepted/supported values. Thus this method receives a token list with <field name> <separator>.
If combined or nested fields are allowed, the pattern would be:
<field name> <separator> <field name> <separator> ...
            > e.g.: address:zip:ABC1234 => token list would be ['address', ':', 'zip']
"""
if tokens:
fields = [f for f in "".join(tokens).split(":") if f]
t = fields if len(fields) > 1 else fields[0]
field_value = self._field_name_aliases.get(t, t)
return self._build_field_data(field_value, Term.ATTRIBUTE)
def integer_parse(self, string, location, tokens):
if tokens:
r = self._build_value_data(int(tokens[0]), Term.INT)
if self._integers_as_string:
r[Term.VAL_TYPE] = Term.PARTIAL_STRING
r[Term.VAL] = str(r[Term.VAL])
return r
def integer_comparison_parse(self, string, location, tokens):
if tokens:
val = int(tokens[1]) if not self._integers_as_string else tokens[1]
for symbol, value_type in [('<', Term.LOWER_THAN), ('<=', Term.LOWER_EQUAL_THAN),
('>', Term.GREATER_THAN), ('>=', Term.GREATER_EQUAL_THAN)]:
if tokens[0] == symbol:
return self._build_value_data(val, value_type)
raise Exception("Invalid comparison symbol!") # should never get here since pyparsing would fail before
def quoted_string_parse(self, string, location, tokens):
if tokens:
return self._build_value_data(tokens[0], Term.EXACT_STRING if '*' not in tokens[0] else Term.PARTIAL_STRING)
def partial_string_parse(self, string, location, tokens):
if tokens:
return self._build_value_data(tokens[0], Term.PARTIAL_STRING)
def range_parse(self, string, location, tokens):
if tokens:
return self._build_value_data([tokens[0][Term.VAL], tokens[2][Term.VAL]],
Term.RANGE % tokens[0][Term.VAL_TYPE])
class Term(dict):
# value types
RANGE = "%s_range"
INT = 'int'
EXACT_STRING = 'exact_string'
PARTIAL_STRING = 'partial_string'
KEYWORD_VALUE = 'keyword_value'
GREATER_THAN = 'greater_than'
GREATER_EQUAL_THAN = 'greater_equal_than'
LOWER_THAN = 'lower_than'
LOWER_EQUAL_THAN = 'lower_equal_than'
# field types
KEYWORD = 'keyword'
DEFAULT = 'default'
ATTRIBUTE = 'attribute'
# term keys
FIELD = 'field'
FIELD_TYPE = 'field_type'
VAL = 'val'
VAL_TYPE = 'val_type'
def __getattr__(self, key):
if key in self:
return self[key]
else:
raise AttributeError("Term doesn't have attribute '%s'" % key)
@property
def field(self):
return self[self.FIELD] if self.FIELD in self else None
@property
def field_type(self):
return self[self.FIELD_TYPE] if self.FIELD_TYPE in self else None
@property
def value(self):
return self[self.VAL] if self.VAL in self else None
@property
def value_type(self):
return self[self.VAL_TYPE] if self.VAL_TYPE in self else None
| 36.984615
| 122
| 0.640044
|
from .util import load_module
class TermParserFactory(object):
@staticmethod
def build_from_conf(conf):
args = {k: conf[k] for k in ['default_fields', 'aliases', 'integer_as_string'] if k in conf}
return TermParser(**args) if not 'class' in conf else load_module(conf['class'])(**args)
@staticmethod
def build_default():
return TermParser()
class TermParser(object):
def __init__(self, default_fields=['default'], aliases=None, integer_as_string=False):
self._default_fields = default_fields
self._field_name_aliases = aliases if aliases else {}
self._integers_as_string = integer_as_string
def _build_field_data(self, field_values, field_type):
return {Term.FIELD: field_values, Term.FIELD_TYPE: field_type}
def _build_value_data(self, value, value_type):
return {Term.VAL: value, Term.VAL_TYPE: value_type}
def _build_term_with_default_fields(self, value_dict):
default_fields = self._default_fields[0] if len(self._default_fields) == 1 else self._default_fields
r = self._build_field_data(default_fields, Term.DEFAULT)
r.update(value_dict)
return r
@property
def aliases(self):
return self._field_name_aliases
def term_parse(self, string, location, tokens):
if tokens:
if len(tokens[0]) == 1:
r = self._build_term_with_default_fields(tokens[0][0])
else:
r = tokens[0][0]
r.update(tokens[0][1])
return Term(**r)
def keyword_parse(self, string=None, location=None, tokens=None):
if tokens:
fields = [f for f in "".join(tokens).split(":") if f]
output = self._build_field_data(fields[0], Term.KEYWORD)
output.update(self._build_value_data(fields[1], Term.KEYWORD_VALUE))
return output
def field_parse(self, string, location, tokens):
if tokens:
fields = [f for f in "".join(tokens).split(":") if f]
t = fields if len(fields) > 1 else fields[0]
field_value = self._field_name_aliases.get(t, t)
return self._build_field_data(field_value, Term.ATTRIBUTE)
def integer_parse(self, string, location, tokens):
if tokens:
r = self._build_value_data(int(tokens[0]), Term.INT)
if self._integers_as_string:
r[Term.VAL_TYPE] = Term.PARTIAL_STRING
r[Term.VAL] = str(r[Term.VAL])
return r
def integer_comparison_parse(self, string, location, tokens):
if tokens:
val = int(tokens[1]) if not self._integers_as_string else tokens[1]
for symbol, value_type in [('<', Term.LOWER_THAN), ('<=', Term.LOWER_EQUAL_THAN),
('>', Term.GREATER_THAN), ('>=', Term.GREATER_EQUAL_THAN)]:
if tokens[0] == symbol:
return self._build_value_data(val, value_type)
raise Exception("Invalid comparison symbol!")
def quoted_string_parse(self, string, location, tokens):
if tokens:
return self._build_value_data(tokens[0], Term.EXACT_STRING if '*' not in tokens[0] else Term.PARTIAL_STRING)
def partial_string_parse(self, string, location, tokens):
if tokens:
return self._build_value_data(tokens[0], Term.PARTIAL_STRING)
def range_parse(self, string, location, tokens):
if tokens:
return self._build_value_data([tokens[0][Term.VAL], tokens[2][Term.VAL]],
Term.RANGE % tokens[0][Term.VAL_TYPE])
class Term(dict):
RANGE = "%s_range"
INT = 'int'
EXACT_STRING = 'exact_string'
PARTIAL_STRING = 'partial_string'
KEYWORD_VALUE = 'keyword_value'
GREATER_THAN = 'greater_than'
GREATER_EQUAL_THAN = 'greater_equal_than'
LOWER_THAN = 'lower_than'
LOWER_EQUAL_THAN = 'lower_equal_than'
KEYWORD = 'keyword'
DEFAULT = 'default'
ATTRIBUTE = 'attribute'
FIELD = 'field'
FIELD_TYPE = 'field_type'
VAL = 'val'
VAL_TYPE = 'val_type'
def __getattr__(self, key):
if key in self:
return self[key]
else:
raise AttributeError("Term doesn't have attribute '%s'" % key)
@property
def field(self):
return self[self.FIELD] if self.FIELD in self else None
@property
def field_type(self):
return self[self.FIELD_TYPE] if self.FIELD_TYPE in self else None
@property
def value(self):
return self[self.VAL] if self.VAL in self else None
@property
def value_type(self):
return self[self.VAL_TYPE] if self.VAL_TYPE in self else None
| true
| true
|
f70b2a813717d6b844f5a5aa9a42bc87923adf2a
| 7,571
|
py
|
Python
|
bluetail/models/ocds_models.py
|
CodeForAfrica/bluetail
|
776e9f2993b6bc91c5ab0337fca4efcbaa1c320d
|
[
"MIT"
] | 1
|
2022-01-31T08:18:35.000Z
|
2022-01-31T08:18:35.000Z
|
bluetail/models/ocds_models.py
|
CodeForAfrica/bluetail
|
776e9f2993b6bc91c5ab0337fca4efcbaa1c320d
|
[
"MIT"
] | 1
|
2022-02-03T06:53:36.000Z
|
2022-02-03T10:22:33.000Z
|
bluetail/models/ocds_models.py
|
CodeForAfrica/bluetail
|
776e9f2993b6bc91c5ab0337fca4efcbaa1c320d
|
[
"MIT"
] | null | null | null |
from django.contrib.postgres.fields import JSONField
from django.db import models
from django_pgviews import view as pgviews
from cove.input.models import SuppliedData
from .bluetail_models import Flag
class OCDSPackageDataJSON(models.Model):
"""
Model to store OCDS JSON package data.
"""
package_data = JSONField(null=True)
supplied_data = models.ForeignKey(SuppliedData, on_delete=None, null=True)
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_package_data_json'
class OCDSPackageData(pgviews.View):
"""
Model to store OCDS JSON package data.
"""
package_data = JSONField()
supplied_data = models.ForeignKey(SuppliedData, on_delete=None)
uri = models.TextField()
publishedDate = models.DateTimeField()
publisher = JSONField()
publisher_uid = models.TextField()
publisher_uri = models.TextField()
publisher_name = models.TextField()
publisher_scheme = models.TextField()
extensions = JSONField()
sql = """
SELECT
package.id,
package.supplied_data_id,
package.package_data ->> 'uri' as uri,
package.package_data ->> 'license' as license,
package.package_data ->> 'version' as version,
package.package_data ->> 'publishedDate' as publishedDate,
package.package_data ->> 'publicationPolicy' as publicationPolicy,
package.package_data -> 'packages' as packages,
package.package_data -> 'publisher' as publisher,
package.package_data -> 'publisher' ->> 'uid' as publisher_uid,
package.package_data -> 'publisher' ->> 'uri' as publisher_uri,
package.package_data -> 'publisher' ->> 'name' as publisher_name,
package.package_data -> 'publisher' ->> 'scheme' as publisher_scheme,
package.package_data -> 'extensions' as extensions
FROM bluetail_ocds_package_data_json package
"""
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_package_data_view'
managed = False
class OCDSRecordJSON(models.Model):
"""
Model to store OCDS JSON records.
"""
ocid = models.TextField(primary_key=True)
record_json = JSONField()
package_data = models.ForeignKey(OCDSPackageDataJSON, on_delete=None, null=True)
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_record_json'
verbose_name_plural = 'OCDS JSON Records'
class OCDSReleaseJSON(pgviews.View):
"""
Model to store OCDS JSON releases.
OCID must be unique so multiple releases for a single OCID should be compiled before insertion.
"""
ocid = models.TextField(primary_key=True)
release_id = models.TextField()
release_json = JSONField()
package_data = models.ForeignKey(OCDSPackageDataJSON, on_delete=None, null=True)
sql = """
SELECT
ocds.ocid,
ocds.record_json -> 'compiledRelease' ->> 'id' as release_id,
ocds.record_json -> 'compiledRelease' as release_json,
ocds.package_data_id
FROM bluetail_ocds_record_json ocds
"""
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_release_json_view'
managed = False
class OCDSTender(pgviews.View):
"""
django-pg-views for extracting Tender details from an OCDSReleaseJSON object
    Tender, as defined in an OCDS version 1.1 release:
https://standard.open-contracting.org/latest/en/schema/reference/#tender
"""
# projection = ['bluetail.OCDSReleaseJSON.*', ]
# dependencies = ['bluetail.OtherView',]
ocid = models.TextField(primary_key=True)
release_id = models.TextField()
release_json = JSONField()
package_data_id = models.TextField()
title = models.TextField()
description = models.TextField()
value = models.FloatField()
currency = models.TextField()
release_date = models.DateTimeField()
tender_startdate = models.DateTimeField()
tender_enddate = models.DateTimeField()
buyer = models.TextField()
buyer_id = models.TextField()
sql = """
SELECT
ocds.ocid,
ocds.release_id,
ocds.release_json,
ocds.package_data_id,
ocds.release_json -> 'tag' as release_tag,
ocds.release_json ->> 'language' AS language,
ocds.release_json -> 'tender' ->> 'title' AS title,
ocds.release_json -> 'tender' ->> 'description' AS description,
ocds.release_json -> 'tender' -> 'value' ->> 'amount' AS value,
ocds.release_json -> 'tender' -> 'value' ->> 'currency' AS currency,
cast(NULLIF(ocds.release_json ->> 'date', '') AS TIMESTAMPTZ) AS release_date,
cast(NULLIF(ocds.release_json -> 'tender' -> 'tenderPeriod' ->> 'startDate', '') AS TIMESTAMPTZ) AS tender_startdate,
cast(NULLIF(ocds.release_json -> 'tender' -> 'tenderPeriod' ->> 'endDate', '') AS TIMESTAMPTZ) AS tender_enddate,
ocds.release_json -> 'buyer' ->> 'name' AS buyer,
ocds.release_json -> 'buyer' ->> 'id' AS buyer_id
FROM bluetail_ocds_release_json_view ocds
"""
@property
def flags(self):
return Flag.objects.filter(flagattachment__ocid=self.ocid)
@property
def total_warnings(self):
return self.flags.filter(flag_type="warning").count()
@property
def total_errors(self):
return self.flags.filter(flag_type="error").count()
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_tender_view'
managed = False
class OCDSTenderer(pgviews.View):
"""
View for extracting Party details from an OCDSReleaseJSON object
    Parties, as defined in an OCDS version 1.1 release:
https://standard.open-contracting.org/latest/en/schema/reference/#parties
"""
# dependencies = ['bluetail.OtherView',]
# projection = ['bluetail.OCDSReleaseJSON.ocid', ]
ocid = models.TextField(primary_key=True)
release_json = JSONField()
party_json = JSONField()
party_id = models.TextField()
party_role = models.TextField()
party_identifier_scheme = models.TextField()
party_identifier_id = models.TextField()
party_legalname = models.TextField()
party_name = models.TextField()
party_countryname = models.TextField()
contact_name = models.TextField()
sql = """
SELECT
ocds.ocid,
ocds.release_id,
ocds.release_json,
party as party_json,
role AS party_role,
party ->> 'id' as party_id,
party -> 'identifier' ->> 'scheme' as party_identifier_scheme,
party -> 'identifier' ->> 'id' as party_identifier_id,
party -> 'identifier' ->> 'legalName' as party_legalname,
party -> 'address' ->> 'countryName' as party_countryname,
party ->> 'name' party_name,
party -> 'contactPoint' ->> 'name' as contact_name
FROM
bluetail_ocds_release_json_view ocds,
LATERAL jsonb_array_elements(ocds.release_json -> 'parties') party,
LATERAL jsonb_array_elements_text(party -> 'roles') role
WHERE role = 'tenderer'
"""
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_tenderers_view'
managed = False
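# Illustrative usage sketch (assuming django-pgviews has synced these SQL views):
# the view models above are read like ordinary, read-only ORM models, e.g.
#
#     OCDSTenderer.objects.filter(ocid='ocds-213czf-000-00001')
#
# which returns one row per party carrying the 'tenderer' role in that release.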
| 36.752427
| 129
| 0.632941
|
from django.contrib.postgres.fields import JSONField
from django.db import models
from django_pgviews import view as pgviews
from cove.input.models import SuppliedData
from .bluetail_models import Flag
class OCDSPackageDataJSON(models.Model):
package_data = JSONField(null=True)
supplied_data = models.ForeignKey(SuppliedData, on_delete=None, null=True)
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_package_data_json'
class OCDSPackageData(pgviews.View):
package_data = JSONField()
supplied_data = models.ForeignKey(SuppliedData, on_delete=None)
uri = models.TextField()
publishedDate = models.DateTimeField()
publisher = JSONField()
publisher_uid = models.TextField()
publisher_uri = models.TextField()
publisher_name = models.TextField()
publisher_scheme = models.TextField()
extensions = JSONField()
sql = """
SELECT
package.id,
package.supplied_data_id,
package.package_data ->> 'uri' as uri,
package.package_data ->> 'license' as license,
package.package_data ->> 'version' as version,
package.package_data ->> 'publishedDate' as publishedDate,
package.package_data ->> 'publicationPolicy' as publicationPolicy,
package.package_data -> 'packages' as packages,
package.package_data -> 'publisher' as publisher,
package.package_data -> 'publisher' ->> 'uid' as publisher_uid,
package.package_data -> 'publisher' ->> 'uri' as publisher_uri,
package.package_data -> 'publisher' ->> 'name' as publisher_name,
package.package_data -> 'publisher' ->> 'scheme' as publisher_scheme,
package.package_data -> 'extensions' as extensions
FROM bluetail_ocds_package_data_json package
"""
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_package_data_view'
managed = False
class OCDSRecordJSON(models.Model):
ocid = models.TextField(primary_key=True)
record_json = JSONField()
package_data = models.ForeignKey(OCDSPackageDataJSON, on_delete=None, null=True)
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_record_json'
verbose_name_plural = 'OCDS JSON Records'
class OCDSReleaseJSON(pgviews.View):
ocid = models.TextField(primary_key=True)
release_id = models.TextField()
release_json = JSONField()
package_data = models.ForeignKey(OCDSPackageDataJSON, on_delete=None, null=True)
sql = """
SELECT
ocds.ocid,
ocds.record_json -> 'compiledRelease' ->> 'id' as release_id,
ocds.record_json -> 'compiledRelease' as release_json,
ocds.package_data_id
FROM bluetail_ocds_record_json ocds
"""
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_release_json_view'
managed = False
class OCDSTender(pgviews.View):
ocid = models.TextField(primary_key=True)
release_id = models.TextField()
release_json = JSONField()
package_data_id = models.TextField()
title = models.TextField()
description = models.TextField()
value = models.FloatField()
currency = models.TextField()
release_date = models.DateTimeField()
tender_startdate = models.DateTimeField()
tender_enddate = models.DateTimeField()
buyer = models.TextField()
buyer_id = models.TextField()
sql = """
SELECT
ocds.ocid,
ocds.release_id,
ocds.release_json,
ocds.package_data_id,
ocds.release_json -> 'tag' as release_tag,
ocds.release_json ->> 'language' AS language,
ocds.release_json -> 'tender' ->> 'title' AS title,
ocds.release_json -> 'tender' ->> 'description' AS description,
ocds.release_json -> 'tender' -> 'value' ->> 'amount' AS value,
ocds.release_json -> 'tender' -> 'value' ->> 'currency' AS currency,
cast(NULLIF(ocds.release_json ->> 'date', '') AS TIMESTAMPTZ) AS release_date,
cast(NULLIF(ocds.release_json -> 'tender' -> 'tenderPeriod' ->> 'startDate', '') AS TIMESTAMPTZ) AS tender_startdate,
cast(NULLIF(ocds.release_json -> 'tender' -> 'tenderPeriod' ->> 'endDate', '') AS TIMESTAMPTZ) AS tender_enddate,
ocds.release_json -> 'buyer' ->> 'name' AS buyer,
ocds.release_json -> 'buyer' ->> 'id' AS buyer_id
FROM bluetail_ocds_release_json_view ocds
"""
@property
def flags(self):
return Flag.objects.filter(flagattachment__ocid=self.ocid)
@property
def total_warnings(self):
return self.flags.filter(flag_type="warning").count()
@property
def total_errors(self):
return self.flags.filter(flag_type="error").count()
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_tender_view'
managed = False
class OCDSTenderer(pgviews.View):
ocid = models.TextField(primary_key=True)
release_json = JSONField()
party_json = JSONField()
party_id = models.TextField()
party_role = models.TextField()
party_identifier_scheme = models.TextField()
party_identifier_id = models.TextField()
party_legalname = models.TextField()
party_name = models.TextField()
party_countryname = models.TextField()
contact_name = models.TextField()
sql = """
SELECT
ocds.ocid,
ocds.release_id,
ocds.release_json,
party as party_json,
role AS party_role,
party ->> 'id' as party_id,
party -> 'identifier' ->> 'scheme' as party_identifier_scheme,
party -> 'identifier' ->> 'id' as party_identifier_id,
party -> 'identifier' ->> 'legalName' as party_legalname,
party -> 'address' ->> 'countryName' as party_countryname,
party ->> 'name' party_name,
party -> 'contactPoint' ->> 'name' as contact_name
FROM
bluetail_ocds_release_json_view ocds,
LATERAL jsonb_array_elements(ocds.release_json -> 'parties') party,
LATERAL jsonb_array_elements_text(party -> 'roles') role
WHERE role = 'tenderer'
"""
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_tenderers_view'
managed = False
| true
| true
|
f70b2ab2a3782f1d53ea23d291f9fea3c10fe878
| 7,613
|
py
|
Python
|
lib/tools/common.py
|
rowlap/ganeti
|
8ed853a8ec86cd9c295a086403a0ddd8c36c8173
|
[
"BSD-2-Clause"
] | 1
|
2022-01-30T01:46:46.000Z
|
2022-01-30T01:46:46.000Z
|
lib/tools/common.py
|
seanpm2001/ganeti
|
9129897cbe631bac198cbb432074bde789c6c29e
|
[
"BSD-2-Clause"
] | null | null | null |
lib/tools/common.py
|
seanpm2001/ganeti
|
9129897cbe631bac198cbb432074bde789c6c29e
|
[
"BSD-2-Clause"
] | null | null | null |
#
#
# Copyright (C) 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Common functions for tool scripts.
"""
import logging
import os
import time
from io import StringIO
import OpenSSL
from ganeti import constants
from ganeti import errors
from ganeti import pathutils
from ganeti import utils
from ganeti import serializer
from ganeti import ssconf
from ganeti import ssh
def VerifyOptions(parser, opts, args):
"""Verifies options and arguments for correctness.
"""
if args:
parser.error("No arguments are expected")
return opts
def _VerifyCertificateStrong(cert_pem, error_fn,
_check_fn=utils.CheckNodeCertificate):
"""Verifies a certificate against the local node daemon certificate.
Includes elaborate tests of encodings etc., and returns formatted
certificate.
@type cert_pem: string
@param cert_pem: Certificate and key in PEM format
@type error_fn: callable
@param error_fn: function to call in case of an error
@rtype: string
@return: Formatted key and certificate
"""
try:
cert = \
OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except Exception as err:
raise error_fn("(stdin) Unable to load certificate: %s" % err)
try:
key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except OpenSSL.crypto.Error as err:
raise error_fn("(stdin) Unable to load private key: %s" % err)
# Check certificate with given key; this detects cases where the key given on
# stdin doesn't match the certificate also given on stdin
try:
utils.X509CertKeyCheck(cert, key)
except OpenSSL.SSL.Error:
raise error_fn("(stdin) Certificate is not signed with given key")
# Standard checks, including check against an existing local certificate
# (no-op if that doesn't exist)
_check_fn(cert)
key_encoded = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
cert_encoded = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
cert)
complete_cert_encoded = key_encoded + cert_encoded
if not cert_pem == complete_cert_encoded.decode('ascii'):
logging.error("The certificate differs after being reencoded. Please"
" renew the certificates cluster-wide to prevent future"
" inconsistencies.")
# Format for storing on disk
buf = StringIO()
buf.write(cert_pem)
return buf.getvalue()
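# Illustrative usage sketch (hypothetical error type): callers pass the PEM text
# read from stdin together with an exception constructor taking one message, e.g.
#
#   formatted = _VerifyCertificateStrong(pem_text, SetupError)
#
# since the function raises error_fn(<message>) on failure; the returned string is
# the re-encoded private key followed by the certificate, ready to write to disk.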
def _VerifyCertificateSoft(cert_pem, error_fn,
_check_fn=utils.CheckNodeCertificate):
"""Verifies a certificate against the local node daemon certificate.
@type cert_pem: string
@param cert_pem: Certificate in PEM format (no key)
"""
try:
OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except OpenSSL.crypto.Error as err:
pass
else:
raise error_fn("No private key may be given")
try:
cert = \
OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except Exception as err:
raise errors.X509CertError("(stdin)",
"Unable to load certificate: %s" % err)
_check_fn(cert)
def VerifyCertificateSoft(data, error_fn, _verify_fn=_VerifyCertificateSoft):
"""Verifies cluster certificate if existing.
@type data: dict
@type error_fn: callable
@param error_fn: function to call in case of an error
@rtype: string
@return: Formatted key and certificate
"""
cert = data.get(constants.SSHS_NODE_DAEMON_CERTIFICATE)
if cert:
_verify_fn(cert, error_fn)
def VerifyCertificateStrong(data, error_fn,
_verify_fn=_VerifyCertificateStrong):
"""Verifies cluster certificate. Throws error when not existing.
@type data: dict
@type error_fn: callable
@param error_fn: function to call in case of an error
@rtype: string
@return: Formatted key and certificate
"""
cert = data.get(constants.NDS_NODE_DAEMON_CERTIFICATE)
if not cert:
raise error_fn("Node daemon certificate must be specified")
return _verify_fn(cert, error_fn)
def VerifyClusterName(data, error_fn, cluster_name_constant,
_verify_fn=ssconf.VerifyClusterName):
"""Verifies cluster name.
@type data: dict
"""
name = data.get(cluster_name_constant)
if name:
_verify_fn(name)
else:
raise error_fn("Cluster name must be specified")
return name
def VerifyHmac(data, error_fn):
"""Verifies the presence of the hmac secret.
@type data: dict
"""
hmac = data.get(constants.NDS_HMAC)
if not hmac:
raise error_fn("Hmac key must be provided")
return hmac
def LoadData(raw, data_check):
"""Parses and verifies input data.
@rtype: dict
"""
result = None
try:
result = serializer.LoadAndVerifyJson(raw, data_check)
logging.debug("Received data: %s", serializer.DumpJson(result))
except Exception as e:
logging.warn("Received data is not valid json: %s.", str(raw))
raise e
return result
def GenerateRootSshKeys(key_type, key_bits, error_fn, _suffix="",
_homedir_fn=None):
"""Generates root's SSH keys for this node.
"""
ssh.InitSSHSetup(key_type, key_bits, error_fn=error_fn,
_homedir_fn=_homedir_fn, _suffix=_suffix)
def GenerateClientCertificate(
data, error_fn, client_cert=pathutils.NODED_CLIENT_CERT_FILE,
signing_cert=pathutils.NODED_CERT_FILE):
"""Regenerates the client certificate of the node.
@type data: string
  @param data: the JSON-formatted input data
"""
if not os.path.exists(signing_cert):
raise error_fn("The signing certificate '%s' cannot be found."
% signing_cert)
# TODO: This sets the serial number to the number of seconds
# since epoch. This is technically not a correct serial number
# (in the way SSL is supposed to be used), but it serves us well
# enough for now, as we don't have any infrastructure for keeping
# track of the number of signed certificates yet.
serial_no = int(time.time())
# The hostname of the node is provided with the input data.
hostname = data.get(constants.NDS_NODE_NAME)
if not hostname:
raise error_fn("No hostname found.")
utils.GenerateSignedSslCert(client_cert, serial_no, signing_cert,
common_name=hostname)
| 30.210317
| 80
| 0.720609
|
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
import logging
import os
import time
from io import StringIO
import OpenSSL
from ganeti import constants
from ganeti import errors
from ganeti import pathutils
from ganeti import utils
from ganeti import serializer
from ganeti import ssconf
from ganeti import ssh
def VerifyOptions(parser, opts, args):
if args:
parser.error("No arguments are expected")
return opts
def _VerifyCertificateStrong(cert_pem, error_fn,
_check_fn=utils.CheckNodeCertificate):
try:
cert = \
OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except Exception as err:
raise error_fn("(stdin) Unable to load certificate: %s" % err)
try:
key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except OpenSSL.crypto.Error as err:
raise error_fn("(stdin) Unable to load private key: %s" % err)
try:
utils.X509CertKeyCheck(cert, key)
except OpenSSL.SSL.Error:
raise error_fn("(stdin) Certificate is not signed with given key")
# Standard checks, including check against an existing local certificate
# (no-op if that doesn't exist)
_check_fn(cert)
key_encoded = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
cert_encoded = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
cert)
complete_cert_encoded = key_encoded + cert_encoded
if not cert_pem == complete_cert_encoded.decode('ascii'):
logging.error("The certificate differs after being reencoded. Please"
" renew the certificates cluster-wide to prevent future"
" inconsistencies.")
buf = StringIO()
buf.write(cert_pem)
return buf.getvalue()
def _VerifyCertificateSoft(cert_pem, error_fn,
_check_fn=utils.CheckNodeCertificate):
try:
OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except OpenSSL.crypto.Error as err:
pass
else:
raise error_fn("No private key may be given")
try:
cert = \
OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except Exception as err:
raise errors.X509CertError("(stdin)",
"Unable to load certificate: %s" % err)
_check_fn(cert)
def VerifyCertificateSoft(data, error_fn, _verify_fn=_VerifyCertificateSoft):
cert = data.get(constants.SSHS_NODE_DAEMON_CERTIFICATE)
if cert:
_verify_fn(cert, error_fn)
def VerifyCertificateStrong(data, error_fn,
_verify_fn=_VerifyCertificateStrong):
cert = data.get(constants.NDS_NODE_DAEMON_CERTIFICATE)
if not cert:
raise error_fn("Node daemon certificate must be specified")
return _verify_fn(cert, error_fn)
def VerifyClusterName(data, error_fn, cluster_name_constant,
_verify_fn=ssconf.VerifyClusterName):
name = data.get(cluster_name_constant)
if name:
_verify_fn(name)
else:
raise error_fn("Cluster name must be specified")
return name
def VerifyHmac(data, error_fn):
hmac = data.get(constants.NDS_HMAC)
if not hmac:
raise error_fn("Hmac key must be provided")
return hmac
def LoadData(raw, data_check):
result = None
try:
result = serializer.LoadAndVerifyJson(raw, data_check)
logging.debug("Received data: %s", serializer.DumpJson(result))
except Exception as e:
logging.warn("Received data is not valid json: %s.", str(raw))
raise e
return result
def GenerateRootSshKeys(key_type, key_bits, error_fn, _suffix="",
_homedir_fn=None):
ssh.InitSSHSetup(key_type, key_bits, error_fn=error_fn,
_homedir_fn=_homedir_fn, _suffix=_suffix)
def GenerateClientCertificate(
data, error_fn, client_cert=pathutils.NODED_CLIENT_CERT_FILE,
signing_cert=pathutils.NODED_CERT_FILE):
if not os.path.exists(signing_cert):
raise error_fn("The signing certificate '%s' cannot be found."
% signing_cert)
# track of the number of signed certificates yet.
serial_no = int(time.time())
# The hostname of the node is provided with the input data.
hostname = data.get(constants.NDS_NODE_NAME)
if not hostname:
raise error_fn("No hostname found.")
utils.GenerateSignedSslCert(client_cert, serial_no, signing_cert,
common_name=hostname)
| true
| true
|
f70b2b2cddf15273b70142530c473aa2b5c66fe5
| 11,360
|
py
|
Python
|
meraki/controllers/saml_roles_controller.py
|
bossypants22/python-sdk-test
|
37701d62dc18c2abb910eb790ab978913adcaf7b
|
[
"MIT"
] | null | null | null |
meraki/controllers/saml_roles_controller.py
|
bossypants22/python-sdk-test
|
37701d62dc18c2abb910eb790ab978913adcaf7b
|
[
"MIT"
] | null | null | null |
meraki/controllers/saml_roles_controller.py
|
bossypants22/python-sdk-test
|
37701d62dc18c2abb910eb790ab978913adcaf7b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
meraki
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
from meraki.api_helper import APIHelper
from meraki.configuration import Configuration
from meraki.controllers.base_controller import BaseController
from meraki.http.auth.custom_header_auth import CustomHeaderAuth
class SAMLRolesController(BaseController):
"""A Controller to access Endpoints in the meraki API."""
def get_organization_saml_roles(self,
organization_id):
"""Does a GET request to /organizations/{organizationId}/samlRoles.
List the SAML roles for this organization
Args:
organization_id (string): TODO: type description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(organization_id=organization_id)
# Prepare query URL
_url_path = '/organizations/{organizationId}/samlRoles'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': organization_id
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
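    # Illustrative usage sketch (hypothetical wiring; the client attribute name is
    # an assumption, not taken from this file):
    #
    #     roles = client.saml_roles.get_organization_saml_roles('123456')
    #
    # returns the JSON-deserialized list of SAML roles for the organization.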
def create_organization_saml_role(self,
options=dict()):
"""Does a POST request to /organizations/{organizationId}/samlRoles.
Create a SAML role
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
organization_id -- string -- TODO: type description here.
Example:
create_organization_saml_role --
CreateOrganizationSamlRoleModel -- TODO: type
description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(organization_id=options.get("organization_id"))
# Prepare query URL
_url_path = '/organizations/{organizationId}/samlRoles'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': options.get('organization_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(options.get('create_organization_saml_role')))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def get_organization_saml_role(self,
options=dict()):
"""Does a GET request to /organizations/{organizationId}/samlRoles/{id}.
Return a SAML role
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
organization_id -- string -- TODO: type description here.
Example:
id -- string -- TODO: type description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(organization_id=options.get("organization_id"),
id=options.get("id"))
# Prepare query URL
_url_path = '/organizations/{organizationId}/samlRoles/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': options.get('organization_id', None),
'id': options.get('id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def update_organization_saml_role(self,
options=dict()):
"""Does a PUT request to /organizations/{organizationId}/samlRoles/{id}.
Update a SAML role
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
organization_id -- string -- TODO: type description here.
Example:
id -- string -- TODO: type description here. Example:
update_organization_saml_role --
UpdateOrganizationSamlRoleModel -- TODO: type
description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(organization_id=options.get("organization_id"),
id=options.get("id"))
# Prepare query URL
_url_path = '/organizations/{organizationId}/samlRoles/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': options.get('organization_id', None),
'id': options.get('id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(options.get('update_organization_saml_role')))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def delete_organization_saml_role(self,
options=dict()):
"""Does a DELETE request to /organizations/{organizationId}/samlRoles/{id}.
Remove a SAML role
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
organization_id -- string -- TODO: type description here.
Example:
id -- string -- TODO: type description here. Example:
Returns:
void: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the response.
"""
# Validate required parameters
self.validate_parameters(organization_id=options.get("organization_id"),
id=options.get("id"))
# Prepare query URL
_url_path = '/organizations/{organizationId}/samlRoles/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': options.get('organization_id', None),
'id': options.get('id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare and execute request
_request = self.http_client.delete(_query_url)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
| 39.859649
| 154
| 0.603081
|
from meraki.api_helper import APIHelper
from meraki.configuration import Configuration
from meraki.controllers.base_controller import BaseController
from meraki.http.auth.custom_header_auth import CustomHeaderAuth
class SAMLRolesController(BaseController):
def get_organization_saml_roles(self,
organization_id):
self.validate_parameters(organization_id=organization_id)
_url_path = '/organizations/{organizationId}/samlRoles'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': organization_id
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
_headers = {
'accept': 'application/json'
}
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
return APIHelper.json_deserialize(_context.response.raw_body)
def create_organization_saml_role(self,
options=dict()):
self.validate_parameters(organization_id=options.get("organization_id"))
_url_path = '/organizations/{organizationId}/samlRoles'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': options.get('organization_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(options.get('create_organization_saml_role')))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
return APIHelper.json_deserialize(_context.response.raw_body)
def get_organization_saml_role(self,
options=dict()):
self.validate_parameters(organization_id=options.get("organization_id"),
id=options.get("id"))
_url_path = '/organizations/{organizationId}/samlRoles/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': options.get('organization_id', None),
'id': options.get('id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
_headers = {
'accept': 'application/json'
}
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
return APIHelper.json_deserialize(_context.response.raw_body)
def update_organization_saml_role(self,
options=dict()):
self.validate_parameters(organization_id=options.get("organization_id"),
id=options.get("id"))
_url_path = '/organizations/{organizationId}/samlRoles/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': options.get('organization_id', None),
'id': options.get('id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
_request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(options.get('update_organization_saml_role')))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
return APIHelper.json_deserialize(_context.response.raw_body)
def delete_organization_saml_role(self,
options=dict()):
self.validate_parameters(organization_id=options.get("organization_id"),
id=options.get("id"))
_url_path = '/organizations/{organizationId}/samlRoles/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': options.get('organization_id', None),
'id': options.get('id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
_request = self.http_client.delete(_query_url)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
| true
| true
|
f70b2c2919f1a0e38a2129982ef8b02639dfb5a5
| 2,171
|
py
|
Python
|
credential.py
|
fiona-niwiduhaye/python-password-locker
|
aaed8ceac7f1dc0301db9d20594413ffd2e0b9ab
|
[
"Unlicense"
] | null | null | null |
credential.py
|
fiona-niwiduhaye/python-password-locker
|
aaed8ceac7f1dc0301db9d20594413ffd2e0b9ab
|
[
"Unlicense"
] | null | null | null |
credential.py
|
fiona-niwiduhaye/python-password-locker
|
aaed8ceac7f1dc0301db9d20594413ffd2e0b9ab
|
[
"Unlicense"
] | null | null | null |
import string
from random import choice
class Credential:
'''
Class that generates instances of a users credentials
'''
# Empty list of credentials
credential_list = []
def __init__(self, user_password, credential_name, credential_password):
'''
__init__ method to define the properties of a User object
Args:
credential_name : name of an account
user_password : password of the user
credential_password : password for the user account
'''
self.user_password = user_password
self.credential_name = credential_name
self.credential_password = credential_password
def save_credential(self):
'''
Method that saves a user's credentials to credential list
'''
Credential.credential_list.append(self)
@classmethod
def generate_password(cls):
'''
Method that generates a random alphanumeric password
'''
# Length of the generated password
size = 8
# Generate random alphanumeric
alphanum = string.ascii_uppercase + string.digits + string.ascii_lowercase
# Create password
password = ''.join(choice(alphanum) for _ in range(size))
return password
@classmethod
def display_credential(cls,password):
'''
Method that returns the credential list
Args:
password : the user password
'''
user_credential_list = []
for credential in cls.credential_list:
if credential.user_password == password:
user_credential_list.append(credential)
return user_credential_list
@classmethod
def credential_exist(cls, name):
'''
Method that checks if a credential exists in the credential list
Args:
name: name of the credential to search
Returns:
Boolean: True or False depending on whether the credential exists
'''
for credential in cls.credential_list:
if credential.credential_name == name:
return True
return False
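# Hedged usage sketch (not part of the original file; it relies on the module-level
# imports of `string` and `choice` added above):
#
#     cred = Credential("master-pass", "twitter", Credential.generate_password())
#     cred.save_credential()
#     Credential.credential_exist("twitter")        # -> True
#     Credential.display_credential("master-pass")  # -> [cred]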
| 28.194805
| 82
| 0.605251
|
import string
from random import choice
class Credential:
credential_list = []
def __init__(self, user_password, credential_name, credential_password):
self.user_password = user_password
self.credential_name = credential_name
self.credential_password = credential_password
def save_credential(self):
Credential.credential_list.append(self)
@classmethod
def generate_password(cls):
size = 8
alphanum = string.ascii_uppercase + string.digits + string.ascii_lowercase
password = ''.join( choice(alphanum) for num in range(size) )
return password
@classmethod
def display_credential(cls,password):
user_credential_list = []
for credential in cls.credential_list:
if credential.user_password == password:
user_credential_list.append(credential)
return user_credential_list
@classmethod
def credential_exist(cls, name):
for credential in cls.credential_list:
if credential.credential_name == name:
return True
return False
| true
| true
|
f70b2cd894737b29ceab7431ed16bf4467dc58e5
| 2,306
|
py
|
Python
|
tests/test_autoregressive.py
|
ai-di/Brancher
|
01d51137b0e6fc81512994c21cc3a19287353767
|
[
"MIT"
] | 208
|
2019-06-15T13:48:40.000Z
|
2021-10-16T05:03:46.000Z
|
tests/test_autoregressive.py
|
ai-di/Brancher
|
01d51137b0e6fc81512994c21cc3a19287353767
|
[
"MIT"
] | 18
|
2019-06-17T11:22:13.000Z
|
2019-09-26T10:45:59.000Z
|
tests/test_autoregressive.py
|
ai-di/Brancher
|
01d51137b0e6fc81512994c21cc3a19287353767
|
[
"MIT"
] | 32
|
2019-06-15T19:08:53.000Z
|
2020-02-16T13:39:41.000Z
|
import matplotlib.pyplot as plt
import numpy as np
from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable, BetaVariable
from brancher import inference
import brancher.functions as BF
# Probabilistic model #
T = 100
nu = LogNormalVariable(0.3, 1., 'nu')
x0 = NormalVariable(0., 1., 'x0')
b = BetaVariable(0.5, 1.5, 'b')
x = [x0]
names = ["x0"]
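# Each step of the chain is a conditionally Gaussian AR(1) update: x_t ~ Normal(b * x_{t-1}, nu)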
for t in range(1,T):
names.append("x{}".format(t))
x.append(NormalVariable(b * x[t - 1], nu, names[t]))
AR_model = ProbabilisticModel(x)
# Generate data #
data = AR_model._get_sample(number_samples=1)
time_series = [float(data[xt].cpu().detach().numpy()) for xt in x]
true_b = data[b].cpu().detach().numpy()
true_nu = data[nu].cpu().detach().numpy()
print("The true coefficient is: {}".format(float(true_b)))
# Observe data #
[xt.observe(data[xt][:, 0, :]) for xt in x]
# Variational distribution #
Qnu = LogNormalVariable(0.5, 1., "nu", learnable=True)
Qb = BetaVariable(0.5, 0.5, "b", learnable=True)
variational_posterior = ProbabilisticModel([Qb, Qnu])
AR_model.set_posterior_model(variational_posterior)
# Inference #
inference.perform_inference(AR_model,
number_iterations=200,
number_samples=300,
optimizer='Adam',
lr=0.05)
loss_list = AR_model.diagnostics["loss curve"]
# Statistics
posterior_samples = AR_model._get_posterior_sample(2000)
nu_posterior_samples = posterior_samples[nu].cpu().detach().numpy().flatten()
b_posterior_samples = posterior_samples[b].cpu().detach().numpy().flatten()
b_mean = np.mean(b_posterior_samples)
b_sd = np.sqrt(np.var(b_posterior_samples))
print("The estimated coefficient is: {} +- {}".format(b_mean, b_sd))
# Four subplots, unpack the axes array immediately
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
ax1.plot(time_series)
ax1.set_title("Time series")
ax2.plot(np.array(loss_list))
ax2.set_title("Convergence")
ax2.set_xlabel("Iteration")
ax3.hist(b_posterior_samples, 25)
ax3.axvline(x=true_b, lw=2, c="r")
ax3.set_title("Posterior samples (b)")
ax3.set_xlim(0,1)
ax4.hist(nu_posterior_samples, 25)
ax4.axvline(x=true_nu, lw=2, c="r")
ax4.set_title("Posterior samples (nu)")
plt.show()
| 32.942857
| 87
| 0.702082
|
import matplotlib.pyplot as plt
import numpy as np
from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable, BetaVariable
from brancher import inference
import brancher.functions as BF
T = 100
nu = LogNormalVariable(0.3, 1., 'nu')
x0 = NormalVariable(0., 1., 'x0')
b = BetaVariable(0.5, 1.5, 'b')
x = [x0]
names = ["x0"]
for t in range(1,T):
names.append("x{}".format(t))
x.append(NormalVariable(b * x[t - 1], nu, names[t]))
AR_model = ProbabilisticModel(x)
data = AR_model._get_sample(number_samples=1)
time_series = [float(data[xt].cpu().detach().numpy()) for xt in x]
true_b = data[b].cpu().detach().numpy()
true_nu = data[nu].cpu().detach().numpy()
print("The true coefficient is: {}".format(float(true_b)))
[xt.observe(data[xt][:, 0, :]) for xt in x]
Qnu = LogNormalVariable(0.5, 1., "nu", learnable=True)
Qb = BetaVariable(0.5, 0.5, "b", learnable=True)
variational_posterior = ProbabilisticModel([Qb, Qnu])
AR_model.set_posterior_model(variational_posterior)
inference.perform_inference(AR_model,
number_iterations=200,
number_samples=300,
optimizer='Adam',
lr=0.05)
loss_list = AR_model.diagnostics["loss curve"]
posterior_samples = AR_model._get_posterior_sample(2000)
nu_posterior_samples = posterior_samples[nu].cpu().detach().numpy().flatten()
b_posterior_samples = posterior_samples[b].cpu().detach().numpy().flatten()
b_mean = np.mean(b_posterior_samples)
b_sd = np.sqrt(np.var(b_posterior_samples))
print("The estimated coefficient is: {} +- {}".format(b_mean, b_sd))
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
ax1.plot(time_series)
ax1.set_title("Time series")
ax2.plot(np.array(loss_list))
ax2.set_title("Convergence")
ax2.set_xlabel("Iteration")
ax3.hist(b_posterior_samples, 25)
ax3.axvline(x=true_b, lw=2, c="r")
ax3.set_title("Posterior samples (b)")
ax3.set_xlim(0,1)
ax4.hist(nu_posterior_samples, 25)
ax4.axvline(x=true_nu, lw=2, c="r")
ax4.set_title("Posterior samples (nu)")
plt.show()
| true
| true
|
f70b2dba7099f61d4cf65957484d07a3eb6e18bf
| 21,084
|
py
|
Python
|
madgraph/iolibs/template_files/subtraction/commons/beam_factorization_BF.py
|
madnklo/madnklo
|
646a3db9c8efd7b4cb00e9d89b9197cd5394c01b
|
[
"NCSA"
] | 1
|
2019-12-14T15:25:38.000Z
|
2019-12-14T15:25:38.000Z
|
madgraph/iolibs/template_files/subtraction/commons/beam_factorization_BF.py
|
madnklo/madnklo
|
646a3db9c8efd7b4cb00e9d89b9197cd5394c01b
|
[
"NCSA"
] | 26
|
2018-10-08T15:49:32.000Z
|
2020-05-15T13:33:36.000Z
|
madgraph/iolibs/template_files/subtraction/commons/beam_factorization_BF.py
|
madnklo/madnklo
|
646a3db9c8efd7b4cb00e9d89b9197cd5394c01b
|
[
"NCSA"
] | 2
|
2019-03-25T17:28:48.000Z
|
2021-04-21T12:15:53.000Z
|
##########################################################################################
#
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
##########################################################################################
"""Implementation of NLO beam_factorization currents. These are the PDF counterterms as well
as the integrated initial state collinear counterterms."""
import os
import math
from madgraph.core.base_objects import EpsilonExpansion
import madgraph.various.misc as misc
import commons.utils as utils
import commons.QCD_local_currents as currents
import commons.factors_and_cuts as factors_and_cuts
from commons.integrated_current_expressions import HE
pjoin = os.path.join
CurrentImplementationError = utils.CurrentImplementationError
log = math.log
pi = math.pi
# All counterterms here adopt a xi-dependent distribution of the following form:
#
# Counterterm(xi) = F_+(xi) + [F] \delta(xi-1)
# (which can also be explicitly written)
# Counterterm(xi) = F(xi) + {F(xi)} \delta(xi-1) + [F] \delta(xi-1)
#
# where 'F' can either be a PDF counterterm or an integrated collinear ISR counterterm.
# Then each piece of the distribution is assigned a different value for its attribute
# 'distribution_type' as follows:
#
# F(xi) --> distribution_type = 'bulk'
# {F(xi)} --> distribution_type = 'counterterm'
# [F(xi)] --> distribution_type = 'endpoint'
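# As a concrete illustration (taken from kernel_gg of the PDF counterterm below):
# 'bulk' holds the xi-dependent splitting kernel, 'counterterm' holds the piece that
# the plus distribution subtracts at xi -> 1, and 'endpoint' holds the xi-independent
# coefficient of delta(xi-1), e.g. 11/6*CA - 2/3*NF*TR for the gluon PDF counterterm.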
#=========================================================================================
# PDF Counterterm
#=========================================================================================
class QCD_beam_factorization_F0(currents.QCDBeamFactorizationCurrent):
"""Implements the NLO QCD PDF counterterm of type F(xi)"""
distribution_types_implemented_in_this_class = ['bulk','counterterm','endpoint']
@classmethod
def does_implement_this_current(cls, current, model):
# Check the general properties common to NLO QCD collinear tree-level currents
init_vars = cls.common_does_implement_this_current(current, 2, 0)
if init_vars is None: return None
# Retrieve singular structure
ss = current.get('singular_structure').substructures[0]
# Check that it involves exactly one F structure with one leg.
if len(ss.substructures)==0:
factorization_structure = ss
elif len(ss.substructures)==1 and len(ss.substructures[0].substructures)==0:
factorization_structure = ss.substructures[0]
else:
return None
if factorization_structure.name() != 'F':
return None
if len(factorization_structure.legs) != 1:
return None
# Make sure the one leg of the F structure is initial-state
if not cls.is_initial(factorization_structure.legs[0]):
return None
# The current is valid (remember that this implements the PDF counterterm of
# all possible incoming flavors).
return init_vars
def evaluate_kernel(self, PS_point, process, xi, mu_r, mu_f, Q, normalization,
allowed_backward_evolved_flavors='ALL'):
""" Return an instance of BeamFactorizationCurrentEvaluation, whose 'values' entry
contains dictionaries specifying the counterterm in flavor space, for the value of xi
specified in argument."""
if allowed_backward_evolved_flavors != 'ALL':
raise CurrentImplementationError('The current %s must always be called with '%self.__class__.__name__+
"allowed_backward_evolved_flavors='ALL', not %s"%str(allowed_backward_evolved_flavors))
# Only the order epsilon of the scales pre-factor matters here.
prefactor = EpsilonExpansion({
0 : 1.,
1 : log(mu_r**2 / mu_f**2)
})
prefactor *= EpsilonExpansion({-1:1.})*normalization
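# The EpsilonExpansion({-1: 1.}) factor attaches the explicit 1/epsilon pole of the
# PDF counterterm to the finite kernels defined below.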
# Assign a fake xi for now if the distribution type is 'endpoint'
# TODO: this is not optimal, eventually we should put each of these three pieces in
# separate currents
if self.distribution_type == 'endpoint':
xi = 0.5
# Define the NLO QCD PDF counterterms kernels
kernel_gg = {
'bulk' : prefactor*(
2.*self.CA*( 1./ (1.-xi) + (1.-xi)/xi -1. + xi*(1-xi) )
),
'counterterm' : prefactor*( 2.*self.CA / (1.-xi) ),
'endpoint' : prefactor*( 11./6.*self.CA - 2./3.*self.NF*self.TR)
}
kernel_gq = {
'bulk' : prefactor*( self.CF*(1.+(1.-xi)**2)/xi ),
'counterterm' : None,
'endpoint' : None
}
kernel_qg = {
'bulk' : prefactor*( self.TR*(xi**2 + (1.-xi)**2) ),
'counterterm' : None,
'endpoint' : None
}
kernel_qq = {
'bulk' : prefactor*( self.CF*((1.+xi**2)/(1.-xi)) ),
'counterterm' : prefactor*( self.CF*((1.+xi**2)/(1.-xi)) ),
'endpoint' : None
}
active_quark_PDGs = tuple([pdg for pdg in range(1,7)+range(-1,-7,-1)
if pdg in self.beam_PDGs])
# Build the NLO flavor matrix
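# The flavor matrix maps each incoming reduced flavor to a dictionary whose keys are
# tuples of backward-evolved flavors and whose values are EpsilonExpansion kernels,
# e.g. flavor_matrix[21] = { (21,): kernel_gg, active_quark_PDGs: kernel_gq }.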
flavor_matrix = {}
for reduced_flavor in self.beam_PDGs:
# Gluon backward evolution
if reduced_flavor==21:
gluon_dict = {}
if kernel_gg[self.distribution_type] is not None:
gluon_dict[(21,)] = kernel_gg[self.distribution_type]
if active_quark_PDGs and kernel_gq[self.distribution_type] is not None:
gluon_dict[active_quark_PDGs] = kernel_gq[self.distribution_type]
if gluon_dict:
flavor_matrix[21] = gluon_dict
# Quark backward evolution
if reduced_flavor in active_quark_PDGs:
quark_dict = {}
if kernel_qg[self.distribution_type] is not None:
quark_dict[(21,)] = kernel_qg[self.distribution_type]
if kernel_qq[self.distribution_type] is not None:
quark_dict[(reduced_flavor,)] = kernel_qq[self.distribution_type]
if quark_dict:
flavor_matrix[reduced_flavor] = quark_dict
# Truncate all entries of the flavor matrix so as to remove irrelevant O(\eps) terms
for flav_in, flav_outs in flavor_matrix.items():
for flav_out, eps_expansion in flav_outs.items():
eps_expansion.truncate(max_power=0)
# Now assign the flavor matrix in the BeamFactorizationCurrentEvaluation instance
# If this is a physical contribution (i.e. not a counterterm) then we must enforce that
# the reduced kinematics is None as it will not even be read by MadNkLO.
evaluation = utils.BeamFactorizationCurrentEvaluation({
'spin_correlations' : [None,],
'color_correlations' : [None,],
'values' : { (0,0) : flavor_matrix }
})
return evaluation
#=========================================================================================
# PDF integrated initial-state single collinear counterterm
#=========================================================================================
class QCD_beam_factorization_single_collinear(currents.QCDBeamFactorizationCurrent):
"""Implements the NLO QCD initial-state single collinear integratated counterterm of type F(xi)"""
distribution_types_implemented_in_this_class = ['bulk','counterterm','endpoint']
@classmethod
def does_implement_this_current(cls, current, model):
# Check the general properties common to NLO QCD collinear tree-level currents
init_vars = cls.common_does_implement_this_current(current, 2, 0)
if init_vars is None: return None
# Retrieve singular structure
ss = current.get('singular_structure').substructures[0]
# Check that it involves exactly one collinear structure with two legs.
if len(ss.substructures)!=1:
return None
collinear_structure = ss.substructures[0]
if collinear_structure.name() != 'C':
return None
if len(collinear_structure.legs) != 2:
return None
# Make sure that one of the two legs of the C structure is initial-state
if not any(cls.is_initial(leg) for leg in collinear_structure.legs):
return None
# The current is valid (remember that this implements the integrated
# initial state collinear counterterm of all possible incoming flavors).
return init_vars
def evaluate_kernel(self, PS_point, process, xi, mu_r, mu_f, Q, normalization,
allowed_backward_evolved_flavors='ALL'):
""" Return an instance of BeamFactorizationCurrentEvaluation, whose 'values' entry
contains dictionaries specifying the counterterm in flavor space, for the value of xi
specified in argument."""
# Obtain Q_square.
Q_square = Q.square()
# Only terms up to order epsilon^2 of the scales prefactor matter here.
logMuQ = log(mu_r**2/Q_square)
prefactor = EpsilonExpansion({ 0 : 1., 1 : logMuQ, 2 : 0.5*logMuQ**2 })
prefactor *= normalization
# The additional 1/x part of the prefactor is included later during the PDF
# convolution of the event (using its 'Bjorken rescaling' attribute) because
# we must make sure that the plus distribution hits on it.
# Also, the same 1/x appears in the PDF counterterms as a result of the change
# of variable necessary to bring them in the form where the plus distribution
# only acts on the PDF. So it makes sense to keep it completely factorised.
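# (Hedged illustration, not in the original source: schematically, with xb' = xb*xi,
#  \int dxb dxi f(xb) [K(xi)]_+ M(xb*xi) = \int dxb' dxi (1/xi) f(xb'/xi) [K(xi)]_+ M(xb'),
#  so the 1/xi must sit inside what the plus distribution acts on, together with the
#  rescaled PDF, which is what deferring it to the 'Bjorken rescaling' attribute achieves.)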
# Input variables
y_0 = factors_and_cuts.y_0_prime
logy0 = log(y_0)
# Assign a fake x for now if the distribution type is 'endpoint'
# TODO: this is not optimal, eventually we should put each of these three pieces in
# separate currents
if self.distribution_type == 'endpoint':
x = 0.5
else:
x = xi
# In MadNkLO, we use the change of variable xb' = xb*xi so that the factor
# (Q^2)^\eps in Eq. 5.21 of https://arxiv.org/pdf/0903.1218.pdf actually reads
# (Q^2/(xi1*xi2))^\eps and the '+' distributions also act on it, which we realize
# by simply multiplying the Q^2 provided by the xi factor that must be set to one.
logMuQ_plus = log(mu_r**2/(Q_square*x))
prefactor_plus = EpsilonExpansion({ 0 : 1., 1 : logMuQ_plus, 2 : 0.5*logMuQ_plus**2 })
prefactor_plus *= normalization
log1mx = log(1.-x)
# Heaviside
theta_x_1my0 = 1. if (x-(1-y_0)) >= 0. else 0.
theta_1my0_x = 1. if ((1-y_0)-x) >= 0. else 0.
# Define the NLO QCD integrated initial-state single collinear counterterm kernels
color_factor = self.CA
kernel_gg = {
'bulk' : prefactor*color_factor*(EpsilonExpansion({
-1 : -2.*( 1./(1.-x) + (1.-x)/x - 1 + x*(1-x) ),
0 : (2.*log1mx / (1.-x))*(1.+theta_x_1my0) + (2.*logy0/(1.-x))*theta_1my0_x
+ 2.*( ((1.-x)/x) -1. + x*(1.-x) )*( log1mx*(1.+theta_x_1my0) + logy0*theta_1my0_x )
})),
'counterterm' : prefactor_plus*color_factor*(EpsilonExpansion({
-1 : -2.* ( 1./(1.-x) ) ,
0 : (2.*log1mx / (1.-x))*(1.+theta_x_1my0) ,
})),
'endpoint' : prefactor*color_factor*(EpsilonExpansion({
-2 : 1. ,
-1 : 0. ,
0 : -(math.pi**2/6.) + logy0**2
}))
}
color_factor = self.CA
kernel_gq = {
'bulk' : prefactor*color_factor*(EpsilonExpansion({
-1 : -(self.CF/self.CA)*(1.+(1.-x)**2) / x ,
0 : (self.CF/self.CA)*( ((1.+(1.-x)**2)/x)*( log1mx*(1.+theta_x_1my0) + logy0*theta_1my0_x ) + x )
})),
'counterterm' : None,
'endpoint' : None
}
color_factor = self.CF
kernel_qg = {
'bulk' : prefactor*color_factor*(EpsilonExpansion({
-1 : -(self.TR/self.CF)*(x**2+(1.-x)**2) ,
0 : (self.TR/self.CF)*( (x**2 + (1.-x)**2)*( log1mx*(1.+theta_x_1my0) + logy0*theta_1my0_x ) + 2.*x*(1.-x) )
})),
'counterterm' : None,
'endpoint' : None
}
color_factor = self.CF
kernel_qq = {
'bulk' : prefactor*color_factor*(EpsilonExpansion({
-1 : -((1.+x**2)/(1.-x)) ,
0 : (2.*log1mx / (1.-x))*(1.+theta_x_1my0) + (2.*logy0/(1.-x))*theta_1my0_x
- ( (1.+x)*( log1mx*(1.+theta_x_1my0)+logy0*theta_1my0_x ) -1.+x )
})),
'counterterm' : prefactor_plus*color_factor*(EpsilonExpansion({
-1 : -((1.+x**2)/(1.-x)) ,
0 : (2.*log1mx / (1.-x))*(1.+theta_x_1my0) ,
})),
'endpoint' : prefactor*color_factor*(EpsilonExpansion({
-2 : 1. ,
-1 : 3./2. ,
0 : -(math.pi**2/6.) + logy0**2
}))
}
active_quark_PDGs = tuple([pdg for pdg in range(1,7)+range(-1,-7,-1)
if pdg in self.beam_PDGs])
# Build the NLO flavor matrix
flavor_matrix = {}
for reduced_flavor in self.beam_PDGs:
# Gluon backward evolution
if reduced_flavor==21:
gluon_dict = {}
if kernel_gg[self.distribution_type] is not None:
gluon_dict[(21,)] = kernel_gg[self.distribution_type]
if active_quark_PDGs and kernel_gq[self.distribution_type] is not None:
gluon_dict[active_quark_PDGs] = kernel_gq[self.distribution_type]
if gluon_dict:
flavor_matrix[21] = gluon_dict
# Quark backward evolution
if reduced_flavor in active_quark_PDGs:
quark_dict = {}
if kernel_qg[self.distribution_type] is not None:
quark_dict[(21,)] = kernel_qg[self.distribution_type]
if kernel_qq[self.distribution_type] is not None:
quark_dict[(reduced_flavor,)] = kernel_qq[self.distribution_type]
if quark_dict:
flavor_matrix[reduced_flavor] = quark_dict
# Truncate all entries of the flavor matrix so as to remove irrelevant O(\eps) terms
for flav_in, flav_outs in flavor_matrix.items():
for flav_out, eps_expansion in flav_outs.items():
eps_expansion.truncate(max_power=0)
# Now apply the mask 'allowed_backward_evolved_flavors' if not set to 'ALL'
filtered_flavor_matrix = self.apply_flavor_mask(flavor_matrix,allowed_backward_evolved_flavors)
# Now assign the flavor matrix in the BeamFactorizationCurrentEvaluation instance
evaluation = utils.BeamFactorizationCurrentEvaluation({
'spin_correlations' : [None,],
'color_correlations' : [None,],
'values' : { (0,0) : filtered_flavor_matrix }
})
return evaluation
#=========================================================================================
# PDF integrated initial-state single soft-collinear counterterm
#=========================================================================================
class QCD_beam_factorization_single_softcollinear(currents.QCDBeamFactorizationCurrent):
"""Implements the NLO QCD initial-state single soft-collinear integgratated counterterm
of type F(xi). These are zero here since they have already been accounted for
in the soft counterterms."""
distribution_types_implemented_in_this_class = ['bulk','counterterm','endpoint']
# These integrated contributions are not really directly related to the physical
# properties of beam factorization (for instance they don't act on the flavor space) and
# therefore apply independently of it.
beam_types_implemented_in_this_class = 'ALL'
beam_PDGs_implemented_in_this_class = 'ALL'
# The soft-collinear integrated counterterm has been accounted for completely in the
# soft integrated counterterm
is_zero = True
def __init__(self, *args, **opts):
# Make sure it is initialized with the proper set of options and remove them
# before calling the mother constructor
if 'color_charge' not in opts:
raise CurrentImplementationError(
"The current '%s' must be instantiated with "%self.__class__.__name__+
" a 'color_charge' option specified.")
color_charge = opts.pop('color_charge')
super(QCD_beam_factorization_single_softcollinear, self).__init__(*args, **opts)
self.supports_helicity_assignment = False
# At this stage color_charge is the string of the argument to retrieve ('CA' or 'CF')
# And now that the mother constructor is called, the group factors have been initialized
# and we can retrieve them.
self.color_charge = getattr(self, color_charge)
@classmethod
def does_implement_this_current(cls, current, model):
# Check the general properties common to NLO QCD collinear tree-level currents
init_vars = cls.common_does_implement_this_current(current, 2, 0)
if init_vars is None:
return None
# If this is a BF current it will not have substructures
ss = current.get('singular_structure')
if len(ss.substructures)==0:
return None
# Retrieve singular structure
ss = current.get('singular_structure').substructures[0]
# Check that it involves exactly one collinear structure with two legs.
if len(ss.substructures)!=1:
return None
# Finally check that the singular structure and PDG matches
singular_structure = ss.substructures[0]
# Its main structure should be of collinear type
if singular_structure.name()!='C':
return None
# It should have only one leg left, the other one being in the nested soft structure
# It must be an initial-state leg.
if len(singular_structure.legs)!=1:
return None
# The leg that is not soft must be a quark or a gluon
if not abs(singular_structure.legs[0].pdg) in [21,]+range(1,7):
return None
# It should have exactly one nested structure
if len(singular_structure.substructures)!=1:
return None
sub_singular_structure = singular_structure.substructures[0]
# Make sure this substructure is soft
if sub_singular_structure.name()!='S':
return None
# Make sure it contains a single soft leg
if len(sub_singular_structure.legs)!=1:
return None
soft_leg = sub_singular_structure.legs[0]
# Make sure the soft leg is a massless final-state gluon
if model.get_particle(soft_leg.pdg).get('mass').upper()!='ZERO':
return None
if soft_leg.pdg != 21:
return None
# We now know that this current is implemented here. We return
# the specific color charge to instantiate this kernel with,
# in the form of the name of the group factor to retrieve upon
# initialization.
if singular_structure.legs[0].pdg == 21:
# This is a 'g > g g' soft-collinear splitting
init_vars['color_charge'] = 'CA'
else:
# This is a 'q > q g' soft-collinear splitting
init_vars['color_charge'] = 'CA'
return init_vars
| 44.859574
| 125
| 0.5738
|
kernel_gq = {
'bulk' : prefactor*color_factor*(EpsilonExpansion({
-1 : -(self.CF/self.CA)*(1.+(1.-x)**2) / x ,
0 : (self.CF/self.CA)*( ((1.+(1.-x)**2)/x)*( log1mx*(1.+theta_x_1my0) + logy0*theta_1my0_x ) + x )
})),
'counterterm' : None,
'endpoint' : None
}
color_factor = self.CF
kernel_qg = {
'bulk' : prefactor*color_factor*(EpsilonExpansion({
-1 : -(self.TR/self.CF)*(x**2+(1.-x)**2) ,
0 : (self.TR/self.CF)*( (x**2 + (1.-x)**2)*( log1mx*(1.+theta_x_1my0) + logy0*theta_1my0_x ) + 2.*x*(1.-x) )
})),
'counterterm' : None,
'endpoint' : None
}
color_factor = self.CF
kernel_qq = {
'bulk' : prefactor*color_factor*(EpsilonExpansion({
-1 : -((1.+x**2)/(1.-x)) ,
0 : (2.*log1mx / (1.-x))*(1.+theta_x_1my0) + (2.*logy0/(1.-x))*theta_1my0_x
- ( (1.+x)*( log1mx*(1.+theta_x_1my0)+logy0*theta_1my0_x ) -1.+x )
})),
'counterterm' : prefactor_plus*color_factor*(EpsilonExpansion({
-1 : -((1.+x**2)/(1.-x)) ,
0 : (2.*log1mx / (1.-x))*(1.+theta_x_1my0) ,
})),
'endpoint' : prefactor*color_factor*(EpsilonExpansion({
-2 : 1. ,
-1 : 3./2. ,
0 : -(math.pi**2/6.) + logy0**2
}))
}
active_quark_PDGs = tuple([pdg for pdg in range(1,7)+range(-1,-7,-1)
if pdg in self.beam_PDGs])
# Build the NLO flavor matrix
flavor_matrix = {}
for reduced_flavor in self.beam_PDGs:
# Gluon backward evolution
if reduced_flavor==21:
gluon_dict = {}
if kernel_gg[self.distribution_type] is not None:
gluon_dict[(21,)] = kernel_gg[self.distribution_type]
if active_quark_PDGs and kernel_gq[self.distribution_type] is not None:
gluon_dict[active_quark_PDGs] = kernel_gq[self.distribution_type]
if gluon_dict:
flavor_matrix[21] = gluon_dict
# Quark backward evolution
if reduced_flavor in active_quark_PDGs:
quark_dict = {}
if kernel_qg[self.distribution_type] is not None:
quark_dict[(21,)] = kernel_qg[self.distribution_type]
if kernel_qq[self.distribution_type] is not None:
quark_dict[(reduced_flavor,)] = kernel_qq[self.distribution_type]
if quark_dict:
flavor_matrix[reduced_flavor] = quark_dict
# Truncate all entries of the flavor matrix so as to remove irrelevant O(\eps) terms
for flav_in, flav_outs in flavor_matrix.items():
for flav_out, eps_expansion in flav_outs.items():
eps_expansion.truncate(max_power=0)
# Now apply the mask 'allowed_backward_evolved_flavors' if not set to 'ALL'
filtered_flavor_matrix = self.apply_flavor_mask(flavor_matrix,allowed_backward_evolved_flavors)
# Now assign the flavor matrix in the BeamFactorizationCurrentEvaluation instance
evaluation = utils.BeamFactorizationCurrentEvaluation({
'spin_correlations' : [None,],
'color_correlations' : [None,],
'values' : { (0,0) : filtered_flavor_matrix }
})
return evaluation
#=========================================================================================
# PDF integrated initial-state single soft-collinear counterterm
#=========================================================================================
class QCD_beam_factorization_single_softcollinear(currents.QCDBeamFactorizationCurrent):
distribution_types_implemented_in_this_class = ['bulk','counterterm','endpoint']
# These integrated contributions are not really directly related to the physical
# properties of beam factorization (for instance they don't act on the flavor space) and
# therefore apply independently of it.
beam_types_implemented_in_this_class = 'ALL'
beam_PDGs_implemented_in_this_class = 'ALL'
is_zero = True
def __init__(self, *args, **opts):
if 'color_charge' not in opts:
raise CurrentImplementationError(
"The current '%s' must be instantiated with "%self.__class__.__name__+
" a 'color_charge' option specified.")
color_charge = opts.pop('color_charge')
super(QCD_beam_factorization_single_softcollinear, self).__init__(*args, **opts)
self.supports_helicity_assignment = False
self.color_charge = getattr(self, color_charge)
@classmethod
def does_implement_this_current(cls, current, model):
init_vars = cls.common_does_implement_this_current(current, 2, 0)
if init_vars is None:
return None
ss = current.get('singular_structure')
if len(ss.substructures)==0:
return None
ss = current.get('singular_structure').substructures[0]
if len(ss.substructures)!=1:
return None
singular_structure = ss.substructures[0]
if singular_structure.name()!='C':
return None
if len(singular_structure.legs)!=1:
return None
if not abs(singular_structure.legs[0].pdg) in [21,]+range(1,7):
return None
if len(singular_structure.substructures)!=1:
return None
sub_singular_structure = singular_structure.substructures[0]
if sub_singular_structure.name()!='S':
return None
if len(sub_singular_structure.legs)!=1:
return None
soft_leg = sub_singular_structure.legs[0]
if model.get_particle(soft_leg.pdg).get('mass').upper()!='ZERO':
return None
if soft_leg.pdg != 21:
return None
if singular_structure.legs[0].pdg == 21:
init_vars['color_charge'] = 'CA'
else:
init_vars['color_charge'] = 'CA'
return init_vars
| true
| true
|