code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import os
import click
import logging
from shclassify import Tree, log
# Persistent usage log written next to this script (e.g. cli.py.log).
usage_log_path = os.path.abspath(__file__) + '.log'
usage = logging.FileHandler(usage_log_path)
usage.setLevel(logging.INFO)
usage_fmt = logging.Formatter(
    '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S')
usage.setFormatter(usage_fmt)
# Dedicated 'usage' logger; propagate=False keeps its records out of the root logger.
usage_log = logging.getLogger('usage')
usage_log.propagate = False
usage_log.addHandler(usage)
# Console handler attached to the package logger; its level is set later in cli().
console = logging.StreamHandler()
console_fmt = logging.Formatter('%(name)-12s %(levelname)-8s %(message)s')
console.setFormatter(console_fmt)
log.addHandler(console)
def create_output_path(ctx, param, value):
    """Click callback for --outfile.

    When the user supplies a value it is returned unchanged; otherwise the
    default is '<observations_file>.pred'.
    """
    if value is not None:
        return value
    return ctx.params.get('observations_file') + '.pred'
@click.command('shclassify',
               help=('Predict landcover class for \'OBSERVATIONS_FILE\''
                     ' using SLS HRIS model'))
@click.argument('observations-file', type=click.Path(exists=True))
# NOTE(review): 'delimeter' is a typo in user-facing help text (runtime string,
# left unchanged here).
@click.option('--delim', '-d', default=',',
              type=click.Choice([',', r'\t', ';']),
              help='field delimeter')
@click.option('--index-col', '-i', default=0, type=int,
              help='index of column with observation IDs - 0 is first column')
@click.option('--chunksize', '-c', default=100000, type=int,
              help='lines to read and predict at a time')
@click.option('--verbose', '-v', is_flag=True)
@click.option('--outfile', '-o', callback=create_output_path,
              type=click.Path(),
              help='path to use for output (prediction) data')
def cli(observations_file, delim, index_col, chunksize, verbose, outfile):
    """Command entry point: classify OBSERVATIONS_FILE and write predictions."""
    # Record who invoked the tool in the persistent usage log.
    msg = '%s invoked cli' %os.environ.get('USER', 'anonymous')
    usage_log.info(msg)
    # --verbose raises console output from WARNING to INFO.
    level = logging.INFO if verbose else logging.WARNING
    console.setLevel(level)
    click.echo('Creating classification tree')
    tree = Tree()
    click.echo(
        'Predicting classes for observations in {}'.format(observations_file)
    )
    # Streams the input file in chunks; overwrite=False protects existing output.
    tree.predict_file(observations_file, outfile,
                      overwrite=False, index_col=index_col, sep=delim,
                      chunksize=chunksize)
click.echo('Predictions saved to file: {}'.format(outfile)) | shclassify/scripts/cli.py | import os
import click
import logging
from shclassify import Tree, log
usage_log_path = os.path.abspath(__file__) + '.log'
usage = logging.FileHandler(usage_log_path)
usage.setLevel(logging.INFO)
usage_fmt = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
usage.setFormatter(usage_fmt)
usage_log = logging.getLogger('usage')
usage_log.propagate = False
usage_log.addHandler(usage)
console = logging.StreamHandler()
console_fmt = logging.Formatter('%(name)-12s %(levelname)-8s %(message)s')
console.setFormatter(console_fmt)
log.addHandler(console)
def create_output_path(ctx, param, value):
path = value
if path is None:
path = ctx.params.get('observations_file')
path += '.pred'
return path
@click.command('shclassify',
help=('Predict landcover class for \'OBSERVATIONS_FILE\''
' using SLS HRIS model'))
@click.argument('observations-file', type=click.Path(exists=True))
@click.option('--delim', '-d', default=',',
type=click.Choice([',', r'\t', ';']),
help='field delimeter')
@click.option('--index-col', '-i', default=0, type=int,
help='index of column with observation IDs - 0 is first column')
@click.option('--chunksize', '-c', default=100000, type=int,
help='lines to read and predict at a time')
@click.option('--verbose', '-v', is_flag=True)
@click.option('--outfile', '-o', callback=create_output_path,
type=click.Path(),
help='path to use for output (prediction) data')
def cli(observations_file, delim, index_col, chunksize, verbose, outfile):
msg = '%s invoked cli' %os.environ.get('USER', 'anonymous')
usage_log.info(msg)
level = logging.INFO if verbose else logging.WARNING
console.setLevel(level)
click.echo('Creating classification tree')
tree = Tree()
click.echo(
'Predicting classes for observations in {}'.format(observations_file)
)
tree.predict_file(observations_file, outfile,
overwrite=False, index_col=index_col, sep=delim,
chunksize=chunksize)
click.echo('Predictions saved to file: {}'.format(outfile)) | 0.294621 | 0.058158 |
import numpy as np
from pysb.simulator.scipyode import ScipyOdeSimulator
from pysb.tools.sensitivity_analysis import \
InitialsSensitivity
from pysb.examples.tyson_oscillator import model
tspan = np.linspace(0, 200, 5001)
def obj_func_cell_cycle(trajectory):
    """
    Calculate the frequency of the Y3
    Parameters
    ----------
    trajectory : vector_like
        Simulation trajectory for the Y3 observable
    Returns
    -------
    local_freq : float
        frequency value of Y3 observable
    """
    # Drop the last point so timestep aligns with the difference array y below.
    timestep = tspan[:-1]
    # Negated first difference: y[n] > 0 means the trajectory decreases at n.
    y = trajectory[:-1] - trajectory[1:]
    freq = 0  # NOTE(review): incremented but never used in the returned value
    local_times = []
    prev = y[0]
    # easy calculation of frequency,
    # find two positions where slope changes
    for n in range(1, len(y)):
        # A negative-to-positive sign flip marks a slope change (local extremum).
        if y[n] > 0 > prev:
            local_times.append(timestep[n])
            freq += 1
        prev = y[n]
    local_times = np.array(local_times)
    # NOTE(review): raises ZeroDivisionError when no sign change is found —
    # presumably trajectories are always oscillatory; confirm.
    local_freq = np.average(local_times)/len(local_times)*2
    return local_freq
def run():
    """Run the initial-concentration sensitivity analysis for the Tyson model
    and save the resulting plots into the 'tyson_sensitivity' directory."""
    # The observable of the model
    observable = 'Y3'
    # The values of each initial concentration to samples
    # These values will be per initial concentration
    vals = [.8, 1.0, 1.2]
    # need to create a solver to run the model
    solver = ScipyOdeSimulator(model, tspan)
    # initialize the sensitivity class
    sens = InitialsSensitivity(
        values_to_sample=vals,
        observable=observable,
        objective_function=obj_func_cell_cycle,
        solver=solver
    )
    # runs the function, can pass save_name and out_dir to save sens matrices
    sens.run()
    # some sample plotting commands to help view the sensitivities
    sens.create_individual_pairwise_plots(save_name='pairwise_individual',
                                          out_dir='tyson_sensitivity')
    sens.create_plot_p_h_pprime(save_name='matrices',
                                out_dir='tyson_sensitivity')
    # creates a heatplot of all initial concentration in a mirrored grid
    # also decomposed heatplot into single initial concentration species
    sens.create_boxplot_and_heatplot(save_name='tyson_sensitivity',
                                     out_dir='tyson_sensitivity',
                                     show=False)
    print("Results saved in tyson_sensitivity directory")
if __name__ == '__main__':
run() | pysb/examples/tools/run_sensitivity_analysis_tyson.py | import numpy as np
from pysb.simulator.scipyode import ScipyOdeSimulator
from pysb.tools.sensitivity_analysis import \
InitialsSensitivity
from pysb.examples.tyson_oscillator import model
tspan = np.linspace(0, 200, 5001)
def obj_func_cell_cycle(trajectory):
"""
Calculate the frequency of the Y3
Parameters
----------
trajectory : vector_like
Simulation trajectory for the Y3 observable
Returns
-------
local_freq : float
frequency value of Y3 observable
"""
timestep = tspan[:-1]
y = trajectory[:-1] - trajectory[1:]
freq = 0
local_times = []
prev = y[0]
# easy calculation of frequency,
# find two positions where slope changes
for n in range(1, len(y)):
if y[n] > 0 > prev:
local_times.append(timestep[n])
freq += 1
prev = y[n]
local_times = np.array(local_times)
local_freq = np.average(local_times)/len(local_times)*2
return local_freq
def run():
# The observable of the model
observable = 'Y3'
# The values of each initial concentration to samples
# These values will be per initial concentration
vals = [.8, 1.0, 1.2]
# need to create a solver to run the model
solver = ScipyOdeSimulator(model, tspan)
# initialize the sensitivity class
sens = InitialsSensitivity(
values_to_sample=vals,
observable=observable,
objective_function=obj_func_cell_cycle,
solver=solver
)
# runs the function, can pass save_name and out_dir to save sens matrices
sens.run()
# some sample plotting commands to help view the sensitivities
sens.create_individual_pairwise_plots(save_name='pairwise_individual',
out_dir='tyson_sensitivity')
sens.create_plot_p_h_pprime(save_name='matrices',
out_dir='tyson_sensitivity')
# creates a heatplot of all initial concentration in a mirrored grid
# also decomposed heatplot into single initial concentration species
sens.create_boxplot_and_heatplot(save_name='tyson_sensitivity',
out_dir='tyson_sensitivity',
show=False)
print("Results saved in tyson_sensitivity directory")
if __name__ == '__main__':
run() | 0.859487 | 0.687155 |
import os
import sys
sys.path.append('../..')
from Lib.ConfigClass import Config, singular_colors
import json
# Input scene tree, render output location, and shared texture/material assets.
# NOTE(review): machine-specific absolute paths — only valid on the author's host.
scene_path = '/home/wangsd/Workspace/foliation-results/outputs/scenes/paper/teaser/'
output_path = '/pub/data/wangsd/images/teaser'
envmap_path = '/home/wangsd/Workspace/cg/data/envmap/gl-hdr-02.hdr'
checkerboard1 = '/home/wangsd/Workspace/cg/data/texture/checkerboard_10_color3.png'
checkerboard2 = '/home/wangsd/Workspace/cg/data/texture/checkerboard_10_color4.png'
checkerboard3 = '/home/wangsd/Workspace/cg/data/texture/checkerboard_10_color5.png'
material1 = 'Knittr.blend'
# For every scene directory containing a mesh.obj, emit two render configs:
# config.1.json (textured render) and config.2.json (UV-grid render).
for root, dirs, _ in os.walk(scene_path):
    for d in dirs:
        if os.path.exists(os.path.join(root, d, 'mesh.obj')):
            print('Processing', os.path.join(root, d))
            config_dir = os.path.join(root, d, 'configs')
            if not os.path.exists(config_dir):
                os.mkdir(config_dir)
            # Shared render settings for both variants.
            config = Config()
            config.scene_path = os.path.join(root, d + '/')
            config.envmap_path = envmap_path
            config.transform_json_name = 'transform.json'
            config.mode = 'single'
            config.use_envmap = True
            config.width = 2000
            config.height = 2000
            config.plane = None
            config.material = None
            config.show_loops = False
            config.zero_scale = 0.02
            config.pole_scale = 0.02
            config.show_singularities = True
            config.cut_mode = 'Plain'
            config.uv_add = (0.05, 0.05)
            # NOTE(review): 'if True' looks like a leftover variant toggle.
            if True:
                config.material_filename = material1
                config.texture_path = checkerboard1
                config.uv_multiply = (2.0, 0.0)
                config.output_path = os.path.join(output_path, d + '.png')
                config.save_config(os.path.join(config_dir, 'config.1.json'))
                # Second variant: square UV tiling, separate output image.
                config.uv_multiply = (2.0, 2.0)
                config.output_path = os.path.join(output_path, d + '_uv.png')
config.save_config(os.path.join(config_dir, 'config.2.json')) | Blender/Scripts/wangsd/scripts/teaser.py | import os
import sys
sys.path.append('../..')
from Lib.ConfigClass import Config, singular_colors
import json
scene_path = '/home/wangsd/Workspace/foliation-results/outputs/scenes/paper/teaser/'
output_path = '/pub/data/wangsd/images/teaser'
envmap_path = '/home/wangsd/Workspace/cg/data/envmap/gl-hdr-02.hdr'
checkerboard1 = '/home/wangsd/Workspace/cg/data/texture/checkerboard_10_color3.png'
checkerboard2 = '/home/wangsd/Workspace/cg/data/texture/checkerboard_10_color4.png'
checkerboard3 = '/home/wangsd/Workspace/cg/data/texture/checkerboard_10_color5.png'
material1 = 'Knittr.blend'
for root, dirs, _ in os.walk(scene_path):
for d in dirs:
if os.path.exists(os.path.join(root, d, 'mesh.obj')):
print('Processing', os.path.join(root, d))
config_dir = os.path.join(root, d, 'configs')
if not os.path.exists(config_dir):
os.mkdir(config_dir)
config = Config()
config.scene_path = os.path.join(root, d + '/')
config.envmap_path = envmap_path
config.transform_json_name = 'transform.json'
config.mode = 'single'
config.use_envmap = True
config.width = 2000
config.height = 2000
config.plane = None
config.material = None
config.show_loops = False
config.zero_scale = 0.02
config.pole_scale = 0.02
config.show_singularities = True
config.cut_mode = 'Plain'
config.uv_add = (0.05, 0.05)
if True:
config.material_filename = material1
config.texture_path = checkerboard1
config.uv_multiply = (2.0, 0.0)
config.output_path = os.path.join(output_path, d + '.png')
config.save_config(os.path.join(config_dir, 'config.1.json'))
config.uv_multiply = (2.0, 2.0)
config.output_path = os.path.join(output_path, d + '_uv.png')
config.save_config(os.path.join(config_dir, 'config.2.json')) | 0.088618 | 0.045948 |
from os import mkdir, rmdir, getcwd
from os.path import join, exists
from shutil import rmtree
from python_utility.powerline.vagrant import VagrantSegment
from tests.constants import TEMPORARY_DIRECTORY
# TODO: The vagrant sub-process cannot access the temporary directory. What is
# a better practice? The insecure directory above accepted on SonarQube for
# now.
# TEMPORARY_DIRECTORY = tempfile.TemporaryDirectory(dir='/tmp').name
def test_create_test_directory() -> None:
    """Create a fresh shared temp directory; later tests rely on it existing."""
    if exists(TEMPORARY_DIRECTORY):
        rmtree(TEMPORARY_DIRECTORY)
    mkdir(TEMPORARY_DIRECTORY)
    assert exists(TEMPORARY_DIRECTORY)
def test_vagrant_file_exists() -> None:
    """The repo root has a Vagrantfile; the empty temp directory does not."""
    assert VagrantSegment.vagrant_file_exists(getcwd())
    assert not VagrantSegment.vagrant_file_exists(TEMPORARY_DIRECTORY)
def test_vagrant_status_raises_on_empty_directory() -> None:
    # NOTE(review): despite the name, no exception is expected — the status of a
    # directory without vagrant state is simply reported as 'unknown'.
    assert VagrantSegment.vagrant_status(TEMPORARY_DIRECTORY) == 'unknown'
def test_callable_no_vagrant_directory() -> None:
    """The segment reports a missing .vagrant directory."""
    segment = VagrantSegment()
    segment_info = {
        'shortened_path': TEMPORARY_DIRECTORY
    }
    assert segment(None, segment_info, None) == [{
        'contents': '.vagrant directory not found',
        'highlight_groups': ['information:regular'],
    }]
def test_callable_no_vagrant_file() -> None:
    """With only a .vagrant directory present, the missing Vagrantfile is reported."""
    segment = VagrantSegment()
    segment_info = {
        'shortened_path': TEMPORARY_DIRECTORY
    }
    vagrant_directory = join(TEMPORARY_DIRECTORY, '.vagrant')
    mkdir(vagrant_directory)
    assert segment(None, segment_info, None) == [{
        'contents': 'Vagrantfile not found',
        'highlight_groups': ['information:regular'],
    }]
    # Clean up so later tests start from an empty temp directory.
    rmdir(vagrant_directory)
def test_callable_vagrant_file_and_directory_exist() -> None:
    """With both a .vagrant directory and a Vagrantfile present, the segment
    reports the machine state ('not created', since `vagrant up` never ran)."""
    segment = VagrantSegment()
    segment_info = {
        'shortened_path': TEMPORARY_DIRECTORY
    }
    vagrant_directory = join(TEMPORARY_DIRECTORY, '.vagrant')
    mkdir(vagrant_directory)
    # Create an empty Vagrantfile; the file handle itself is not needed
    # (the original bound it to an unused `fp`).
    with open(join(TEMPORARY_DIRECTORY, 'Vagrantfile'), 'w'):
        pass
    assert segment(None, segment_info, None) == [{
        'contents': 'not created',
        'highlight_groups': ['information:regular'],
    }]
    # NOTE(review): the Vagrantfile is intentionally left behind; the whole temp
    # directory is removed by test_remove_test_directory.
    rmtree(vagrant_directory)
def test_vagrant_directory_exists() -> None:
    """Detection of the .vagrant directory toggles with its creation/removal."""
    vagrant_directory = join(TEMPORARY_DIRECTORY, '.vagrant')
    mkdir(vagrant_directory)
    assert VagrantSegment.vagrant_directory_exists(TEMPORARY_DIRECTORY)
    rmdir(vagrant_directory)
    assert not VagrantSegment.vagrant_directory_exists(TEMPORARY_DIRECTORY)
def test_remove_test_directory() -> None:
    """Final cleanup: delete the shared temporary directory."""
    rmtree(TEMPORARY_DIRECTORY)
assert not exists(TEMPORARY_DIRECTORY) | tests/powerline/test_vagrant.py | from os import mkdir, rmdir, getcwd
from os.path import join, exists
from shutil import rmtree
from python_utility.powerline.vagrant import VagrantSegment
from tests.constants import TEMPORARY_DIRECTORY
# TODO: The vagrant sub-process cannot access the temporary directory. What is
# a better practice? The insecure directory above accepted on SonarQube for
# now.
# TEMPORARY_DIRECTORY = tempfile.TemporaryDirectory(dir='/tmp').name
def test_create_test_directory() -> None:
if exists(TEMPORARY_DIRECTORY):
rmtree(TEMPORARY_DIRECTORY)
mkdir(TEMPORARY_DIRECTORY)
assert exists(TEMPORARY_DIRECTORY)
def test_vagrant_file_exists() -> None:
assert VagrantSegment.vagrant_file_exists(getcwd())
assert not VagrantSegment.vagrant_file_exists(TEMPORARY_DIRECTORY)
def test_vagrant_status_raises_on_empty_directory() -> None:
assert VagrantSegment.vagrant_status(TEMPORARY_DIRECTORY) == 'unknown'
def test_callable_no_vagrant_directory() -> None:
segment = VagrantSegment()
segment_info = {
'shortened_path': TEMPORARY_DIRECTORY
}
assert segment(None, segment_info, None) == [{
'contents': '.vagrant directory not found',
'highlight_groups': ['information:regular'],
}]
def test_callable_no_vagrant_file() -> None:
segment = VagrantSegment()
segment_info = {
'shortened_path': TEMPORARY_DIRECTORY
}
vagrant_directory = join(TEMPORARY_DIRECTORY, '.vagrant')
mkdir(vagrant_directory)
assert segment(None, segment_info, None) == [{
'contents': 'Vagrantfile not found',
'highlight_groups': ['information:regular'],
}]
rmdir(vagrant_directory)
def test_callable_vagrant_file_and_directory_exist() -> None:
segment = VagrantSegment()
segment_info = {
'shortened_path': TEMPORARY_DIRECTORY
}
vagrant_directory = join(TEMPORARY_DIRECTORY, '.vagrant')
mkdir(vagrant_directory)
with open(join(TEMPORARY_DIRECTORY, 'Vagrantfile'), 'w') as fp:
pass
assert segment(None, segment_info, None) == [{
'contents': 'not created',
'highlight_groups': ['information:regular'],
}]
rmtree(vagrant_directory)
def test_vagrant_directory_exists() -> None:
vagrant_directory = join(TEMPORARY_DIRECTORY, '.vagrant')
mkdir(vagrant_directory)
assert VagrantSegment.vagrant_directory_exists(TEMPORARY_DIRECTORY)
rmdir(vagrant_directory)
assert not VagrantSegment.vagrant_directory_exists(TEMPORARY_DIRECTORY)
def test_remove_test_directory() -> None:
rmtree(TEMPORARY_DIRECTORY)
assert not exists(TEMPORARY_DIRECTORY) | 0.226784 | 0.182589 |
import random
import time
from enum import Enum
import numpy as np
import pandas as pd
from scipy import sparse
from sklearn.decomposition import NMF
from sklearn.metrics import confusion_matrix
class CurrencyRating(Enum):
    """Rating weight contributed by one instrument, keyed by its currency.

    NON_SWISS is a flat per-instrument weight applied to non-Swiss clients;
    DEFAULT is used for currencies not listed here.
    """
    CHF = 5
    GBP = 6
    EUR = 7
    USD = 8
    NON_SWISS = 10
    DEFAULT = 1
def suggest_investments(H: np.ndarray, W: np.ndarray, unique_investments: np.ndarray, unique_portfolios: np.ndarray,
                        portfolios: pd.DataFrame, extended_positions: pd.DataFrame, potential_investments: list,
                        potential_investors: list, min_swiss_rating: int = 500, max_nonswiss_rating: int = 800,
                        prediction_threshold: float = 0.01):
    """Suggest instruments per investor from the factorized rating matrix W @ H.

    Investors failing the rating gate (check_rating) get no suggestions; for the
    rest, every valid candidate whose reconstructed score reaches
    prediction_threshold is suggested. Returns {investor_id_str: [instrument_id_str]}.
    """
    result = {}
    for potential_investor in potential_investors:
        current_investments = extended_positions.loc[extended_positions['PortfolioID'] == potential_investor]
        rating_allowed = check_rating(potential_investor, portfolios, current_investments, max_nonswiss_rating,
                                      min_swiss_rating)
        if not rating_allowed:
            continue
        # NOTE(review): np.append in a loop copies the array each time; a list
        # collected then converted once would avoid the quadratic cost.
        score = np.array([])
        user = find_index(potential_investor, unique_portfolios)
        for potential_investment in potential_investments:
            # only do the prediction if investment is valid
            investment_valid = check_valid_investment(current_investments, potential_investment, unique_investments)
            if not investment_valid:
                # Keep positions aligned with potential_investments by storing 0.
                score = np.append(score, 0)
                continue
            item = find_index(potential_investment, unique_investments)
            # compute prediction
            dot_product = W[user, :].dot(H[:, item])
            score = np.append(score, dot_product)
        y_pred = np.where(score >= prediction_threshold, 1, 0)
        result[str(potential_investor)] = [str(potential_investments[i]) for i in range(len(y_pred)) if y_pred[i] == 1]
    return result
def find_index(value: int, array: np.ndarray):
    """Return the lowest index at which *array* equals *value*."""
    matching_positions = np.where(array == value)[0]
    return matching_positions.min()
def check_valid_investment(current_investments: pd.DataFrame, potential_investment: int,
                           unique_investments: np.ndarray):
    """A candidate is valid when the portfolio does not already hold it and it
    was seen during training (appears in unique_investments)."""
    already_held = potential_investment in current_investments.values
    known_to_model = bool((unique_investments == potential_investment).any())
    return (not already_held) and known_to_model
def check_rating(potential_investor: int, portfolios: pd.DataFrame, current_investments: pd.DataFrame,
                 max_nonswiss_rating: int, min_swiss_rating: int) -> bool:
    """Decide whether suggestions are allowed for this client.

    Swiss (CHF) clients need a rating of at least min_swiss_rating; non-Swiss
    clients must stay at or below max_nonswiss_rating.
    """
    # A client counts as Swiss when the portfolio's currency is CHF.
    is_swiss = portfolios.loc[portfolios['PortfolioID'] == potential_investor]['Currency'].values[0] == 'CHF'
    if is_swiss:
        rating = compute_instrument_rating_for_swiss_clients(current_investments)
    else:
        rating = compute_instrument_rating_for_non_swiss_clients(current_investments)
    if is_swiss and rating < min_swiss_rating:
        print("Swiss client " + str(potential_investor) + " rating too low, no investment suggested")
        return False
    elif not is_swiss and rating > max_nonswiss_rating:
        print("Non-Swiss client " + str(potential_investor) + " rating too high, no investment suggested")
        return False
    return True
def compute_instrument_rating_for_non_swiss_clients(instruments: pd.DataFrame):
    """Non-Swiss clients: a flat NON_SWISS weight for every held instrument."""
    rating = len(instruments) * CurrencyRating.NON_SWISS.value
    return rating
def compute_instrument_rating_for_swiss_clients(instruments: pd.DataFrame):
    """Sum currency weights over active (not ignored, not expired) instruments."""
    rating = 0
    valid_instruments = instruments.loc[(~instruments["Ignore"]) & (~instruments["Expired"])]
    for currency in valid_instruments["Currency"]:
        # EAFP: a currency missing from the enum falls back to DEFAULT.
        try:
            rating += CurrencyRating[currency].value
        except KeyError:
            rating += CurrencyRating.DEFAULT.value
    return rating
def predict(H: np.ndarray, W: np.ndarray, X_test: np.ndarray, threshold=0.01):
    """Binary prediction for (user, item) pairs: 1 where the reconstructed
    rating W[user] @ H[:, item] reaches *threshold*, else 0."""
    scores = np.array([W[user].dot(H[:, item]) for user, item in X_test])
    return (scores >= threshold).astype(int)
def compute_metrics(y_pred: np.ndarray, y_test: np.ndarray):
    """Return (precision, recall) for binary predictions.

    Assumes a 2x2 confusion matrix, i.e. both classes occur in the data.
    """
    confusion = confusion_matrix(y_test, y_pred)
    # sklearn's binary confusion matrix flattens row-major to [TN, FP, FN, TP].
    TN, FP, FN, TP = np.ravel(confusion)
    # NOTE(review): divides by zero when there are no predicted (or actual)
    # positives — confirm that cannot happen upstream.
    precision = TP / (TP + FP)
    recall = TP / (TP + FN)
    return precision, recall
def add_zeros(X_test: np.ndarray, X_train: np.ndarray, unique_investments: np.ndarray, unique_portfolios: np.ndarray,
              y_test: np.ndarray, ratio: float = 1):
    # Since our test set also contains only positive examples, we want to add some zero values: we randomly generate a
    # pair (user, item) and, if there isn't a position for it, we add it with rating zero
    # NOTE(review): with the default ratio=1, new_length == len(X_test) and the
    # loop never executes, so no zeros are added — presumably the target should
    # be len(X_test) * (1 + ratio); confirm intent.
    new_length = int(len(X_test) * ratio)
    X = np.concatenate((X_train, X_test), axis=0)
    while len(X_test) < new_length:
        random_user_index = random.randint(0, len(unique_portfolios) - 1)
        random_item_index = random.randint(0, len(unique_investments) - 1)
        entry = np.array([random_user_index, random_item_index])
        # Only add the pair if it is not already a known (positive) position.
        if not any(np.equal(X, entry).all(1)):
            # np.append copies the whole array each iteration — quadratic, but simple.
            X_test = np.append(X_test, [entry], axis=0)
            y_test = np.append(y_test, 0)
    return X_test, y_test
def create_user_item_df(positions: pd.DataFrame):
    """Build a shuffled (User, Item, Rating) frame with implicit rating 1 per position."""
    unique_portfolios, user_indices = compute_unique_values_and_indices(positions["PortfolioID"])
    unique_investments, item_indices = compute_unique_values_and_indices(positions["InstrumentID"])
    # Implicit feedback: every observed position counts as a positive (1) rating.
    ratings = [1] * len(user_indices)
    user_item_rating_df = pd.DataFrame({"User": user_indices,
                                        "Item": item_indices,
                                        "Rating": ratings})
    # Shuffle rows so the later sequential 80/20 split is effectively random.
    user_item_rating_df = user_item_rating_df.sample(frac=1).reset_index(drop=True)
    return unique_investments, unique_portfolios, user_item_rating_df
def compute_unique_values_and_indices(column: pd.Series):
    """Map each value of *column* to a dense integer index.

    Indices follow order of first appearance; returns (unique_values, indices).
    """
    unique_values = column.unique()
    lookup = {value: position for position, value in enumerate(unique_values)}
    return unique_values, column.map(lookup)
def create_user_item_df_old(positions: pd.DataFrame):
    """Superseded by create_user_item_df: same mapping, built with per-row
    list.index lookups (O(n*m)) instead of a dict."""
    unique_portfolios = positions["PortfolioID"].unique().tolist()
    unique_investments = positions["InstrumentID"].unique().tolist()
    user_indices = []
    item_indices = []
    for index, row in positions.iterrows():
        # list.index is a linear scan per row — slow on large frames.
        user_indices += [unique_portfolios.index(row["PortfolioID"])]
        item_indices += [unique_investments.index(row["InstrumentID"])]
    ratings = [1] * len(user_indices)
    user_item_rating_df = pd.DataFrame({"User": user_indices,
                                        "Item": item_indices,
                                        "Rating": ratings})
    user_item_rating_df = user_item_rating_df.sample(frac=1).reset_index(drop=True)
    return unique_investments, unique_portfolios, user_item_rating_df
def preprocess(positions: pd.DataFrame):
    """Drop rare portfolios/instruments to densify the interaction matrix."""
    min_transactions = 3
    min_portfolios = 5
    # Keep portfolios with >= 3 positions, then instruments held by >= 5 portfolios.
    positions = _filter_col_min_value(positions, 'PortfolioID', min_transactions)
    positions = _filter_col_min_value(positions, 'InstrumentID', min_portfolios)
    return positions
def _filter_col_min_value(df: pd.DataFrame, column: str, min_count: int):
counts = df[column].value_counts()
filtered_indices = counts[counts >= min_count].index.tolist()
return df[df[column].isin(filtered_indices)]
if __name__ == '__main__':
    # End-to-end pipeline: load data -> filter -> factorize with NMF -> evaluate
    # -> produce suggestions for a few sample investors.
    DATA_FOLDER = "../../../Data/"
    positions = pd.read_csv(DATA_FOLDER + "positions.csv")
    portfolios = pd.read_csv(DATA_FOLDER + "portfolios.csv")
    instruments = pd.read_csv(DATA_FOLDER + "instruments.csv")
    positions = preprocess(positions)
    print(f"{len(positions)} positions after preprocessing")
    # Create a user-item-rating dataframe, where users are portfolios and items are instruments.
    # The ratings will be all 1, because the data we have is only the instruments that have been bought.
    # This means we will only train on positive examples
    t1 = time.time()
    unique_investments, unique_portfolios, user_item_rating_df = create_user_item_df(positions)
    t2 = time.time()
    print("Building user-item frame took " + str(t2 - t1) + " seconds.")
    # # train/test split
    X = user_item_rating_df[["User", "Item"]].values
    y = user_item_rating_df["Rating"].values
    # Sequential 80/20 split; the frame was already shuffled upstream.
    X_train, X_test = X[0:int(len(user_item_rating_df) * 0.8)], X[int(len(user_item_rating_df) * 0.8):]
    y_train, y_test = y[0:int(len(user_item_rating_df) * 0.8)], y[int(len(user_item_rating_df) * 0.8):]
    # # Train model
    X_sparse = sparse.csr_matrix((y_train, (X_train[:, 0], X_train[:, 1])),
                                 shape=(len(unique_portfolios), len(unique_investments)))
    model = NMF(
        n_components=3,
        init='random',
        solver='cd',
        beta_loss='frobenius',
        max_iter=200,
        tol=0.0001,
        alpha=0,
        l1_ratio=0,
        random_state=0,
        verbose=0,
        shuffle=False)
    W = model.fit_transform(X_sparse)
    H = model.components_
    # # Test model
    t1 = time.time()
    # NOTE(review): called with the default ratio=1 — see add_zeros; no zeros
    # appear to be added in that case.
    X_test, y_test = add_zeros(X_test, X_train, unique_investments, unique_portfolios, y_test)
    t2 = time.time()
    print("Adding zeros took " + str(t2 - t1) + " seconds.")
    y_pred = predict(H, W, X_test)
    # # Visualize metrics
    precision, recall = compute_metrics(y_pred, y_test)
    print("Precision:", precision)
    print("Recall:", recall)
    # # Prediction
    potential_investors = [42, 69, 420]
    potential_investments = list(range(100, 150))
    extended_positions = pd.merge(positions, instruments, on='InstrumentID')
    result = suggest_investments(H, W, unique_investments, unique_portfolios, portfolios, extended_positions,
                                 potential_investments, potential_investors)
    for client in result:
        print("Investments suggested for client " + client + ":")
print(", ".join(result[client])) | 03_clean_code/01_ranking_refactor/ranking/ranking_02_removed_basic_smells.py | import random
import time
from enum import Enum
import numpy as np
import pandas as pd
from scipy import sparse
from sklearn.decomposition import NMF
from sklearn.metrics import confusion_matrix
class CurrencyRating(Enum):
CHF = 5
GBP = 6
EUR = 7
USD = 8
NON_SWISS = 10
DEFAULT = 1
def suggest_investments(H: np.ndarray, W: np.ndarray, unique_investments: np.ndarray, unique_portfolios: np.ndarray,
portfolios: pd.DataFrame, extended_positions: pd.DataFrame, potential_investments: list,
potential_investors: list, min_swiss_rating: int = 500, max_nonswiss_rating: int = 800,
prediction_threshold: float = 0.01):
result = {}
for potential_investor in potential_investors:
current_investments = extended_positions.loc[extended_positions['PortfolioID'] == potential_investor]
rating_allowed = check_rating(potential_investor, portfolios, current_investments, max_nonswiss_rating,
min_swiss_rating)
if not rating_allowed:
continue
score = np.array([])
user = find_index(potential_investor, unique_portfolios)
for potential_investment in potential_investments:
# only do the prediction if investment is valid
investment_valid = check_valid_investment(current_investments, potential_investment, unique_investments)
if not investment_valid:
score = np.append(score, 0)
continue
item = find_index(potential_investment, unique_investments)
# compute prediction
dot_product = W[user, :].dot(H[:, item])
score = np.append(score, dot_product)
y_pred = np.where(score >= prediction_threshold, 1, 0)
result[str(potential_investor)] = [str(potential_investments[i]) for i in range(len(y_pred)) if y_pred[i] == 1]
return result
def find_index(value: int, array: np.ndarray):
return np.where(array == value)[0].min()
def check_valid_investment(current_investments: pd.DataFrame, potential_investment: int,
unique_investments: np.ndarray):
if potential_investment in current_investments.values:
return False
if len(np.where(unique_investments == potential_investment)[0]) == 0:
return False
return True
def check_rating(potential_investor: int, portfolios: pd.DataFrame, current_investments: pd.DataFrame,
max_nonswiss_rating: int, min_swiss_rating: int) -> bool:
is_swiss = portfolios.loc[portfolios['PortfolioID'] == potential_investor]['Currency'].values[0] == 'CHF'
if is_swiss:
rating = compute_instrument_rating_for_swiss_clients(current_investments)
else:
rating = compute_instrument_rating_for_non_swiss_clients(current_investments)
if is_swiss and rating < min_swiss_rating:
print("Swiss client " + str(potential_investor) + " rating too low, no investment suggested")
return False
elif not is_swiss and rating > max_nonswiss_rating:
print("Non-Swiss client " + str(potential_investor) + " rating too high, no investment suggested")
return False
return True
def compute_instrument_rating_for_non_swiss_clients(instruments: pd.DataFrame):
rating = len(instruments) * CurrencyRating.NON_SWISS.value
return rating
def compute_instrument_rating_for_swiss_clients(instruments: pd.DataFrame):
rating = 0
valid_instruments = instruments.loc[(~instruments["Ignore"]) & (~instruments["Expired"])]
for currency in valid_instruments["Currency"]:
try:
rating += CurrencyRating[currency].value
except KeyError:
rating += CurrencyRating.DEFAULT.value
return rating
def predict(H: np.ndarray, W: np.ndarray, X_test: np.ndarray, threshold=0.01):
dot_product = [W[user, :].dot(H[:, item]) for user, item in X_test]
y_pred = np.array(dot_product)
return np.where(y_pred >= threshold, 1, 0)
def compute_metrics(y_pred: np.ndarray, y_test: np.ndarray):
confusion = confusion_matrix(y_test, y_pred)
TN, FP, FN, TP = np.ravel(confusion)
precision = TP / (TP + FP)
recall = TP / (TP + FN)
return precision, recall
def add_zeros(X_test: np.ndarray, X_train: np.ndarray, unique_investments: np.ndarray, unique_portfolios: np.ndarray,
y_test: np.ndarray, ratio: float = 1):
# Since our test set also contains only positive examples, we want to add some zero values: we randomly generate a
# pair (user, item) and, if there isn't a position for it, we add it with rating zero
new_length = int(len(X_test) * ratio)
X = np.concatenate((X_train, X_test), axis=0)
while len(X_test) < new_length:
random_user_index = random.randint(0, len(unique_portfolios) - 1)
random_item_index = random.randint(0, len(unique_investments) - 1)
entry = np.array([random_user_index, random_item_index])
if not any(np.equal(X, entry).all(1)):
X_test = np.append(X_test, [entry], axis=0)
y_test = np.append(y_test, 0)
return X_test, y_test
def create_user_item_df(positions: pd.DataFrame):
unique_portfolios, user_indices = compute_unique_values_and_indices(positions["PortfolioID"])
unique_investments, item_indices = compute_unique_values_and_indices(positions["InstrumentID"])
ratings = [1] * len(user_indices)
user_item_rating_df = pd.DataFrame({"User": user_indices,
"Item": item_indices,
"Rating": ratings})
user_item_rating_df = user_item_rating_df.sample(frac=1).reset_index(drop=True)
return unique_investments, unique_portfolios, user_item_rating_df
def compute_unique_values_and_indices(column: pd.Series):
unique_values = column.unique()
indices_mapping = {portfolio: index for index, portfolio in enumerate(unique_values)}
indices = column.map(indices_mapping)
return unique_values, indices
def create_user_item_df_old(positions: pd.DataFrame):
unique_portfolios = positions["PortfolioID"].unique().tolist()
unique_investments = positions["InstrumentID"].unique().tolist()
user_indices = []
item_indices = []
for index, row in positions.iterrows():
user_indices += [unique_portfolios.index(row["PortfolioID"])]
item_indices += [unique_investments.index(row["InstrumentID"])]
ratings = [1] * len(user_indices)
user_item_rating_df = pd.DataFrame({"User": user_indices,
"Item": item_indices,
"Rating": ratings})
user_item_rating_df = user_item_rating_df.sample(frac=1).reset_index(drop=True)
return unique_investments, unique_portfolios, user_item_rating_df
def preprocess(positions: pd.DataFrame):
min_transactions = 3
min_portfolios = 5
positions = _filter_col_min_value(positions, 'PortfolioID', min_transactions)
positions = _filter_col_min_value(positions, 'InstrumentID', min_portfolios)
return positions
def _filter_col_min_value(df: pd.DataFrame, column: str, min_count: int):
counts = df[column].value_counts()
filtered_indices = counts[counts >= min_count].index.tolist()
return df[df[column].isin(filtered_indices)]
if __name__ == '__main__':
DATA_FOLDER = "../../../Data/"
positions = pd.read_csv(DATA_FOLDER + "positions.csv")
portfolios = pd.read_csv(DATA_FOLDER + "portfolios.csv")
instruments = pd.read_csv(DATA_FOLDER + "instruments.csv")
positions = preprocess(positions)
print(f"{len(positions)} positions after preprocessing")
# Create a user-item-rating dataframe, where users are portfolios and items are instruments.
# The ratings will be all 1, because the data we have is only the instruments that have been bought.
# This means we will only train on positive examples
t1 = time.time()
unique_investments, unique_portfolios, user_item_rating_df = create_user_item_df(positions)
t2 = time.time()
print("Building user-item frame took " + str(t2 - t1) + " seconds.")
# # train/test split
X = user_item_rating_df[["User", "Item"]].values
y = user_item_rating_df["Rating"].values
X_train, X_test = X[0:int(len(user_item_rating_df) * 0.8)], X[int(len(user_item_rating_df) * 0.8):]
y_train, y_test = y[0:int(len(user_item_rating_df) * 0.8)], y[int(len(user_item_rating_df) * 0.8):]
# # Train model
X_sparse = sparse.csr_matrix((y_train, (X_train[:, 0], X_train[:, 1])),
shape=(len(unique_portfolios), len(unique_investments)))
model = NMF(
n_components=3,
init='random',
solver='cd',
beta_loss='frobenius',
max_iter=200,
tol=0.0001,
alpha=0,
l1_ratio=0,
random_state=0,
verbose=0,
shuffle=False)
W = model.fit_transform(X_sparse)
H = model.components_
# # Test model
t1 = time.time()
X_test, y_test = add_zeros(X_test, X_train, unique_investments, unique_portfolios, y_test)
t2 = time.time()
print("Adding zeros took " + str(t2 - t1) + " seconds.")
y_pred = predict(H, W, X_test)
# # Visualize metrics
precision, recall = compute_metrics(y_pred, y_test)
print("Precision:", precision)
print("Recall:", recall)
# # Prediction
potential_investors = [42, 69, 420]
potential_investments = list(range(100, 150))
extended_positions = pd.merge(positions, instruments, on='InstrumentID')
result = suggest_investments(H, W, unique_investments, unique_portfolios, portfolios, extended_positions,
potential_investments, potential_investors)
for client in result:
print("Investments suggested for client " + client + ":")
print(", ".join(result[client])) | 0.589835 | 0.39161 |
from .serializers import ProfileSerializer,UserSerializer,ForgotPasswordSerializer,ResetPasswordSeriliazer
from rest_framework.views import APIView
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework import permissions,status
from .models import User
from api.email import EmailSender
import os
@api_view(['GET'])
@permission_classes((permissions.IsAuthenticated,))
def get_current_user(request):
serializer = ProfileSerializer(request.user)
return Response(serializer.data)
class CreateUserView(APIView):
def post(self, request):
user = request.data
if not user:
return Response(data={'type': 'error', 'content': 'No data found'},status= status.HTTP_417_EXPECTATION_FAILED)
serializer = UserSerializer(data=user)
if serializer.is_valid():
serializer.save()
else:
return Response(data={"type": "error", "content": serializer.errors},status= status.HTTP_417_EXPECTATION_FAILED)
return Response(data= serializer.data,status= status.HTTP_201_CREATED)
@api_view(['POST'])
def forgot_password(request):
serializer = ForgotPasswordSerializer(data= request.data)
if not serializer.is_valid():
return Response(data={"type": "error", "content": serializer.errors},status= status.HTTP_417_EXPECTATION_FAILED)
user = None
try:
user = User.objects.get(email= serializer.data["email"])
except Exception:
user = None
if not user:
return Response(data={"type": "error", "content": "Não foi possivel encontrar usuário"},status= status.HTTP_417_EXPECTATION_FAILED)
user.forgot_password_token = user.generate_forgot_password_token()
user.save()
data_email = {
"name": user.first_name,
"forgotPasswordUrl": "%s/forgot?token=%s" %(os.getenv("FRONTEND_URL"),user.forgot_password_token)
}
EmailSender.send(
tos=[user.email],
template_path="email/forgot-password.html",
data=data_email,
subject="Esqueci Minha Senha")
return Response("Verifique seu email para resetar a senha")
@api_view(['POST'])
def reset_password(request):
serializer = ResetPasswordSeriliazer(data= request.data)
if not serializer.is_valid():
return Response(data={"type": "error", "content": serializer.errors},status= status.HTTP_417_EXPECTATION_FAILED)
if not serializer.data['password'] == serializer.data['password_confirmed']:
return Response(data={"type": "error", "content": "As senhas precisam ser iguais!"},status= status.HTTP_417_EXPECTATION_FAILED)
user = None
try :
user = User.objects.get(forgot_password_token= serializer.data["token"])
except :
user = None
if not user:
return Response(data={"type": "error", "content": "Token ínvalido"},status= status.HTTP_417_EXPECTATION_FAILED)
user.set_password(serializer.data['password'])
user.forgot_password_token = None
user.save()
return Response("Password was reset successfully")
@api_view(['GET'])
def verify_email_alredy_exists(request):
query_params = request.query_params
if 'email' not in query_params.keys():
return Response(data={"type": "error", "content": "Email é obrigatório"},status=status.HTTP_400_BAD_REQUEST)
email = query_params['email']
try:
User.objects.get(email = email)
except:
return Response(status=status.HTTP_417_EXPECTATION_FAILED)
return Response(status=status.HTTP_204_NO_CONTENT) | backend/keplerapi/authapi/views.py | from .serializers import ProfileSerializer,UserSerializer,ForgotPasswordSerializer,ResetPasswordSeriliazer
from rest_framework.views import APIView
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework import permissions,status
from .models import User
from api.email import EmailSender
import os
@api_view(['GET'])
@permission_classes((permissions.IsAuthenticated,))
def get_current_user(request):
serializer = ProfileSerializer(request.user)
return Response(serializer.data)
class CreateUserView(APIView):
def post(self, request):
user = request.data
if not user:
return Response(data={'type': 'error', 'content': 'No data found'},status= status.HTTP_417_EXPECTATION_FAILED)
serializer = UserSerializer(data=user)
if serializer.is_valid():
serializer.save()
else:
return Response(data={"type": "error", "content": serializer.errors},status= status.HTTP_417_EXPECTATION_FAILED)
return Response(data= serializer.data,status= status.HTTP_201_CREATED)
@api_view(['POST'])
def forgot_password(request):
serializer = ForgotPasswordSerializer(data= request.data)
if not serializer.is_valid():
return Response(data={"type": "error", "content": serializer.errors},status= status.HTTP_417_EXPECTATION_FAILED)
user = None
try:
user = User.objects.get(email= serializer.data["email"])
except Exception:
user = None
if not user:
return Response(data={"type": "error", "content": "Não foi possivel encontrar usuário"},status= status.HTTP_417_EXPECTATION_FAILED)
user.forgot_password_token = user.generate_forgot_password_token()
user.save()
data_email = {
"name": user.first_name,
"forgotPasswordUrl": "%s/forgot?token=%s" %(os.getenv("FRONTEND_URL"),user.forgot_password_token)
}
EmailSender.send(
tos=[user.email],
template_path="email/forgot-password.html",
data=data_email,
subject="Esqueci Minha Senha")
return Response("Verifique seu email para resetar a senha")
@api_view(['POST'])
def reset_password(request):
serializer = ResetPasswordSeriliazer(data= request.data)
if not serializer.is_valid():
return Response(data={"type": "error", "content": serializer.errors},status= status.HTTP_417_EXPECTATION_FAILED)
if not serializer.data['password'] == serializer.data['password_confirmed']:
return Response(data={"type": "error", "content": "As senhas precisam ser iguais!"},status= status.HTTP_417_EXPECTATION_FAILED)
user = None
try :
user = User.objects.get(forgot_password_token= serializer.data["token"])
except :
user = None
if not user:
return Response(data={"type": "error", "content": "Token ínvalido"},status= status.HTTP_417_EXPECTATION_FAILED)
user.set_password(serializer.data['password'])
user.forgot_password_token = None
user.save()
return Response("Password was reset successfully")
@api_view(['GET'])
def verify_email_alredy_exists(request):
query_params = request.query_params
if 'email' not in query_params.keys():
return Response(data={"type": "error", "content": "Email é obrigatório"},status=status.HTTP_400_BAD_REQUEST)
email = query_params['email']
try:
User.objects.get(email = email)
except:
return Response(status=status.HTTP_417_EXPECTATION_FAILED)
return Response(status=status.HTTP_204_NO_CONTENT) | 0.464659 | 0.12692 |
if not request.is_local:
redirect(URL('default', 'index'))
def adminuser():
# http://stackoverflow.com/questions/10201300/how-can-i-create-new-auth-user-and-auth-group-on-web2py-running-on-google-app-en
if not db().select(db.auth_user.ALL).first():
db.auth_user.insert(
username=myconf.get('admin_user.username'),
password=db.auth_user.password.validate(myconf.get('admin_user.password'))[0],
email=myconf.get('admin_user.email'),
first_name=myconf.get('admin_user.first_name'),
last_name=myconf.get('admin_user.last_name'),
)
user = auth.login_bare(
myconf.get('admin_user.username'),
myconf.get('admin_user.password')
)
authgroups()
fixauthgroups()
# load_sample_data()
session.flash = "Initialized!!"
redirect(URL('default', 'index'))
def authgroups():
if not db().select(db.auth_group.ALL).first():
for group in myconf.get('admin_user.auth_groups'):
group_id = db.auth_group.insert(
role=group
)
db.auth_membership.insert(
user_id=1,
group_id=group_id
)
return
def fixauthgroups():
GROUPS = db().select(db.auth_group.ALL)
for group in GROUPS:
group.update_record(
role=group.role.title()
)
return
def load_sample_data():
db.dog.truncate()
db.dog.bulk_insert([
{'title': 'Fido'},
{'title': 'Spot'},
])
db.person.truncate()
db.person.bulk_insert([
{'title': 'John'},
{'title': 'Mary'},
])
db.dog_owner.truncate()
db.dog_owner.bulk_insert([
{'dog': 1, 'person': 1},
{'dog': 1, 'person': 2},
{'dog': 2, 'person': 1},
{'dog': 2, 'person': 2},
])
return
def populate(table):
query = table
set = db(query)
# rows = set.select()
set.delete()
from gluon.contrib.populate import populate
populate(table, 15)
return | controllers/initialize.py |
if not request.is_local:
redirect(URL('default', 'index'))
def adminuser():
# http://stackoverflow.com/questions/10201300/how-can-i-create-new-auth-user-and-auth-group-on-web2py-running-on-google-app-en
if not db().select(db.auth_user.ALL).first():
db.auth_user.insert(
username=myconf.get('admin_user.username'),
password=db.auth_user.password.validate(myconf.get('admin_user.password'))[0],
email=myconf.get('admin_user.email'),
first_name=myconf.get('admin_user.first_name'),
last_name=myconf.get('admin_user.last_name'),
)
user = auth.login_bare(
myconf.get('admin_user.username'),
myconf.get('admin_user.password')
)
authgroups()
fixauthgroups()
# load_sample_data()
session.flash = "Initialized!!"
redirect(URL('default', 'index'))
def authgroups():
if not db().select(db.auth_group.ALL).first():
for group in myconf.get('admin_user.auth_groups'):
group_id = db.auth_group.insert(
role=group
)
db.auth_membership.insert(
user_id=1,
group_id=group_id
)
return
def fixauthgroups():
GROUPS = db().select(db.auth_group.ALL)
for group in GROUPS:
group.update_record(
role=group.role.title()
)
return
def load_sample_data():
db.dog.truncate()
db.dog.bulk_insert([
{'title': 'Fido'},
{'title': 'Spot'},
])
db.person.truncate()
db.person.bulk_insert([
{'title': 'John'},
{'title': 'Mary'},
])
db.dog_owner.truncate()
db.dog_owner.bulk_insert([
{'dog': 1, 'person': 1},
{'dog': 1, 'person': 2},
{'dog': 2, 'person': 1},
{'dog': 2, 'person': 2},
])
return
def populate(table):
query = table
set = db(query)
# rows = set.select()
set.delete()
from gluon.contrib.populate import populate
populate(table, 15)
return | 0.40698 | 0.079282 |
def seating_systm_01(waiting_area):
while(True):
occupied = 0
changed = 0
for r, row in enumerate(waiting_area):
for c, seat in enumerate(row):
if seat[0] == '#':
y = r - 1
x = c - 1
for i in range(y, y + 3):
for j in range(x, x + 3):
if (i == r and j == c) or (i < 0 or j < 0) or (i >= len(waiting_area) or j >= len(row)):
continue
waiting_area[i][j][1] += 1
for r, row in enumerate(waiting_area):
for c, seat in enumerate(row):
if seat[0] == 'L' and seat[1] == 0:
waiting_area[r][c][0] = '#'
changed += 1
elif seat[0] == '#' and seat[1] > 3:
waiting_area[r][c][0] = 'L'
changed += 1
if waiting_area[r][c][0] == '#':
occupied += 1
waiting_area[r][c][1] = 0
if changed == 0:
return occupied
def check_seats(seat, waiting_area, rule):
to_check = seat[:]
while(True):
to_check[0] += rule[0]
to_check[1] += rule[1]
if (to_check[0] < 0 or to_check[1] < 0) or (to_check[0] >= len(waiting_area) or to_check[1] >= len(waiting_area[0])):
return 0
if waiting_area[to_check[0]][to_check[1]][0] == '#':
return 1
if waiting_area[to_check[0]][to_check[1]][0] == 'L':
return 0
def check_seat(seat, waiting_area, checker):
rules = [[0,1],[1,1],[1,0],[1,-1],[0,-1],[-1,-1],[-1,0],[-1,1]]
return sum([checker(seat, waiting_area, rule) for rule in rules])
def seating_systm_02(waiting_area):
while(True):
occupied = 0
changed = 0
for r, row in enumerate(waiting_area):
for c, seat in enumerate(row):
waiting_area[r][c][1] = check_seat([r, c], waiting_area, check_seats)
for r, row in enumerate(waiting_area):
for c, seat in enumerate(row):
if seat[0] == 'L' and seat[1] == 0:
waiting_area[r][c][0] = '#'
changed += 1
elif seat[0] == '#' and seat[1] > 4:
waiting_area[r][c][0] = 'L'
changed += 1
if waiting_area[r][c][0] == '#':
occupied += 1
waiting_area[r][c][1] = 0
if changed == 0:
return occupied
if __name__ == "__main__":
with open('input.txt') as f:
lines = f.readlines()
with open('output.txt', 'w') as f:
f.write("Part one: {}\n".format(seating_systm_01([[[c, 0] for c in l.strip()] for l in lines])))
f.write("Part two: {}\n".format(seating_systm_02([[[c, 0] for c in l.strip()] for l in lines]))) | 11/seating_system.py | def seating_systm_01(waiting_area):
while(True):
occupied = 0
changed = 0
for r, row in enumerate(waiting_area):
for c, seat in enumerate(row):
if seat[0] == '#':
y = r - 1
x = c - 1
for i in range(y, y + 3):
for j in range(x, x + 3):
if (i == r and j == c) or (i < 0 or j < 0) or (i >= len(waiting_area) or j >= len(row)):
continue
waiting_area[i][j][1] += 1
for r, row in enumerate(waiting_area):
for c, seat in enumerate(row):
if seat[0] == 'L' and seat[1] == 0:
waiting_area[r][c][0] = '#'
changed += 1
elif seat[0] == '#' and seat[1] > 3:
waiting_area[r][c][0] = 'L'
changed += 1
if waiting_area[r][c][0] == '#':
occupied += 1
waiting_area[r][c][1] = 0
if changed == 0:
return occupied
def check_seats(seat, waiting_area, rule):
to_check = seat[:]
while(True):
to_check[0] += rule[0]
to_check[1] += rule[1]
if (to_check[0] < 0 or to_check[1] < 0) or (to_check[0] >= len(waiting_area) or to_check[1] >= len(waiting_area[0])):
return 0
if waiting_area[to_check[0]][to_check[1]][0] == '#':
return 1
if waiting_area[to_check[0]][to_check[1]][0] == 'L':
return 0
def check_seat(seat, waiting_area, checker):
rules = [[0,1],[1,1],[1,0],[1,-1],[0,-1],[-1,-1],[-1,0],[-1,1]]
return sum([checker(seat, waiting_area, rule) for rule in rules])
def seating_systm_02(waiting_area):
while(True):
occupied = 0
changed = 0
for r, row in enumerate(waiting_area):
for c, seat in enumerate(row):
waiting_area[r][c][1] = check_seat([r, c], waiting_area, check_seats)
for r, row in enumerate(waiting_area):
for c, seat in enumerate(row):
if seat[0] == 'L' and seat[1] == 0:
waiting_area[r][c][0] = '#'
changed += 1
elif seat[0] == '#' and seat[1] > 4:
waiting_area[r][c][0] = 'L'
changed += 1
if waiting_area[r][c][0] == '#':
occupied += 1
waiting_area[r][c][1] = 0
if changed == 0:
return occupied
if __name__ == "__main__":
with open('input.txt') as f:
lines = f.readlines()
with open('output.txt', 'w') as f:
f.write("Part one: {}\n".format(seating_systm_01([[[c, 0] for c in l.strip()] for l in lines])))
f.write("Part two: {}\n".format(seating_systm_02([[[c, 0] for c in l.strip()] for l in lines]))) | 0.192539 | 0.505615 |
import os, sys, signal, subprocess
from sense_hat import SenseHat
from time import sleep
from libs.set_color import *
import variables.colors as c
import variables.joystick as j
sense = SenseHat()
sense.clear()
def joystickJoystick(direction):
if direction == "up":
if j.joystick_index == 0:
if j.joystick_r == 255:
j.joystick_r = 0
else:
j.joystick_r += 1
if j.joystick_index == 1:
if j.joystick_g == 255:
j.joystick_g = 0
else:
j.joystick_g += 1
if j.joystick_index == 2:
if j.joystick_b == 255:
j.joystick_b = 0
else:
j.joystick_b += 1
elif direction == "down":
if j.joystick_index == 0:
if j.joystick_r == 0:
j.joystick_r = 255
else:
j.joystick_r -= 1
if j.joystick_index == 1:
if j.joystick_g == 0:
j.joystick_g = 255
else:
j.joystick_g -= 1
if j.joystick_index == 2:
if j.joystick_b == 0:
j.joystick_b = 255
else:
j.joystick_b -= 1
elif direction == "left":
if j.joystick_index == 0:
j.joystick_index = 2
else:
j.joystick_index -= 1
elif direction == "right":
if j.joystick_index == 2:
j.joystick_index = 0
else:
j.joystick_index += 1
c.color = (j.joystick_r, j.joystick_g, j.joystick_b)
def joystickJoystickHeld(direction):
if direction == "up":
if j.joystick_index == 0:
if j.joystick_r == 255:
j.joystick_r = 0
else:
j.joystick_r += 1
if j.joystick_index == 1:
if j.joystick_g == 255:
j.joystick_g = 0
else:
j.joystick_g += 1
if j.joystick_index == 2:
if j.joystick_b == 255:
j.joystick_b = 0
else:
j.joystick_b += 1
elif direction == "down":
if j.joystick_index == 0:
if j.joystick_r == 0:
j.joystick_r = 255
else:
j.joystick_r -= 1
if j.joystick_index == 1:
if j.joystick_g == 0:
j.joystick_g = 255
else:
j.joystick_g -= 1
if j.joystick_index == 2:
if j.joystick_b == 0:
j.joystick_b = 255
else:
j.joystick_b -= 1
c.color = (j.joystick_r, j.joystick_g, j.joystick_b) | smart-lamp/modes/joystick.py | import os, sys, signal, subprocess
from sense_hat import SenseHat
from time import sleep
from libs.set_color import *
import variables.colors as c
import variables.joystick as j
sense = SenseHat()
sense.clear()
def joystickJoystick(direction):
if direction == "up":
if j.joystick_index == 0:
if j.joystick_r == 255:
j.joystick_r = 0
else:
j.joystick_r += 1
if j.joystick_index == 1:
if j.joystick_g == 255:
j.joystick_g = 0
else:
j.joystick_g += 1
if j.joystick_index == 2:
if j.joystick_b == 255:
j.joystick_b = 0
else:
j.joystick_b += 1
elif direction == "down":
if j.joystick_index == 0:
if j.joystick_r == 0:
j.joystick_r = 255
else:
j.joystick_r -= 1
if j.joystick_index == 1:
if j.joystick_g == 0:
j.joystick_g = 255
else:
j.joystick_g -= 1
if j.joystick_index == 2:
if j.joystick_b == 0:
j.joystick_b = 255
else:
j.joystick_b -= 1
elif direction == "left":
if j.joystick_index == 0:
j.joystick_index = 2
else:
j.joystick_index -= 1
elif direction == "right":
if j.joystick_index == 2:
j.joystick_index = 0
else:
j.joystick_index += 1
c.color = (j.joystick_r, j.joystick_g, j.joystick_b)
def joystickJoystickHeld(direction):
if direction == "up":
if j.joystick_index == 0:
if j.joystick_r == 255:
j.joystick_r = 0
else:
j.joystick_r += 1
if j.joystick_index == 1:
if j.joystick_g == 255:
j.joystick_g = 0
else:
j.joystick_g += 1
if j.joystick_index == 2:
if j.joystick_b == 255:
j.joystick_b = 0
else:
j.joystick_b += 1
elif direction == "down":
if j.joystick_index == 0:
if j.joystick_r == 0:
j.joystick_r = 255
else:
j.joystick_r -= 1
if j.joystick_index == 1:
if j.joystick_g == 0:
j.joystick_g = 255
else:
j.joystick_g -= 1
if j.joystick_index == 2:
if j.joystick_b == 0:
j.joystick_b = 255
else:
j.joystick_b -= 1
c.color = (j.joystick_r, j.joystick_g, j.joystick_b) | 0.055933 | 0.224906 |
from PyQt5 import QtCore, QtGui, QtWidgets
import time
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class Ui_MainWindow(QMainWindow):
def setupUi(self, MainWindow):
# initialising timer to update value every second
timer = QTimer(self)
timer.timeout.connect(self.countdown)
timer.start(1000)
# initialising relevant values
self.start = False
self.days = 0
self.hours = 0
self.minutes = 0
self.seconds = 0
self.eventname = None
# userinterface, converted from the .ui file (Qt Designer)
MainWindow.setObjectName("MainWindow")
MainWindow.setFixedSize(649, 362)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.event_name = QtWidgets.QLabel(self.centralwidget)
self.event_name.setGeometry(QtCore.QRect(90, 40, 71, 31))
font = QtGui.QFont()
font.setFamily("Lucida Console")
font.setPointSize(14)
self.event_name.setFont(font)
self.event_name.setObjectName("event_name")
self.event_date = QtWidgets.QLabel(self.centralwidget)
self.event_date.setGeometry(QtCore.QRect(90, 80, 71, 31))
font = QtGui.QFont()
font.setFamily("Lucida Console")
font.setPointSize(14)
self.event_date.setFont(font)
self.event_date.setObjectName("event_date")
self.event_time = QtWidgets.QLabel(self.centralwidget)
self.event_time.setGeometry(QtCore.QRect(90, 120, 61, 31))
font = QtGui.QFont()
font.setFamily("Lucida Console")
font.setPointSize(14)
self.event_time.setFont(font)
self.event_time.setObjectName("event_time")
self.input_name = QtWidgets.QLineEdit(self.centralwidget)
self.input_name.setGeometry(QtCore.QRect(170, 40, 451, 31))
self.input_name.setObjectName("input_name")
self.start_button = QtWidgets.QPushButton(self.centralwidget)
self.start_button.clicked.connect(self.start_countdown)
self.start_button.setGeometry(QtCore.QRect(470, 80, 151, 71))
font = QtGui.QFont()
font.setFamily("Lucida Console")
font.setPointSize(14)
self.start_button.setFont(font)
self.start_button.setObjectName("start_button")
self.input_date = QtWidgets.QDateEdit(self.centralwidget)
self.input_date.setDate(QtCore.QDate.currentDate())
self.input_date.setGeometry(QtCore.QRect(170, 81, 291, 31))
self.input_date.setObjectName("input_date")
self.input_time = QtWidgets.QTimeEdit(self.centralwidget)
self.input_time.setGeometry(QtCore.QRect(170, 120, 291, 31))
self.input_time.setObjectName("input_time")
self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget.setGeometry(QtCore.QRect(20, 10, 38, 330))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_4 = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Lucida Console")
font.setPointSize(36)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.verticalLayout_2.addWidget(self.label_4)
self.label_5 = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Lucida Console")
font.setPointSize(36)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.verticalLayout_2.addWidget(self.label_5)
self.label_6 = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Lucida Console")
font.setPointSize(36)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.verticalLayout_2.addWidget(self.label_6)
self.label_7 = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Lucida Console")
font.setPointSize(36)
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.verticalLayout_2.addWidget(self.label_7)
self.label_8 = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Lucida Console")
font.setPointSize(36)
self.label_8.setFont(font)
self.label_8.setObjectName("label_8")
self.verticalLayout_2.addWidget(self.label_8)
self.lcd_seconds = QtWidgets.QLCDNumber(self.centralwidget)
self.lcd_seconds.setGeometry(QtCore.QRect(500, 200, 111, 101))
self.lcd_seconds.setDigitCount(2)
self.lcd_seconds.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
self.lcd_seconds.setProperty("value", 0.0)
self.lcd_seconds.setObjectName("lcd_seconds")
self.lcd_minutes = QtWidgets.QLCDNumber(self.centralwidget)
self.lcd_minutes.setGeometry(QtCore.QRect(380, 200, 111, 101))
self.lcd_minutes.setDigitCount(2)
self.lcd_minutes.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
self.lcd_minutes.setProperty("value", 0.0)
self.lcd_minutes.setObjectName("lcd_minutes")
self.lcd_hours = QtWidgets.QLCDNumber(self.centralwidget)
self.lcd_hours.setGeometry(QtCore.QRect(260, 200, 111, 101))
self.lcd_hours.setDigitCount(2)
self.lcd_hours.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
self.lcd_hours.setProperty("value", 0.0)
self.lcd_hours.setObjectName("lcd_hours")
self.lcd_days = QtWidgets.QLCDNumber(self.centralwidget)
self.lcd_days.setGeometry(QtCore.QRect(90, 200, 161, 101))
self.lcd_days.setDigitCount(3)
self.lcd_days.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
self.lcd_days.setProperty("value", 0.0)
self.lcd_days.setObjectName("lcd_days")
self.name = QtWidgets.QLabel(self.centralwidget)
self.name.setGeometry(QtCore.QRect(90, 160, 521, 31))
font = QtGui.QFont()
font.setFamily("Lucida Console")
font.setPointSize(14)
self.name.setFont(font)
self.name.setText("")
self.name.setAlignment(QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.name.setObjectName("name")
self.lcd_label_days = QtWidgets.QLabel(self.centralwidget)
self.lcd_label_days.setGeometry(QtCore.QRect(190, 300, 61, 31))
font = QtGui.QFont()
font.setFamily("Lucida Console")
font.setPointSize(14)
self.lcd_label_days.setFont(font)
self.lcd_label_days.setText("Days")
self.lcd_label_days.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lcd_label_days.setObjectName("lcd_label_days")
self.lcd_label_hours = QtWidgets.QLabel(self.centralwidget)
self.lcd_label_hours.setGeometry(QtCore.QRect(300, 300, 71, 31))
font = QtGui.QFont()
font.setFamily("Lucida Console")
font.setPointSize(14)
self.lcd_label_hours.setFont(font)
self.lcd_label_hours.setText("Hours")
self.lcd_label_hours.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lcd_label_hours.setObjectName("lcd_label_hours")
self.lcd_label_minutes = QtWidgets.QLabel(self.centralwidget)
self.lcd_label_minutes.setGeometry(QtCore.QRect(390, 300, 101, 31))
font = QtGui.QFont()
font.setFamily("Lucida Console")
font.setPointSize(14)
self.lcd_label_minutes.setFont(font)
self.lcd_label_minutes.setText("Minutes")
self.lcd_label_minutes.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lcd_label_minutes.setObjectName("lcd_label_minutes")
self.lcd_label_seconds = QtWidgets.QLabel(self.centralwidget)
self.lcd_label_seconds.setGeometry(QtCore.QRect(510, 300, 101, 31))
font = QtGui.QFont()
font.setFamily("Lucida Console")
font.setPointSize(14)
self.lcd_label_seconds.setFont(font)
self.lcd_label_seconds.setText("Seconds")
self.lcd_label_seconds.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lcd_label_seconds.setObjectName("lcd_label_seconds")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Countdown Timer"))
self.event_name.setText(_translate("MainWindow", "Name"))
self.event_date.setText(_translate("MainWindow", "Date"))
self.event_time.setText(_translate("MainWindow", "Time"))
self.start_button.setText(_translate("MainWindow", "START"))
self.label_4.setText(_translate("MainWindow", "E"))
self.label_5.setText(_translate("MainWindow", "V"))
self.label_6.setText(_translate("MainWindow", "E"))
self.label_7.setText(_translate("MainWindow", "N"))
self.label_8.setText(_translate("MainWindow", "T"))
# userinterface code finishes here. Cooler stuff ahead.
def countdown(self):
"""
Connected to the timer.
1) Updates countdown values every second.
2) Checks if countdown is finished. If so, alerts the user.
"""
if self.start:
if self.seconds != 0:
self.seconds -= 1
elif self.minutes != 0:
self.minutes -= 1
self.seconds = 59
elif self.hours != 0:
self.hours -= 1
self.minutes = 59
self.seconds = 59
else:
self.days -= 1
self.hours = 23
self.minutes = 59
self.seconds = 59
# timer is completed
if self.days == 0 and self.hours == 0 and self.minutes == 0 and self.seconds == 0:
self.name.setText("Countdown to "+self.eventname+" is over!")
self.lcd_seconds.display(self.seconds)
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText(self.eventname+" reached.")
msg.setWindowTitle("Alert")
msg.exec_()
self.start = False
self.lcd_seconds.display(self.seconds)
self.lcd_minutes.display(self.minutes)
self.lcd_hours.display(self.hours)
self.lcd_days.display(self.days)
def start_countdown(self):
"""
Initialises countdown using the values entered by the user, and returns an error message if the event name has not been entered or the date/time is entered improperly."""
self.start = False
event = self.input_name.text()
day = self.input_date.date()
day = day.toString("MM.dd.yyyy")
hms = self.input_time.time()
hms = hms.toString("hh:mm")
curr_time = int(time.time())
event_time = day+" "+hms
event_time = time.strptime(event_time, "%m.%d.%Y %H:%M")
event_time = time.mktime(event_time)
total_seconds = event_time-curr_time
if total_seconds <= 0 or event == "" or total_seconds//(3600*24) > 999:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Error")
if event == "":
msg.setInformativeText("Event name cannot be blank.")
elif total_seconds <= 0:
msg.setInformativeText(
"Event timestamp is before current timestamp.")
elif total_seconds//(3600*24) > 999:
msg.setInformativeText("Event date too far in the future.")
msg.setWindowTitle("Error")
msg.exec_()
else:
seconds = int(total_seconds % 60)
minutes = int((total_seconds//60) % 60)
hours = int((total_seconds//3600) % 24)
days = int((total_seconds//(3600*24)))
self.days = days
self.hours = hours
self.minutes = minutes
self.seconds = seconds
self.eventname = event
self.lcd_seconds.display(seconds)
self.lcd_minutes.display(minutes)
self.lcd_hours.display(hours)
self.lcd_days.display(days)
self.name.setText("Countdown to "+event)
self.start = True
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_()) | 1-Beginner/countdown_timer/python/countdown-timer.py | from PyQt5 import QtCore, QtGui, QtWidgets
import time
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class Ui_MainWindow(QMainWindow):
    """Countdown-timer main window.

    Lets the user enter an event name, date and time, then shows a live
    countdown on four LCD displays (days / hours / minutes / seconds),
    refreshed once per second by a QTimer.
    """
    def setupUi(self, MainWindow):
        """Build the widget tree, wire signals, and initialise countdown state."""
        # initialising timer to update value every second
        timer = QTimer(self)
        timer.timeout.connect(self.countdown)
        timer.start(1000)
        # initialising relevant values
        self.start = False  # True while a countdown is running
        self.days = 0
        self.hours = 0
        self.minutes = 0
        self.seconds = 0
        self.eventname = None
        # userinterface, converted from the .ui file (Qt Designer)
        MainWindow.setObjectName("MainWindow")
        MainWindow.setFixedSize(649, 362)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.event_name = QtWidgets.QLabel(self.centralwidget)
        self.event_name.setGeometry(QtCore.QRect(90, 40, 71, 31))
        font = QtGui.QFont()
        font.setFamily("Lucida Console")
        font.setPointSize(14)
        self.event_name.setFont(font)
        self.event_name.setObjectName("event_name")
        self.event_date = QtWidgets.QLabel(self.centralwidget)
        self.event_date.setGeometry(QtCore.QRect(90, 80, 71, 31))
        font = QtGui.QFont()
        font.setFamily("Lucida Console")
        font.setPointSize(14)
        self.event_date.setFont(font)
        self.event_date.setObjectName("event_date")
        self.event_time = QtWidgets.QLabel(self.centralwidget)
        self.event_time.setGeometry(QtCore.QRect(90, 120, 61, 31))
        font = QtGui.QFont()
        font.setFamily("Lucida Console")
        font.setPointSize(14)
        self.event_time.setFont(font)
        self.event_time.setObjectName("event_time")
        self.input_name = QtWidgets.QLineEdit(self.centralwidget)
        self.input_name.setGeometry(QtCore.QRect(170, 40, 451, 31))
        self.input_name.setObjectName("input_name")
        self.start_button = QtWidgets.QPushButton(self.centralwidget)
        self.start_button.clicked.connect(self.start_countdown)
        self.start_button.setGeometry(QtCore.QRect(470, 80, 151, 71))
        font = QtGui.QFont()
        font.setFamily("Lucida Console")
        font.setPointSize(14)
        self.start_button.setFont(font)
        self.start_button.setObjectName("start_button")
        self.input_date = QtWidgets.QDateEdit(self.centralwidget)
        self.input_date.setDate(QtCore.QDate.currentDate())
        self.input_date.setGeometry(QtCore.QRect(170, 81, 291, 31))
        self.input_date.setObjectName("input_date")
        self.input_time = QtWidgets.QTimeEdit(self.centralwidget)
        self.input_time.setGeometry(QtCore.QRect(170, 120, 291, 31))
        self.input_time.setObjectName("input_time")
        self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.layoutWidget.setGeometry(QtCore.QRect(20, 10, 38, 330))
        self.layoutWidget.setObjectName("layoutWidget")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # labels 4-8 spell out "EVENT" vertically (texts set in retranslateUi)
        self.label_4 = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setFamily("Lucida Console")
        font.setPointSize(36)
        self.label_4.setFont(font)
        self.label_4.setObjectName("label_4")
        self.verticalLayout_2.addWidget(self.label_4)
        self.label_5 = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setFamily("Lucida Console")
        font.setPointSize(36)
        self.label_5.setFont(font)
        self.label_5.setObjectName("label_5")
        self.verticalLayout_2.addWidget(self.label_5)
        self.label_6 = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setFamily("Lucida Console")
        font.setPointSize(36)
        self.label_6.setFont(font)
        self.label_6.setObjectName("label_6")
        self.verticalLayout_2.addWidget(self.label_6)
        self.label_7 = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setFamily("Lucida Console")
        font.setPointSize(36)
        self.label_7.setFont(font)
        self.label_7.setObjectName("label_7")
        self.verticalLayout_2.addWidget(self.label_7)
        self.label_8 = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setFamily("Lucida Console")
        font.setPointSize(36)
        self.label_8.setFont(font)
        self.label_8.setObjectName("label_8")
        self.verticalLayout_2.addWidget(self.label_8)
        # LCD displays for the remaining time
        self.lcd_seconds = QtWidgets.QLCDNumber(self.centralwidget)
        self.lcd_seconds.setGeometry(QtCore.QRect(500, 200, 111, 101))
        self.lcd_seconds.setDigitCount(2)
        self.lcd_seconds.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
        self.lcd_seconds.setProperty("value", 0.0)
        self.lcd_seconds.setObjectName("lcd_seconds")
        self.lcd_minutes = QtWidgets.QLCDNumber(self.centralwidget)
        self.lcd_minutes.setGeometry(QtCore.QRect(380, 200, 111, 101))
        self.lcd_minutes.setDigitCount(2)
        self.lcd_minutes.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
        self.lcd_minutes.setProperty("value", 0.0)
        self.lcd_minutes.setObjectName("lcd_minutes")
        self.lcd_hours = QtWidgets.QLCDNumber(self.centralwidget)
        self.lcd_hours.setGeometry(QtCore.QRect(260, 200, 111, 101))
        self.lcd_hours.setDigitCount(2)
        self.lcd_hours.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
        self.lcd_hours.setProperty("value", 0.0)
        self.lcd_hours.setObjectName("lcd_hours")
        self.lcd_days = QtWidgets.QLCDNumber(self.centralwidget)
        self.lcd_days.setGeometry(QtCore.QRect(90, 200, 161, 101))
        self.lcd_days.setDigitCount(3)
        self.lcd_days.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
        self.lcd_days.setProperty("value", 0.0)
        self.lcd_days.setObjectName("lcd_days")
        # Banner label showing "Countdown to <event>"
        self.name = QtWidgets.QLabel(self.centralwidget)
        self.name.setGeometry(QtCore.QRect(90, 160, 521, 31))
        font = QtGui.QFont()
        font.setFamily("Lucida Console")
        font.setPointSize(14)
        self.name.setFont(font)
        self.name.setText("")
        self.name.setAlignment(QtCore.Qt.AlignRight |
                               QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
        self.name.setObjectName("name")
        self.lcd_label_days = QtWidgets.QLabel(self.centralwidget)
        self.lcd_label_days.setGeometry(QtCore.QRect(190, 300, 61, 31))
        font = QtGui.QFont()
        font.setFamily("Lucida Console")
        font.setPointSize(14)
        self.lcd_label_days.setFont(font)
        self.lcd_label_days.setText("Days")
        self.lcd_label_days.setAlignment(
            QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
        self.lcd_label_days.setObjectName("lcd_label_days")
        self.lcd_label_hours = QtWidgets.QLabel(self.centralwidget)
        self.lcd_label_hours.setGeometry(QtCore.QRect(300, 300, 71, 31))
        font = QtGui.QFont()
        font.setFamily("Lucida Console")
        font.setPointSize(14)
        self.lcd_label_hours.setFont(font)
        self.lcd_label_hours.setText("Hours")
        self.lcd_label_hours.setAlignment(
            QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
        self.lcd_label_hours.setObjectName("lcd_label_hours")
        self.lcd_label_minutes = QtWidgets.QLabel(self.centralwidget)
        self.lcd_label_minutes.setGeometry(QtCore.QRect(390, 300, 101, 31))
        font = QtGui.QFont()
        font.setFamily("Lucida Console")
        font.setPointSize(14)
        self.lcd_label_minutes.setFont(font)
        self.lcd_label_minutes.setText("Minutes")
        self.lcd_label_minutes.setAlignment(
            QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
        self.lcd_label_minutes.setObjectName("lcd_label_minutes")
        self.lcd_label_seconds = QtWidgets.QLabel(self.centralwidget)
        self.lcd_label_seconds.setGeometry(QtCore.QRect(510, 300, 101, 31))
        font = QtGui.QFont()
        font.setFamily("Lucida Console")
        font.setPointSize(14)
        self.lcd_label_seconds.setFont(font)
        self.lcd_label_seconds.setText("Seconds")
        self.lcd_label_seconds.setAlignment(
            QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
        self.lcd_label_seconds.setObjectName("lcd_label_seconds")
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (kept separate for Qt translation support)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Countdown Timer"))
        self.event_name.setText(_translate("MainWindow", "Name"))
        self.event_date.setText(_translate("MainWindow", "Date"))
        self.event_time.setText(_translate("MainWindow", "Time"))
        self.start_button.setText(_translate("MainWindow", "START"))
        self.label_4.setText(_translate("MainWindow", "E"))
        self.label_5.setText(_translate("MainWindow", "V"))
        self.label_6.setText(_translate("MainWindow", "E"))
        self.label_7.setText(_translate("MainWindow", "N"))
        self.label_8.setText(_translate("MainWindow", "T"))
    # userinterface code finishes here. Cooler stuff ahead.
    def countdown(self):
        """
        Connected to the timer.
        1) Updates countdown values every second.
        2) Checks if countdown is finished. If so, alerts the user.
        """
        if self.start:
            # Borrow-style decrement: seconds first, then minutes, hours, days.
            if self.seconds != 0:
                self.seconds -= 1
            elif self.minutes != 0:
                self.minutes -= 1
                self.seconds = 59
            elif self.hours != 0:
                self.hours -= 1
                self.minutes = 59
                self.seconds = 59
            else:
                self.days -= 1
                self.hours = 23
                self.minutes = 59
                self.seconds = 59
            # timer is completed
            if self.days == 0 and self.hours == 0 and self.minutes == 0 and self.seconds == 0:
                self.name.setText("Countdown to "+self.eventname+" is over!")
                self.lcd_seconds.display(self.seconds)
                msg = QMessageBox()
                msg.setIcon(QMessageBox.Information)
                msg.setText(self.eventname+" reached.")
                msg.setWindowTitle("Alert")
                msg.exec_()
                self.start = False
        # Refresh all four LCDs regardless of running state.
        self.lcd_seconds.display(self.seconds)
        self.lcd_minutes.display(self.minutes)
        self.lcd_hours.display(self.hours)
        self.lcd_days.display(self.days)
    def start_countdown(self):
        """
        Initialises countdown using the values entered by the user, and returns an error message if the event name has not been entered or the date/time is entered improperly."""
        # Pause the ticking countdown while (re)configuring.
        self.start = False
        event = self.input_name.text()
        day = self.input_date.date()
        day = day.toString("MM.dd.yyyy")
        hms = self.input_time.time()
        hms = hms.toString("hh:mm")
        curr_time = int(time.time())
        # Parse the widget strings back into an epoch timestamp (local time).
        event_time = day+" "+hms
        event_time = time.strptime(event_time, "%m.%d.%Y %H:%M")
        event_time = time.mktime(event_time)
        total_seconds = event_time-curr_time
        if total_seconds <= 0 or event == "" or total_seconds//(3600*24) > 999:
            # Invalid input: explain which validation failed.
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Error")
            if event == "":
                msg.setInformativeText("Event name cannot be blank.")
            elif total_seconds <= 0:
                msg.setInformativeText(
                    "Event timestamp is before current timestamp.")
            elif total_seconds//(3600*24) > 999:
                msg.setInformativeText("Event date too far in the future.")
            msg.setWindowTitle("Error")
            msg.exec_()
        else:
            # Split the remaining seconds into days / hours / minutes / seconds.
            seconds = int(total_seconds % 60)
            minutes = int((total_seconds//60) % 60)
            hours = int((total_seconds//3600) % 24)
            days = int((total_seconds//(3600*24)))
            self.days = days
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.eventname = event
            self.lcd_seconds.display(seconds)
            self.lcd_minutes.display(minutes)
            self.lcd_hours.display(hours)
            self.lcd_days.display(days)
            self.name.setText("Countdown to "+event)
            self.start = True
if __name__ == "__main__":
    # Script entry point: build the Qt application and show the main window.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
sys.exit(app.exec_()) | 0.3512 | 0.054803 |
from collections import OrderedDict
import numpy as np
from dgp.annotations import (
BoundingBoxOntology, InstanceSegmentationOntology, Ontology, PanopticSegmentation2DAnnotation,
SemanticSegmentation2DAnnotation, SemanticSegmentationOntology
)
from dgp.proto.ontology_pb2 import Ontology as OntologyPB2
from dgp.proto.ontology_pb2 import OntologyItem
def remap_bounding_box_annotations(bounding_box_annotations, lookup_table, original_ontology, remapped_ontology):
    """
    Parameters
    ----------
    bounding_box_annotations: BoundingBox2DAnnotationList or BoundingBox3DAnnotationList
        Annotations to remap
    lookup_table: dict
        Lookup from old class names to new class names
        e.g.:
        {
            'Car': 'Car',
            'Truck': 'Car',
            'Motorcycle': 'Motorcycle'
        }
        NOTE: boxes whose class is not in the lookup are dropped
    original_ontology: BoundingBoxOntology
        Ontology we are remapping annotations from
    remapped_ontology: BoundingBoxOntology
        Ontology we are mapping annotations to
    Returns
    -------
    remapped_bounding_box_annotations: BoundingBox2DAnnotationList or BoundingBox3DAnnotationList
        Remapped annotations with the same type of bounding_box_annotations
    """
    assert (isinstance(original_ontology, BoundingBoxOntology) and isinstance(remapped_ontology, BoundingBoxOntology))
    # Iterate over boxes constructing box with remapped class for each
    remapped_boxlist = []
    for box in bounding_box_annotations:
        original_class_name = original_ontology.contiguous_id_to_name[box.class_id]
        if original_class_name in lookup_table:
            # Remap class_id in box (NOTE: mutates the input box object in place)
            remapped_class_id = remapped_ontology.name_to_contiguous_id[lookup_table[original_class_name]]
            box.class_id = remapped_class_id
            remapped_boxlist.append(box)
    # Instantiate BoundingBox2DAnnotationList or BoundingBox3DAnnotationList with remapped boxlist and remapped BoundingBoxOntology
    annotation_type = type(bounding_box_annotations)
    return annotation_type(remapped_ontology, remapped_boxlist)
def remap_semantic_segmentation_2d_annotation(
    semantic_segmentation_annotation, lookup_table, original_ontology, remapped_ontology
):
    """
    Parameters
    ----------
    semantic_segmentation_annotation: SemanticSegmentation2DAnnotation
        Annotation to remap
    lookup_table: dict
        Lookup from old class names to new class names
        e.g.:
        {
            'Car': 'Car',
            'Truck': 'Car',
            'Motorcycle': 'Motorcycle'
        }
        NOTE: pixels of classes not in the lookup become VOID
    original_ontology: SemanticSegmentationOntology
        Ontology we are remapping annotation from
    remapped_ontology: SemanticSegmentationOntology
        Ontology we are mapping annotation to
    Returns
    -------
    remapped_semantic_segmentation_2d_annotation: SemanticSegmentation2DAnnotation
        Remapped annotation
    """
    assert (isinstance(original_ontology, SemanticSegmentationOntology) and \
        isinstance(remapped_ontology, SemanticSegmentationOntology))
    original_segmentation_image = semantic_segmentation_annotation.label
    # Start from an all-VOID image; only classes present in the lookup get remapped.
    remapped_segmentation_image = np.ones_like(original_segmentation_image) * Ontology.VOID_ID
    for class_name in lookup_table:
        # pylint: disable=E1137
        remapped_segmentation_image[original_segmentation_image == original_ontology.name_to_contiguous_id[class_name]] = \
            remapped_ontology.name_to_contiguous_id[lookup_table[class_name]]
        # pylint: enable=E1137
    # Instantiate SemanticSegmentation2DAnnotation with remapped segmentation image and remapped SemanticSegmentationOntology
    return SemanticSegmentation2DAnnotation(remapped_ontology, remapped_segmentation_image)
def remap_instance_segmentation_2d_annotation(
    instance_segmentation_annotation, lookup_table, original_ontology, remapped_ontology
):
    """
    Parameters
    ----------
    instance_segmentation_annotation: PanopticSegmentation2DAnnotation
        Annotation to remap
    lookup_table: dict
        Lookup from old class names to new class names
        e.g.:
        {
            'Car': 'Car',
            'Truck': 'Car',
            'Motorcycle': 'Motorcycle'
        }
        NOTE: instance masks whose class is not in the lookup are dropped
    original_ontology: InstanceSegmentationOntology
        Ontology we are remapping annotation from
    remapped_ontology: InstanceSegmentationOntology
        Ontology we are mapping annotation to
    Returns
    -------
    PanopticSegmentation2DAnnotation:
        Remapped annotation
    """
    assert (
        isinstance(original_ontology, InstanceSegmentationOntology)
        and isinstance(remapped_ontology, InstanceSegmentationOntology)
    )
    # Iterate over instance masks constructing mask with remapped class for each
    remapped_masklist = []
    for instance_mask in instance_segmentation_annotation:
        original_class_name = original_ontology.contiguous_id_to_name[instance_mask.class_id]
        if original_class_name in lookup_table:
            # Remap class_id in mask (NOTE: mutates the input mask object in place)
            remapped_class_id = remapped_ontology.name_to_contiguous_id[lookup_table[original_class_name]]
            instance_mask.class_id = remapped_class_id
            remapped_masklist.append(instance_mask)
    assert isinstance(instance_segmentation_annotation, PanopticSegmentation2DAnnotation)
    # Rebuild a panoptic annotation with the same image shape/dtype as the input.
    return PanopticSegmentation2DAnnotation.from_masklist(
        remapped_masklist, remapped_ontology, instance_segmentation_annotation.panoptic_image.shape,
        instance_segmentation_annotation.panoptic_image_dtype
    )
def construct_remapped_ontology(ontology, lookup, annotation_key):
    """Given an Ontology object and a lookup from old class names to new class names, construct
    an ontology proto for the new ontology that results
    Parameters
    ----------
    ontology: dgp.annotations.Ontology
        Ontology we are trying to remap using `lookup`
        eg. ontology.id_to_name = {0: 'Car', 1: 'Truck', 2: 'Motorcycle'}
    lookup: dict
        Lookup from old class names to new class names
        e.g.:
        {
            'Car': 'Car',
            'Truck': 'Car',
            'Motorcycle': 'Motorcycle'
        }
        NOTE: `lookup` needs to be exhaustive; any classes that the user wants to have in returned
        ontology need to be remapped explicitly
    annotation_key: str
        Annotation key of Ontology
        e.g. `bounding_box_2d`
    Returns
    -------
    remapped_ontology_pb2: dgp.proto.ontology_pb2.Ontology
        Ontology defined by applying `lookup` on original `ontology`
        NOTE: This is constructed by iterating over class names in `lookup.keys()` in
        alphabetical order, so if both 'Car' and 'Motorcycle' get remapped to 'DynamicObject', the
        color for 'DynamicObject' will be the original color for 'Car'
        Any class names not in `lookup` are dropped
    Notes
    -----
    This could be a class function of `Ontology`
    """
    # Will work with top-level Ontology class here for util to be generic
    assert isinstance(ontology, Ontology), f'Expected Ontology, got {type(ontology)}'
    # Construct lookup from new class name to original class names that map to it
    remapped_class_name_to_original_class_names = OrderedDict()
    for class_name, remapped_class_name in lookup.items():  # NOTE: this assumes Ordered
        if remapped_class_name not in remapped_class_name_to_original_class_names:
            remapped_class_name_to_original_class_names[remapped_class_name] = []
        remapped_class_name_to_original_class_names[remapped_class_name].append(class_name)
    # Sort alphabetically
    remapped_class_name_to_original_class_names = {
        k: sorted(v)
        for k, v in remapped_class_name_to_original_class_names.items()
    }
    remapped_ontology_pb2 = OntologyPB2()
    for remapped_class_id, (remapped_class_name,
                            original_class_names) in enumerate(remapped_class_name_to_original_class_names.items()):
        # Get class_id and color for class name that we're remapping
        original_class_ids = [ontology.name_to_id[class_name] for class_name in original_class_names]
        isthing = [ontology.isthing[class_id] for class_id in original_class_ids]
        # NOTE: Except semantic_segmentation_2d, classes being grouped together can only be all "thing" or all "stuff" classes
        if annotation_key == 'semantic_segmentation_2d':
            # semantic_segmentation_2d should only be stuff
            isthing = False
        else:
            # Enforce that classes mapping to the same class are either all things or all stuff
            assert len(set(isthing)) == 1, "Classes mapping to the same class are either all things or all stuff"
            isthing = isthing[0]
        # Keep first color from original class names (sorted alphabetically)
        remapped_class_color = ontology.colormap[original_class_ids[0]]
        # Construct remapped ontology item
        remapped_ontology_pb2.items.extend([
            OntologyItem(
                name=remapped_class_name,
                id=remapped_class_id,
                isthing=isthing,
                color=OntologyItem.Color(
                    r=remapped_class_color[0], g=remapped_class_color[1], b=remapped_class_color[2]
                )
            )
        ])
    # semantic segmentation 2d will always have a VOID class
    if annotation_key == 'semantic_segmentation_2d' and \
            not Ontology.VOID_CLASS in remapped_class_name_to_original_class_names:
        remapped_ontology_pb2.items.extend([
            OntologyItem(
                name=Ontology.VOID_CLASS, id=Ontology.VOID_ID, isthing=False, color=OntologyItem.Color(r=0, g=0, b=0)
            )
        ])
return remapped_ontology_pb2 | dgp/annotations/transform_utils.py | from collections import OrderedDict
import numpy as np
from dgp.annotations import (
BoundingBoxOntology, InstanceSegmentationOntology, Ontology, PanopticSegmentation2DAnnotation,
SemanticSegmentation2DAnnotation, SemanticSegmentationOntology
)
from dgp.proto.ontology_pb2 import Ontology as OntologyPB2
from dgp.proto.ontology_pb2 import OntologyItem
def remap_bounding_box_annotations(bounding_box_annotations, lookup_table, original_ontology, remapped_ontology):
    """Remap bounding box annotations onto a new ontology.

    Parameters
    ----------
    bounding_box_annotations: BoundingBox2DAnnotationList or BoundingBox3DAnnotationList
        Annotations to remap.
    lookup_table: dict
        Maps old class names to new class names, e.g.
        {'Car': 'Car', 'Truck': 'Car', 'Motorcycle': 'Motorcycle'}.
        Boxes whose class is absent from the lookup are dropped.
    original_ontology: BoundingBoxOntology
        Ontology the annotations currently use.
    remapped_ontology: BoundingBoxOntology
        Ontology the annotations are mapped onto.

    Returns
    -------
    BoundingBox2DAnnotationList or BoundingBox3DAnnotationList
        Remapped annotations, same concrete type as the input.
    """
    assert (isinstance(original_ontology, BoundingBoxOntology) and isinstance(remapped_ontology, BoundingBoxOntology))
    # Keep only boxes covered by the lookup, rewriting their class ids as we go.
    kept_boxes = []
    for annotation in bounding_box_annotations:
        class_name = original_ontology.contiguous_id_to_name[annotation.class_id]
        if class_name not in lookup_table:
            continue
        # NOTE: mutates the incoming annotation object in place.
        annotation.class_id = remapped_ontology.name_to_contiguous_id[lookup_table[class_name]]
        kept_boxes.append(annotation)
    # Preserve the concrete annotation-list type (2D or 3D).
    return type(bounding_box_annotations)(remapped_ontology, kept_boxes)
def remap_semantic_segmentation_2d_annotation(
    semantic_segmentation_annotation, lookup_table, original_ontology, remapped_ontology
):
    """Remap a 2D semantic segmentation annotation onto a new ontology.

    Parameters
    ----------
    semantic_segmentation_annotation: SemanticSegmentation2DAnnotation
        Annotation to remap.
    lookup_table: dict
        Maps old class names to new class names, e.g.
        {'Car': 'Car', 'Truck': 'Car', 'Motorcycle': 'Motorcycle'}.
        Pixels of classes absent from the lookup become VOID.
    original_ontology: SemanticSegmentationOntology
        Ontology the annotation currently uses.
    remapped_ontology: SemanticSegmentationOntology
        Ontology the annotation is mapped onto.

    Returns
    -------
    SemanticSegmentation2DAnnotation
        Remapped annotation.
    """
    assert (isinstance(original_ontology, SemanticSegmentationOntology) and \
        isinstance(remapped_ontology, SemanticSegmentationOntology))
    source_label = semantic_segmentation_annotation.label
    # Start from an all-VOID image; only classes present in the lookup are filled in.
    target_label = np.full_like(source_label, Ontology.VOID_ID)
    for old_name in lookup_table:
        old_id = original_ontology.name_to_contiguous_id[old_name]
        new_id = remapped_ontology.name_to_contiguous_id[lookup_table[old_name]]
        # pylint: disable=E1137
        target_label[source_label == old_id] = new_id
        # pylint: enable=E1137
    return SemanticSegmentation2DAnnotation(remapped_ontology, target_label)
def remap_instance_segmentation_2d_annotation(
    instance_segmentation_annotation, lookup_table, original_ontology, remapped_ontology
):
    """Remap a panoptic/instance segmentation annotation onto a new ontology.

    Parameters
    ----------
    instance_segmentation_annotation: PanopticSegmentation2DAnnotation
        Annotation to remap.
    lookup_table: dict
        Maps old class names to new class names, e.g.
        {'Car': 'Car', 'Truck': 'Car', 'Motorcycle': 'Motorcycle'}.
        Instance masks whose class is absent from the lookup are dropped.
    original_ontology: InstanceSegmentationOntology
        Ontology the annotation currently uses.
    remapped_ontology: InstanceSegmentationOntology
        Ontology the annotation is mapped onto.

    Returns
    -------
    PanopticSegmentation2DAnnotation
        Remapped annotation with the same panoptic image shape and dtype.
    """
    assert (
        isinstance(original_ontology, InstanceSegmentationOntology)
        and isinstance(remapped_ontology, InstanceSegmentationOntology)
    )
    # Keep only masks covered by the lookup, rewriting their class ids as we go.
    kept_masks = []
    for mask in instance_segmentation_annotation:
        class_name = original_ontology.contiguous_id_to_name[mask.class_id]
        if class_name not in lookup_table:
            continue
        # NOTE: mutates the incoming mask object in place.
        mask.class_id = remapped_ontology.name_to_contiguous_id[lookup_table[class_name]]
        kept_masks.append(mask)
    assert isinstance(instance_segmentation_annotation, PanopticSegmentation2DAnnotation)
    return PanopticSegmentation2DAnnotation.from_masklist(
        kept_masks,
        remapped_ontology,
        instance_segmentation_annotation.panoptic_image.shape,
        instance_segmentation_annotation.panoptic_image_dtype
    )
def construct_remapped_ontology(ontology, lookup, annotation_key):
    """Given an Ontology object and a lookup from old class names to new class names, construct
    an ontology proto for the new ontology that results
    Parameters
    ----------
    ontology: dgp.annotations.Ontology
        Ontology we are trying to remap using `lookup`
        eg. ontology.id_to_name = {0: 'Car', 1: 'Truck', 2: 'Motorcycle'}
    lookup: dict
        Lookup from old class names to new class names
        e.g.:
        {
            'Car': 'Car',
            'Truck': 'Car',
            'Motorcycle': 'Motorcycle'
        }
        NOTE: `lookup` needs to be exhaustive; any classes that the user wants to have in returned
        ontology need to be remapped explicitly
    annotation_key: str
        Annotation key of Ontology
        e.g. `bounding_box_2d`
    Returns
    -------
    remapped_ontology_pb2: dgp.proto.ontology_pb2.Ontology
        Ontology defined by applying `lookup` on original `ontology`
        NOTE: This is constructed by iterating over class names in `lookup.keys()` in
        alphabetical order, so if both 'Car' and 'Motorcycle' get remapped to 'DynamicObject', the
        color for 'DynamicObject' will be the original color for 'Car'
        Any class names not in `lookup` are dropped
    Notes
    -----
    This could be a class function of `Ontology`
    """
    # Will work with top-level Ontology class here for util to be generic
    assert isinstance(ontology, Ontology), f'Expected Ontology, got {type(ontology)}'
    # Construct lookup from new class name to original class names that map to it
    # NOTE: insertion order of `lookup` determines the order (and hence ids) of remapped classes
    remapped_class_name_to_original_class_names = OrderedDict()
    for class_name, remapped_class_name in lookup.items():
        remapped_class_name_to_original_class_names.setdefault(remapped_class_name, []).append(class_name)
    # Sort original names alphabetically so color selection below is deterministic
    remapped_class_name_to_original_class_names = {
        k: sorted(v)
        for k, v in remapped_class_name_to_original_class_names.items()
    }
    remapped_ontology_pb2 = OntologyPB2()
    for remapped_class_id, (remapped_class_name,
                            original_class_names) in enumerate(remapped_class_name_to_original_class_names.items()):
        # Get class_id and color for class name that we're remapping
        original_class_ids = [ontology.name_to_id[class_name] for class_name in original_class_names]
        isthing = [ontology.isthing[class_id] for class_id in original_class_ids]
        # NOTE: Except for semantic_segmentation_2d, classes grouped together must be all "thing"
        # classes or all "stuff" classes
        if annotation_key == 'semantic_segmentation_2d':
            # semantic_segmentation_2d should only be stuff
            isthing = False
        else:
            # Enforce that classes mapping to the same class are either all things or all stuff
            assert len(set(isthing)) == 1, "Classes mapping to the same class are either all things or all stuff"
            isthing = isthing[0]
        # Keep first color from original class names (sorted alphabetically)
        remapped_class_color = ontology.colormap[original_class_ids[0]]
        # Construct remapped ontology item
        remapped_ontology_pb2.items.extend([
            OntologyItem(
                name=remapped_class_name,
                id=remapped_class_id,
                isthing=isthing,
                color=OntologyItem.Color(
                    r=remapped_class_color[0], g=remapped_class_color[1], b=remapped_class_color[2]
                )
            )
        ])
    # semantic segmentation 2d will always have a VOID class
    if annotation_key == 'semantic_segmentation_2d' and \
            Ontology.VOID_CLASS not in remapped_class_name_to_original_class_names:
        remapped_ontology_pb2.items.extend([
            OntologyItem(
                name=Ontology.VOID_CLASS, id=Ontology.VOID_ID, isthing=False, color=OntologyItem.Color(r=0, g=0, b=0)
            )
        ])
    return remapped_ontology_pb2
import pandas as pd
import numpy as np
import more_itertools
import datetime
import logging
logger = logging.getLogger(__name__)
def parse(raw_response):
    """Convert a raw Adobe Analytics JSON response into a pandas DataFrame."""
    logger.info("Parsing raw json response.")
    report = raw_response["report"]
    raw_data = report["data"]
    dimensions, metrics = _parse_header(report)
    data = _parse_data(raw_data, metric_count=len(metrics))
    header = _fix_header(dimensions, metrics, data)
    return pd.DataFrame(data, columns=header)
def _parse_header(report):
    """Return (dimension names, metric names) extracted from the report header."""
    logger.debug("Parsing dimensions and metrics.")
    dimensions = [_classification_or_name(dimension) for dimension in report["elements"]]
    metrics = [metric["name"] for metric in report["metrics"]]
    return dimensions, metrics
def _classification_or_name(element):
    """Prefer an element's classification label over its raw name."""
    if "classification" in element:
        return element["classification"]
    return element["name"]
def _parse_data(data, metric_count):
    """
    Recursive parsing of the "data" part of the Adobe response.
    Each breakdown level prepends its dimension value to every row produced
    by the levels below it.
    :param data: list of dicts and lists. quite a complicated structure
    :param metric_count: int, number of metrics in report
    :return: list of lists
    """
    logger.debug("Parsing report data (recursively).")
    if len(data) > 0 and "breakdown" in data[0]:
        rows = list()
        for chunk in data:
            dim_value = _dimension_value(chunk)
            rows += [[dim_value] + row
                     for row in _parse_data(chunk["breakdown"], metric_count)]
        return rows
    else:
        # No further breakdown: this is the deepest level of the report.
        return _parse_most_granular(data, metric_count)
def _parse_most_granular(data, metric_count):
    """
    Parsing of the most granular part of the response.
    It is different depending on if there's a granularity breakdown or not
    :param data: list of dicts, most granular report chunks
    :param metric_count: int, number of metrics in report
    :return: list of lists
    """
    logger.debug("Parsing most granular level of data.")
    rows = list()
    for chunk in data:
        part_rows = [(val if val != "" else np.nan) for val in chunk["counts"]]
        # data alignment is a bit different if adding granularity breakdowns
        if len(chunk["counts"]) > metric_count:
            # each group carries one extra (datetime) value in front of the metrics
            part_rows = more_itertools.chunked(iterable=part_rows, n=metric_count + 1)
        else:
            part_rows = [part_rows]
        dim_value = _dimension_value(chunk)
        rows += [[dim_value] + part_row for part_row in part_rows]
    return rows
def _dimension_value(chunk):
    """Return NaN for unnamed chunks, a datetime string for date chunks, else the name."""
    if _dimension_value_is_nan(chunk):
        return np.nan
    elif "year" in chunk:
        return _to_datetime(chunk)
    else:
        return chunk["name"]
def _dimension_value_is_nan(chunk):
    """True when the chunk has no usable name (missing, empty, or '::unspecified::')."""
    return ("name" not in chunk) or (chunk["name"] == "") or (chunk["name"] == "::unspecified::")
def _to_datetime(chunk):
    """Format the chunk's date parts as 'YYYY-MM-DD HH:00:00' (hour defaults to 0)."""
    time_stamp = datetime.datetime(
        year=chunk["year"],
        month=chunk["month"],
        day=chunk["day"],
        hour=chunk.get("hour", 0)
    )
    return time_stamp.strftime("%Y-%m-%d %H:00:00")
def _fix_header(dimensions, metrics, data):
    """Prepend 'Datetime' when rows carry an extra granularity column."""
    header = dimensions + metrics
    if len(header) != len(data[0]): # can only be when granularity breakdown is used
        return ["Datetime"] + header
return header | adobe_analytics/reports/parse.py | import pandas as pd
import numpy as np
import more_itertools
import datetime
import logging
logger = logging.getLogger(__name__)
def parse(raw_response):
    """Turn a raw Adobe Analytics JSON response into a pandas DataFrame."""
    logger.info("Parsing raw json response.")
    report = raw_response["report"]
    dimensions, metrics = _parse_header(report)
    rows = _parse_data(report["data"], metric_count=len(metrics))
    columns = _fix_header(dimensions, metrics, rows)
    return pd.DataFrame(rows, columns=columns)
def _parse_header(report):
    """Extract dimension and metric column names from the report header."""
    logger.debug("Parsing dimensions and metrics.")
    dimension_names = [_classification_or_name(element) for element in report["elements"]]
    metric_names = [entry["name"] for entry in report["metrics"]]
    return dimension_names, metric_names
def _classification_or_name(element):
if "classification" in element:
return element["classification"]
return element["name"]
def _parse_data(data, metric_count):
    """
    Recursive parsing of the "data" part of the Adobe response.
    Each breakdown level prepends its dimension value to every row produced
    by the levels below it.
    :param data: list of dicts and lists. quite a complicated structure
    :param metric_count: int, number of metrics in report
    :return: list of lists
    """
    logger.debug("Parsing report data (recursively).")
    if len(data) > 0 and "breakdown" in data[0]:
        rows = list()
        for chunk in data:
            dim_value = _dimension_value(chunk)
            rows += [[dim_value] + row
                     for row in _parse_data(chunk["breakdown"], metric_count)]
        return rows
    else:
        # No further breakdown: this is the deepest level of the report.
        return _parse_most_granular(data, metric_count)
def _parse_most_granular(data, metric_count):
    """
    Parse the deepest level of a report response.

    The layout differs depending on whether a granularity breakdown is
    present: with granularity, each row carries one extra (datetime) value,
    so the counts are split into slices of ``metric_count + 1``.

    :param data: list of dicts, most granular report chunks
    :param metric_count: int, number of metrics in report
    :return: list of lists
    """
    logger.debug("Parsing most granular level of data.")
    rows = list()
    for chunk in data:
        counts = [np.nan if value == "" else value for value in chunk["counts"]]
        if len(counts) > metric_count:
            # granularity breakdown: slice into (datetime + metrics) groups
            width = metric_count + 1
            groups = [counts[i:i + width] for i in range(0, len(counts), width)]
        else:
            groups = [counts]
        prefix = _dimension_value(chunk)
        rows.extend([prefix] + group for group in groups)
    return rows
def _dimension_value(chunk):
    """Return the chunk's dimension value: NaN, a timestamp string, or its name."""
    if _dimension_value_is_nan(chunk):
        return np.nan
    if "year" in chunk:
        return _to_datetime(chunk)
    return chunk["name"]
def _dimension_value_is_nan(chunk):
return ("name" not in chunk) or (chunk["name"] == "") or (chunk["name"] == "::unspecified::")
def _to_datetime(chunk):
time_stamp = datetime.datetime(
year=chunk["year"],
month=chunk["month"],
day=chunk["day"],
hour=chunk.get("hour", 0)
)
return time_stamp.strftime("%Y-%m-%d %H:00:00")
def _fix_header(dimensions, metrics, data):
header = dimensions + metrics
if len(header) != len(data[0]): # can only be when granularity breakdown is used
return ["Datetime"] + header
return header | 0.692642 | 0.320582 |
import hashlib
import json
import logging
import uuid
from collections import OrderedDict
from os.path import join
from pathlib import Path
from . import _oyaml as oyaml
logger = logging.getLogger(__name__)
def construct_filename(
    name,
    pretagname=None,
    tagname=None,
    t1=None,
    t2=None,
    subfolder=None,
    fmu=1,
    outroot="../../share/results/",
    loc="surface",
    verbosity="WARNING",
):
    """Construct filename stem according to datatype (class) and fmu style.

    For fmu style 1 the stem is built as
    ``[pretagname--]name[--tagname][--t1 | --t2_t1]`` (all lowercased), e.g.
    ``topvolantis--ds_gf_extracted`` or ``geogrid_valysar--phit``.

    Dots and spaces are replaced with underscores in the stem: multiple
    dots in a stem would otherwise be misread as a file suffix by
    pathlib-based consumers (temporary workaround while this is being
    addressed in XTgeo).

    The destination folder under ``outroot`` is selected from ``loc``
    (surface/grid/table/polygons/cube, anything else goes to "other"),
    with ``subfolder`` appended when given.

    Returns:
        tuple: (filename stem, destination Path)
    """
    logger.setLevel(level=verbosity)
    stem = "unset"
    outroot = Path(outroot)
    if fmu == 1:
        stem = name.lower()
        if tagname:
            stem = stem + "--" + tagname.lower()
        if pretagname:
            stem = pretagname.lower() + "--" + stem
        if t1 and t2:
            stem = stem + "--" + str(t2).lower() + "_" + str(t1).lower()
        elif t1:
            stem = stem + "--" + str(t1).lower()
        stem = stem.replace(".", "_").replace(" ", "_")
    # Map data location to its conventional results subfolder.
    folder_by_loc = {
        "surface": "maps",
        "grid": "grids",
        "table": "tables",
        "polygons": "polygons",
        "cube": "cubes",
    }
    dest = outroot / folder_by_loc.get(loc, "other")
    if subfolder:
        dest = dest / subfolder
    return stem, dest
def verify_path(dataio, filedest, filename, ext, dryrun=False):
    """Resolve and verify all output paths for an export.

    Builds the data-file path, the hidden metadata-file path, a
    repository-relative path, and the absolute path. Unless ``dryrun``,
    ensures the destination folder exists (creating it when
    ``dataio.createfolder`` is set).

    Args:
        dataio: exporter instance; provides ``_pwd``, ``_verbosity``,
            ``createfolder``, ``_realfolder`` and ``_iterfolder``.
        filedest: destination folder, relative to ``dataio._pwd``.
        filename: file name stem (lowercased here).
        ext: file extension, including the leading dot.
        dryrun: if True, skip folder existence check and creation.

    Returns:
        tuple: (path, metapath, relpath, abspath)

    Raises:
        IOError: if the folder is missing and ``createfolder`` is False.
    """
    logger.setLevel(level=dataio._verbosity)
    logger.debug("Incoming filedest is %s", filedest)
    logger.debug("Incoming filename is %s", filename)
    logger.debug("Incoming ext is %s", ext)
    folder = dataio._pwd / filedest  # filedest shall be relative path to PWD
    path = Path(folder) / filename.lower()
    # Append ext after any suffix the stem already has (so a dot already in
    # the name is kept rather than replaced).
    path = path.with_suffix(path.suffix + ext)
    abspath = path.resolve()
    logger.debug("path is %s", path)
    if not dryrun:
        if path.parent.exists():
            logger.info("Folder exists")
        else:
            if dataio.createfolder:
                logger.info("No such folder, will create")
                path.parent.mkdir(parents=True, exist_ok=True)
            else:
                raise IOError(f"Folder {str(path.parent)} is not present.")
    # create metafile path: same folder, hidden ("."-prefixed) name + ".yml"
    metapath = (
        (Path(folder) / ("." + filename.lower())).with_suffix(ext + ".yml")
    ).resolve()
    # relative path: strip any leading "../" segments from filedest
    relpath = str(filedest).replace("../", "")
    if dataio._realfolder is not None and dataio._iterfolder is not None:
        relpath = join(f"{dataio._realfolder}/{dataio._iterfolder}", relpath)
    relpath = join(f"{relpath}/{filename.lower()}{ext}")
    logger.info("Full path to the actual file is: %s", abspath)
    logger.info("Full path to the metadata file (if used) is: %s", metapath)
    logger.info("Relative path to actual file: %s", relpath)
    return path, metapath, relpath, abspath
def drop_nones(dinput: dict) -> dict:
    """Recursively drop Nones in dict dinput and return a new dict."""
    # https://stackoverflow.com/a/65379092
    cleaned = {}
    for key, val in dinput.items():
        if isinstance(val, dict):
            cleaned[key] = drop_nones(val)
        elif isinstance(val, (list, set, tuple)):
            # Nones inside sequences are deliberately kept; only nested
            # dicts within them are cleaned.
            items = (
                drop_nones(item) if isinstance(item, dict) else item
                for item in val
            )
            cleaned[key] = type(val)(items)
        elif val is not None:
            cleaned[key] = val
    return cleaned
def export_metadata_file(yfile, metadata, savefmt="yaml", verbosity="WARNING") -> None:
    """Export genericly and ordered to the complementary metadata file."""
    logger.setLevel(level=verbosity)
    if not metadata:
        raise RuntimeError(
            "Export of metadata was requested, but no metadata are present."
        )
    xdata = drop_nones(metadata)
    if savefmt == "yaml":
        with open(yfile, "w") as stream:
            stream.write(oyaml.safe_dump(xdata))
    else:
        # Any non-yaml format is written as JSON next to the yml path.
        jfile = str(yfile).replace(".yml", ".json")
        with open(jfile, "w") as stream:
            stream.write(json.dumps(xdata, default=str, indent=2))
    logger.info("Yaml file on: %s", yfile)
def md5sum(fname):
    """Return the hex MD5 digest of the file at *fname*, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(fname, "rb") as stream:
        while True:
            chunk = stream.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def size(fname):
    """Return the size of the file at *fname* in bytes."""
    fileinfo = Path(fname).stat()
    return fileinfo.st_size
def uuid_from_string(string):
    """Produce valid and repeteable UUID4 as a hash of given string"""
    digest = hashlib.md5(string.encode("utf-8")).hexdigest()
    return uuid.UUID(digest)
def read_parameters_txt(pfile):
    """Read the parameters.txt file and convert to a dict.

    Lines are whitespace-separated ``KEY VALUE`` pairs (possibly
    tab-justified, a legacy format kept alive by some users)::

        SENSNAME rms_seed
        RMS_SEED 1000
        GLOBVAR:VOLON_PERMH_CHANNEL 1100
        LOG10_MULTREGT:MULT_THERYS_VOLON -3.21365

    Keys of the form ``GROUP:NAME`` become nested dicts, so the above
    parses to::

        {
            "SENSNAME": "rms_seed",
            "RMS_SEED": 1000,
            "GLOBVAR": {"VOLON_PERMH_CHANNEL": 1100},
            "LOG10_MULTREGT": {"MULT_THERYS_VOLON": -3.21365},
        }

    Values are converted to int/float where possible.
    """
    logger.debug("Reading parameters.txt from %s", pfile)
    with open(pfile, "r") as stream:
        lines = stream.read().splitlines()
    logger.debug("buffer is of type %s", type(lines))
    logger.debug("buffer has %s lines", str(len(lines)))
    param = OrderedDict()
    for rawline in lines:
        # Normalize arbitrary whitespace/tab justification to a ":"-joined
        # form, then split: 2 tokens = flat key, 3 tokens = grouped key.
        normalized = ":".join(rawline.split())
        tokens = normalized.split(":")
        if len(tokens) == 2:
            key, value = tokens
            param[key] = check_if_number(value)
        elif len(tokens) == 3:
            group, key, value = tokens
            param.setdefault(group, OrderedDict())[key] = check_if_number(value)
        else:
            raise RuntimeError(
                f"Unexpected structure of parameters.txt, line is: {normalized}"
            )
    return param
def check_if_number(value):
    """Check if value (str) looks like a number and return the converted value."""
    # Try the narrowest type first; fall through to the original string.
    for converter in (int, float):
        try:
            return converter(value)
        except ValueError:
            continue
    return value
import json
import logging
import uuid
from collections import OrderedDict
from os.path import join
from pathlib import Path
from . import _oyaml as oyaml
logger = logging.getLogger(__name__)
def construct_filename(
name,
pretagname=None,
tagname=None,
t1=None,
t2=None,
subfolder=None,
fmu=1,
outroot="../../share/results/",
loc="surface",
verbosity="WARNING",
):
"""Construct filename stem according to datatype (class) and fmu style.
fmu style 1:
surface:
namehorizon--tagname
namehorizon--tagname--t1
namehorizon--tagname--t2_t1
e.g.
topvolantis--ds_gf_extracted
therys--facies_fraction_lowershoreface
grid (geometry):
gridname--<hash>
gridproperty
gridname--proptagname
gridname--tagname--t1
gridname--tagname--t2_t1
e.g.
geogrid_valysar--phit
Destinations accoring to datatype.
Removing dots from filename:
Currently, when multiple dots in a filename stem,
XTgeo, using pathlib, will interpret the part after the
last dot as the file suffix, and remove it. This causes
errors in the output filenames. While this is being
taken care of in XTgeo, we temporarily sanitize dots from
the outgoing filename only to avoid this.
Space will also be replaced in file names.
Returns stem for file name and destination
"""
logger.setLevel(level=verbosity)
stem = "unset"
outroot = Path(outroot)
if fmu == 1:
stem = name.lower()
if tagname:
stem += "--" + tagname.lower()
if pretagname:
stem = pretagname.lower() + "--" + stem
if t1 and not t2:
stem += "--" + str(t1).lower()
elif t1 and t2:
stem += "--" + str(t2).lower() + "_" + str(t1).lower()
stem = stem.replace(".", "_").replace(" ", "_")
if loc == "surface":
dest = outroot / "maps"
elif loc == "grid":
dest = outroot / "grids"
elif loc == "table":
dest = outroot / "tables"
elif loc == "polygons":
dest = outroot / "polygons"
elif loc == "cube":
dest = outroot / "cubes"
else:
dest = outroot / "other"
if subfolder:
dest = dest / subfolder
return stem, dest
def verify_path(dataio, filedest, filename, ext, dryrun=False):
logger.setLevel(level=dataio._verbosity)
logger.debug("Incoming filedest is %s", filedest)
logger.debug("Incoming filename is %s", filename)
logger.debug("Incoming ext is %s", ext)
folder = dataio._pwd / filedest # filedest shall be relative path to PWD
path = Path(folder) / filename.lower()
path = path.with_suffix(path.suffix + ext)
abspath = path.resolve()
logger.debug("path is %s", path)
if not dryrun:
if path.parent.exists():
logger.info("Folder exists")
else:
if dataio.createfolder:
logger.info("No such folder, will create")
path.parent.mkdir(parents=True, exist_ok=True)
else:
raise IOError(f"Folder {str(path.parent)} is not present.")
# create metafile path
metapath = (
(Path(folder) / ("." + filename.lower())).with_suffix(ext + ".yml")
).resolve()
# relative path
relpath = str(filedest).replace("../", "")
if dataio._realfolder is not None and dataio._iterfolder is not None:
relpath = join(f"{dataio._realfolder}/{dataio._iterfolder}", relpath)
relpath = join(f"{relpath}/{filename.lower()}{ext}")
logger.info("Full path to the actual file is: %s", abspath)
logger.info("Full path to the metadata file (if used) is: %s", metapath)
logger.info("Relative path to actual file: %s", relpath)
return path, metapath, relpath, abspath
def drop_nones(dinput: dict) -> dict:
"""Recursively drop Nones in dict dinput and return a new dict."""
# https://stackoverflow.com/a/65379092
dd = {}
for key, val in dinput.items():
if isinstance(val, dict):
dd[key] = drop_nones(val)
elif isinstance(val, (list, set, tuple)):
# note: Nones in lists are not dropped
# simply add "if vv is not None" at the end if required
dd[key] = type(val)(
drop_nones(vv) if isinstance(vv, dict) else vv for vv in val
)
elif val is not None:
dd[key] = val
return dd
def export_metadata_file(yfile, metadata, savefmt="yaml", verbosity="WARNING") -> None:
"""Export genericly and ordered to the complementary metadata file."""
logger.setLevel(level=verbosity)
if metadata:
xdata = drop_nones(metadata)
if savefmt == "yaml":
yamlblock = oyaml.safe_dump(xdata)
with open(yfile, "w") as stream:
stream.write(yamlblock)
else:
jfile = str(yfile).replace(".yml", ".json")
jsonblock = json.dumps(xdata, default=str, indent=2)
with open(jfile, "w") as stream:
stream.write(jsonblock)
else:
raise RuntimeError(
"Export of metadata was requested, but no metadata are present."
)
logger.info("Yaml file on: %s", yfile)
def md5sum(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as fil:
for chunk in iter(lambda: fil.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def size(fname):
return Path(fname).stat().st_size
def uuid_from_string(string):
"""Produce valid and repeteable UUID4 as a hash of given string"""
return uuid.UUID(hashlib.md5(string.encode("utf-8")).hexdigest())
def read_parameters_txt(pfile):
"""Read the parameters.txt file and convert to a dict.
The parameters.txt file has this structure::
SENSNAME rms_seed
SENSCASE p10_p90
RMS_SEED 1000
KVKH_CHANNEL 0.6
KVKH_CREVASSE 0.3
GLOBVAR:VOLON_FLOODPLAIN_VOLFRAC 0.256355
GLOBVAR:VOLON_PERMH_CHANNEL 1100
GLOBVAR:VOLON_PORO_CHANNEL 0.2
LOG10_GLOBVAR:FAULT_SEAL_SCALING 0.685516
LOG10_MULTREGT:MULT_THERYS_VOLON -3.21365
LOG10_MULTREGT:MULT_VALYSAR_THERYS -3.2582
...but may also appear on a justified format, with leading
whitespace and tab-justified columns, legacy from earlier
versions but kept alive by some users::
SENSNAME rms_seed
SENSCASE p10_p90
RMS_SEED 1000
KVKH_CHANNEL 0.6
GLOBVAR:VOLON_PERMH_CHANNEL 1100
LOG10_GLOBVAR:FAULT_SEAL_SCALING 0.685516
LOG10_MULTREGT:MULT_THERYS_VOLON -3.21365
This should be parsed as::
{
"SENSNAME": "rms_seed"
"SENSCASE": "p10_p90"
"RMS_SEED": 1000
"KVKH_CHANNEL": 0.6
"KVKH_CREVASSE": 0.3
"GLOBVAR": {"VOLON_FLOODPLAIN_VOLFRAC": 0.256355, ...etc}
}
"""
logger.debug("Reading parameters.txt from %s", pfile)
with open(pfile, "r") as stream:
buffer = stream.read().splitlines()
logger.debug("buffer is of type %s", type(buffer))
logger.debug("buffer has %s lines", str(len(buffer)))
buffer = [":".join(line.split()) for line in buffer]
param = OrderedDict()
for line in buffer:
items = line.split(":")
if len(items) == 2:
param[items[0]] = check_if_number(items[1])
elif len(items) == 3:
if items[0] not in param:
param[items[0]] = OrderedDict()
param[items[0]][items[1]] = check_if_number(items[2])
else:
raise RuntimeError(
f"Unexpected structure of parameters.txt, line is: {line}"
)
return param
def check_if_number(value):
"""Check if value (str) looks like a number and return the converted value."""
res = None
try:
res = int(value)
except ValueError:
try:
res = float(value)
except ValueError:
pass
if res is not None:
return res
return value | 0.563498 | 0.232986 |
import os
import unittest
from schablonesk.ast_printer import AstPrinter
from schablonesk.scanner import Scanner
from schablonesk.parser import Parser
class ParserTest(unittest.TestCase):
    """Smoke tests for the schablonesk parser.

    Each test scans a template snippet, parses it, asserts an AST was
    produced, and prints the AST for visual inspection.
    """

    def setUp(self):
        self.scanner = Scanner()

    def test_parse_for_stmt(self):
        code = """
        :> for item in list
            :> cond
            :> is_first
                print("List")
            :> endcond
            print("Item")
        :> endfor
        """
        ast = Parser(self.scanner.scan(code)).parse()
        self.assertIsNotNone(ast)
        AstPrinter().print(ast)

    def test_parse_for_stmt_with_filter(self):
        code = """
        :> for item in list where item.has_todo == true or day > 5
            print("Item")
        :> endfor
        """
        ast = Parser(self.scanner.scan(code)).parse()
        self.assertIsNotNone(ast)
        AstPrinter().print(ast)

    def test_parse_logical_expr(self):
        code = """
        :> cond not (a <> zero) and (b == one or c <= two)
            print("OK")
        :> endcond
        """
        ast = Parser(self.scanner.scan(code)).parse()
        self.assertIsNotNone(ast)
        AstPrinter().print(ast)

    def test_parse_snippet(self):
        code = """:> snippet say_hello (greeting name)
        $(greeting) $(name)!
        :> endsnippet
        :> paste say_hello('Hallo' 'Thomas')"""
        ast = Parser(self.scanner.scan(code)).parse()
        self.assertIsNotNone(ast)
        AstPrinter().print(ast)

    def test_parse_call(self):
        code = ":> add(1 sub(43 2))"
        ast = Parser(self.scanner.scan(code)).parse_expr()
        self.assertIsNotNone(ast)
        AstPrinter().print(ast)

    def test_parse_use_all(self):
        code = ":> use 'my_standard'"
        ast = Parser(self.scanner.scan(code)).parse()
        self.assertIsNotNone(ast)
        AstPrinter().print(ast)

    def test_parse_use_some_snippets(self):
        code = ":> use head(header) footer from 'my_standard'"
        ast = Parser(self.scanner.scan(code)).parse()
        self.assertIsNotNone(ast)
        AstPrinter().print(ast)

    def test_parse_file(self):
        file_path = os.path.dirname(__file__) + "/demo.schablonesk"
        code = self._read_file(file_path)
        ast = Parser(self.scanner.scan(code)).parse()
        self.assertIsNotNone(ast)
        AstPrinter().print(ast)

    @staticmethod
    def _read_file(file_path):
        # Context manager guarantees the handle is closed even if the
        # read raises (the old open/readlines/close leaked on error).
        with open(file_path, "r") as f:
            return f.read()
if __name__ == "__main__":
unittest.main() | test/test_parser.py | import os
import unittest
from schablonesk.ast_printer import AstPrinter
from schablonesk.scanner import Scanner
from schablonesk.parser import Parser
class ParserTest(unittest.TestCase):
def setUp(self):
self.scanner = Scanner()
def test_parse_for_stmt(self):
code = """
:> for item in list
:> cond
:> is_first
print("List")
:> endcond
print("Item")
:> endfor
"""
ast = Parser(self.scanner.scan(code)).parse()
self.assertIsNotNone(ast)
AstPrinter().print(ast)
def test_parse_for_stmt_with_filter(self):
code = """
:> for item in list where item.has_todo == true or day > 5
print("Item")
:> endfor
"""
ast = Parser(self.scanner.scan(code)).parse()
self.assertIsNotNone(ast)
AstPrinter().print(ast)
def test_parse_logical_expr(self):
code = """
:> cond not (a <> zero) and (b == one or c <= two)
print("OK")
:> endcond
"""
ast = Parser(self.scanner.scan(code)).parse()
self.assertIsNotNone(ast)
AstPrinter().print(ast)
def test_parse_snippet(self):
code = """:> snippet say_hello (greeting name)
$(greeting) $(name)!
:> endsnippet
:> paste say_hello('Hallo' 'Thomas')"""
ast = Parser(self.scanner.scan(code)).parse()
self.assertIsNotNone(ast)
AstPrinter().print(ast)
def test_parse_call(self):
code = ":> add(1 sub(43 2))"
ast = Parser(self.scanner.scan(code)).parse_expr()
self.assertIsNotNone(ast)
AstPrinter().print(ast)
def test_parse_use_all(self):
code = ":> use 'my_standard'"
ast = Parser(self.scanner.scan(code)).parse()
self.assertIsNotNone(ast)
AstPrinter().print(ast)
def test_parse_use_some_snippets(self):
code = ":> use head(header) footer from 'my_standard'"
ast = Parser(self.scanner.scan(code)).parse()
self.assertIsNotNone(ast)
AstPrinter().print(ast)
def test_parse_file(self):
file_path = os.path.dirname(__file__) + "/demo.schablonesk"
code = self._read_file(file_path)
ast = Parser(self.scanner.scan(code)).parse()
self.assertIsNotNone(ast)
AstPrinter().print(ast)
@staticmethod
def _read_file(file_path):
f = open(file_path, "r")
lines = f.readlines()
f.close()
return "".join(lines)
if __name__ == "__main__":
unittest.main() | 0.428831 | 0.503113 |
import pytest
from flask import json, url_for
from tests.conftest import create_authorization_header
from app.models import Venue
class WhenGettingVenues(object):
    """Tests for the venues listing endpoint."""

    def it_returns_all_venues(self, client, sample_venue, db_session):
        resp = client.get(
            url_for('venues.get_venues'),
            headers=[create_authorization_header()]
        )
        assert resp.status_code == 200
        venues = json.loads(resp.get_data(as_text=True))
        assert len(venues) == 1
class WhenGettingVenueByID(object):
    """Tests for fetching a single venue by its id."""

    def it_returns_correct_venue(self, client, sample_venue, db_session):
        resp = client.get(
            url_for('venue.get_venue_by_id', venue_id=str(sample_venue.id)),
            headers=[create_authorization_header()]
        )
        assert resp.status_code == 200
        venue = json.loads(resp.get_data(as_text=True))
        assert venue['id'] == str(sample_venue.id)
class WhenPostingVenues(object):
    """Tests for bulk venue creation and legacy venue imports."""

    def it_creates_venues(self, client, db_session):
        data = [
            {
                'name': 'London branch',
                'address': '19 Compton Terrace',
                'directions': 'Nearest station: Highbury & Islington',
                'default': True
            },
            {
                'name': 'Test branch',
                'address': '1 Test Street',
                'directions': 'Nearest station: Teston',
                'default': False
            },
        ]
        response = client.post(
            url_for('venues.create_venues'),
            data=json.dumps(data),
            headers=[('Content-Type', 'application/json'), create_authorization_header()]
        )
        assert response.status_code == 201
        json_resp = json.loads(response.get_data(as_text=True))
        assert len(json_resp) == len(data)
        # dicts are not orderable in Python 3 (sorted(data) raised
        # TypeError); sort both sides by name instead.
        returned = [
            {
                'name': j['name'],
                'address': j['address'],
                'directions': j['directions'],
                'default': j['default']
            } for j in json_resp]
        assert (sorted(data, key=lambda v: v['name'])
                == sorted(returned, key=lambda v: v['name']))

    def it_creates_venues_for_imported_venues(self, client, db_session):
        data = [
            {
                "id": "1",
                "name": "",
                "address": "19 Compton Terrace N1 2UN, next door to Union Chapel.",
                "tube": "Highbury & Islington (Victoria Line), 2 minutes walk",
                "bus": "Bus routes 4, 19, 30, 43 & 277 stop nearby"
            },
            {
                "id": "2",
                "name": "Bristol",
                "address": "Caf\u00e9 Revival, 56 Corn Street, Bristol, BS1 1JG",
                "tube": "",
                "bus": ""
            }
        ]
        response = client.post(
            url_for('venues.import_venues'),
            data=json.dumps(data),
            headers=[('Content-Type', 'application/json'), create_authorization_header()]
        )
        assert response.status_code == 201
        json_resp = json.loads(response.get_data(as_text=True))
        assert len(json_resp) == len(data)
        # was range(0, len(data) - 1), which skipped the last venue
        for i in range(len(data)):
            assert json_resp[i]["old_id"] == int(data[i]["id"])
            # parenthesized: an empty name must fall back to 'Head branch'
            # (the old precedence made the assert always pass for empty
            # names by asserting the truthy fallback string itself).
            assert json_resp[i]["name"] == (
                data[i]["name"] if data[i]["name"] else 'Head branch')
            assert json_resp[i]["address"] == data[i]["address"]
            assert json_resp[i]["directions"] == "<div>Bus: {bus}</div><div>Train: {train}</div>".format(
                bus=data[i]["bus"], train=data[i]["tube"])

    def it_does_not_create_venue_with_existing_name(self, client, db_session, sample_venue):
        data = [
            {
                "id": "1",
                "name": sample_venue.name,
                "address": "19 Compton Terrace N1 2UN, next door to Union Chapel.",
                "tube": "Highbury & Islington (Victoria Line), 2 minutes walk",
                "bus": "Bus routes 4, 19, 30, 43 & 277 stop nearby"
            }
        ]
        response = client.post(
            url_for('venues.import_venues'),
            data=json.dumps(data),
            headers=[('Content-Type', 'application/json'), create_authorization_header()]
        )
        json_resp = json.loads(response.get_data(as_text=True))
        assert response.status_code == 201
        assert json_resp == []
        assert len(Venue.query.all()) == 1

    def it_creates_venues_only_first_default(self, client, db_session):
        data = [
            {
                'name': 'London branch',
                'address': '19 Compton Terrace',
                'directions': 'Nearest station: Highbury & Islington',
                'default': True
            },
            {
                'name': 'Test branch',
                'address': '1 Test Street',
                'directions': 'Nearest station: Teston',
                'default': True
            },
        ]
        response = client.post(
            url_for('venues.create_venues'),
            data=json.dumps(data),
            headers=[('Content-Type', 'application/json'), create_authorization_header()]
        )
        assert response.status_code == 201
        json_resp = json.loads(response.get_data(as_text=True))
        assert len(json_resp) == len(data)
        assert Venue.query.filter_by(name=data[0]['name']).one().default
        assert not Venue.query.filter_by(name=data[1]['name']).one().default
        assert json_resp[0]['default']
        assert not json_resp[1]['default']

    def it_doesnt_create_duplicate_venues(self, client, db_session, sample_venue):
        data = [{
            'name': sample_venue.name,
            'address': '19 Compton Terrace',
            'directions': 'Nearest station: Highbury & Islington',
            'default': True
        }]
        response = client.post(
            url_for('venues.create_venues'),
            data=json.dumps(data),
            headers=[('Content-Type', 'application/json'), create_authorization_header()]
        )
        assert response.status_code == 201
        json_resp = json.loads(response.get_data(as_text=True))
        assert len(json_resp) == 0
class WhenPostingVenue(object):
    """Tests for creating and updating a single venue."""

    def it_creates_a_venue(self, client, db_session):
        payload = {
            'name': 'London branch',
            'address': '19 Compton Terrace',
            'directions': 'Nearest station: Highbury & Islington',
            'default': True
        }
        response = client.post(
            url_for('venue.create_venue'),
            data=json.dumps(payload),
            headers=[('Content-Type', 'application/json'), create_authorization_header()]
        )
        assert response.status_code == 201
        created = json.loads(response.get_data(as_text=True))
        for field in payload.keys():
            assert payload[field] == created[field]

    def it_updates_a_venue(self, client, db_session, sample_venue):
        payload = {
            'name': 'London branch',
            'address': '19 New Street',
        }
        response = client.post(
            url_for('venue.update_venue', venue_id=sample_venue.id),
            data=json.dumps(payload),
            headers=[('Content-Type', 'application/json'), create_authorization_header()]
        )
        assert response.status_code == 200
        updated = json.loads(response.get_data(as_text=True))
        for field in payload.keys():
            assert payload[field] == updated[field]
from flask import json, url_for
from tests.conftest import create_authorization_header
from app.models import Venue
class WhenGettingVenues(object):
def it_returns_all_venues(self, client, sample_venue, db_session):
response = client.get(
url_for('venues.get_venues'),
headers=[create_authorization_header()]
)
assert response.status_code == 200
data = json.loads(response.get_data(as_text=True))
assert len(data) == 1
class WhenGettingVenueByID(object):
def it_returns_correct_venue(self, client, sample_venue, db_session):
response = client.get(
url_for('venue.get_venue_by_id', venue_id=str(sample_venue.id)),
headers=[create_authorization_header()]
)
assert response.status_code == 200
json_resp = json.loads(response.get_data(as_text=True))
assert json_resp['id'] == str(sample_venue.id)
class WhenPostingVenues(object):
def it_creates_venues(self, client, db_session):
data = [
{
'name': 'London branch',
'address': '19 Compton Terrace',
'directions': 'Nearest station: Highbury & Islington',
'default': True
},
{
'name': 'Test branch',
'address': '1 Test Street',
'directions': 'Nearest station: Teston',
'default': False
},
]
response = client.post(
url_for('venues.create_venues'),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), create_authorization_header()]
)
assert response.status_code == 201
json_resp = json.loads(response.get_data(as_text=True))
assert len(json_resp) == len(data)
assert sorted(data) == sorted([
{
'name': j['name'],
'address': j['address'],
'directions': j['directions'],
'default': j['default']
} for j in json_resp])
def it_creates_venues_for_imported_venues(self, client, db_session):
data = [
{
"id": "1",
"name": "",
"address": "19 Compton Terrace N1 2UN, next door to Union Chapel.",
"tube": "Highbury & Islington (Victoria Line), 2 minutes walk",
"bus": "Bus routes 4, 19, 30, 43 & 277 stop nearby"
},
{
"id": "2",
"name": "Bristol",
"address": "Caf\u00e9 Revival, 56 Corn Street, Bristol, BS1 1JG",
"tube": "",
"bus": ""
}
]
response = client.post(
url_for('venues.import_venues'),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), create_authorization_header()]
)
assert response.status_code == 201
json_resp = json.loads(response.get_data(as_text=True))
assert len(json_resp) == len(data)
for i in range(0, len(data) - 1):
assert json_resp[i]["old_id"] == int(data[i]["id"])
assert json_resp[i]["name"] == data[i]["name"] if data[i]["name"] else 'Head branch'
assert json_resp[i]["address"] == data[i]["address"]
assert json_resp[i]["directions"] == "<div>Bus: {bus}</div><div>Train: {train}</div>".format(
bus=data[i]["bus"], train=data[i]["tube"])
def it_does_not_create_venue_with_existing_name(self, client, db_session, sample_venue):
data = [
{
"id": "1",
"name": sample_venue.name,
"address": "19 Compton Terrace N1 2UN, next door to Union Chapel.",
"tube": "Highbury & Islington (Victoria Line), 2 minutes walk",
"bus": "Bus routes 4, 19, 30, 43 & 277 stop nearby"
}
]
response = client.post(
url_for('venues.import_venues'),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), create_authorization_header()]
)
json_resp = json.loads(response.get_data(as_text=True))
assert response.status_code == 201
assert json_resp == []
assert len(Venue.query.all()) == 1
def it_creates_venues_only_first_default(self, client, db_session):
data = [
{
'name': 'London branch',
'address': '19 Compton Terrace',
'directions': 'Nearest station: Highbury & Islington',
'default': True
},
{
'name': 'Test branch',
'address': '1 Test Street',
'directions': 'Nearest station: Teston',
'default': True
},
]
response = client.post(
url_for('venues.create_venues'),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), create_authorization_header()]
)
assert response.status_code == 201
json_resp = json.loads(response.get_data(as_text=True))
assert len(json_resp) == len(data)
assert Venue.query.filter_by(name=data[0]['name']).one().default
assert not Venue.query.filter_by(name=data[1]['name']).one().default
assert json_resp[0]['default']
assert not json_resp[1]['default']
def it_doesnt_create_duplicate_venues(self, client, db_session, sample_venue):
data = [{
'name': sample_venue.name,
'address': '19 Compton Terrace',
'directions': 'Nearest station: Highbury & Islington',
'default': True
}]
response = client.post(
url_for('venues.create_venues'),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), create_authorization_header()]
)
assert response.status_code == 201
json_resp = json.loads(response.get_data(as_text=True))
assert len(json_resp) == 0
class WhenPostingVenue(object):
def it_creates_a_venue(self, client, db_session):
data = {
'name': 'London branch',
'address': '19 Compton Terrace',
'directions': 'Nearest station: Highbury & Islington',
'default': True
}
response = client.post(
url_for('venue.create_venue'),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), create_authorization_header()]
)
assert response.status_code == 201
json_resp = json.loads(response.get_data(as_text=True))
for key in data.keys():
assert data[key] == json_resp[key]
def it_updates_a_venue(self, client, db_session, sample_venue):
data = {
'name': 'London branch',
'address': '19 New Street',
}
response = client.post(
url_for('venue.update_venue', venue_id=sample_venue.id),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), create_authorization_header()]
)
assert response.status_code == 200
json_resp = json.loads(response.get_data(as_text=True))
for key in data.keys():
assert data[key] == json_resp[key] | 0.542136 | 0.352982 |
import numpy as np
from keras import backend as K
from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.engine import Layer
from keras.engine import InputSpec
from keras.layers.recurrent import Recurrent, _time_distributed_dense
from keras.legacy import interfaces
class mLSTM(Recurrent):
"""Long-Short Term Memory unit - Hochreiter 1997.
For a step-by-step description of the algorithm, see
[this tutorial](http://deeplearning.net/tutorial/lstm.html).
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
multiplicative_units: Positive integer, numper of multiplicative
units.
multiplicative_initializer: Initializer for the `multiplicative_kernel`
weights matrix, used for the linear transformation of the inputs.
multiplicative_regularizer: Regularizer function applied to
the `multiplicative_kernel` weights matrix
multiplicative_constraint: Constraint function applied to
the `multiplicative_kernel` weights matrix
# References
- [Multiplicative LSTM for sequence modelling](https://arxiv.org/pdf/1609.07959.pdf)
"""
@interfaces.legacy_recurrent_support
def __init__(self, units,
             activation='tanh',
             recurrent_activation='hard_sigmoid',
             use_bias=True,
             kernel_initializer='glorot_uniform',
             recurrent_initializer='orthogonal',
             bias_initializer='zeros',
             unit_forget_bias=True,
             kernel_regularizer=None,
             recurrent_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             recurrent_constraint=None,
             bias_constraint=None,
             dropout=0.,
             recurrent_dropout=0.,
             multiplicative_units=None,
             multiplicative_initializer='glorot_uniform',
             multiplicative_regularizer=None,
             multiplicative_constraint=None,
             **kwargs):
    """Configure the mLSTM layer; weights are created later in `build()`.

    See the class docstring for the meaning of each argument.
    NOTE(review): `use_bias` and `unit_forget_bias` are stored here but no
    bias weight is created in `build()` or applied in `step()` -- bias
    support looks unimplemented; confirm before relying on these flags.
    """
    super(mLSTM, self).__init__(**kwargs)
    # Number of hidden units for the layer (output dimensionality).
    self.units = units
    # Outer (output) activation function.
    self.activation = activations.get(activation)
    # Internal (gate) activation function.
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.multiplicative_initializer = initializers.get(multiplicative_initializer)
    self.unit_forget_bias = unit_forget_bias
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.multiplicative_regularizer = regularizers.get(multiplicative_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.multiplicative_constraint = constraints.get(multiplicative_constraint)
    # Clamp the dropout rates into [0, 1].
    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    # In mLSTM, the dimension of m can be arbitrary; it defaults to the
    # number of hidden units when not given.
    if multiplicative_units:
        self.m_units = multiplicative_units
    else:
        self.m_units = self.units
def build(self, input_shape):
    """Create the mLSTM weights once the input shape is known.

    Creates the concatenated input kernel and (multiplicative-state)
    recurrent kernel for the i/f/c/o gates, plus the two kernels that
    produce the multiplicative state m from the input and from h_{t-1}.
    No bias weights are created here (`use_bias` is not honoured).
    """
    if isinstance(input_shape, list):
        input_shape = input_shape[0]
    batch_size = input_shape[0] if self.stateful else None
    self.input_dim = input_shape[2]
    self.input_spec = InputSpec(shape=(batch_size, None, self.input_dim))
    # Two states: h (hidden) and c (cell), both of width `units`.
    self.state_spec = [InputSpec(shape=(batch_size, self.units)),
                       InputSpec(shape=(batch_size, self.units))]
    self.states = [None, None]
    if self.stateful:
        self.reset_states()
    # Input kernel for all four gates, concatenated along the last axis.
    self.kernel = self.add_weight((self.input_dim, self.units * 4),
                                  name='kernel',
                                  initializer=self.kernel_initializer,
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    # Recurrent kernel maps the multiplicative state m (m_units wide)
    # to the four gates.
    self.recurrent_kernel = self.add_weight(
        (self.m_units, self.units * 4),
        name='recurrent_kernel',
        initializer=self.recurrent_initializer,
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint)
    # x -> m projection; its elementwise product with the h_{t-1} -> m
    # projection below forms the multiplicative state in step().
    self.multiplicative_kernel = self.add_weight((self.input_dim, self.m_units),
                                                 name='multiplicative_kernel',
                                                 initializer=self.multiplicative_initializer,
                                                 regularizer=self.multiplicative_regularizer,
                                                 constraint=self.multiplicative_constraint)
    self.multiplicative_recurrent_kernel = self.add_weight((self.units, self.m_units),
                                                           name='multiplicative_recurrent_kernel',
                                                           initializer=self.recurrent_initializer,
                                                           regularizer=self.recurrent_regularizer,
                                                           constraint=self.recurrent_constraint)
    # Views into the concatenated kernels, one slice per gate (i, f, c, o).
    self.kernel_i = self.kernel[:, :self.units]
    self.kernel_f = self.kernel[:, self.units: self.units * 2]
    self.kernel_c = self.kernel[:, self.units * 2: self.units * 3]
    self.kernel_o = self.kernel[:, self.units * 3:]
    self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]
    self.recurrent_kernel_f = self.recurrent_kernel[:, self.units: self.units * 2]
    self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2: self.units * 3]
    self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3:]
    self.built = True
def preprocess_input(self, inputs, training=None):
    """No-op: all input projections are done per timestep in `step()`."""
    return inputs
def get_constants(self, inputs, training=None):
    """Build the dropout masks that `step()` receives as constants.

    Returns a two-element list: four input-dropout masks followed by
    four recurrent-dropout masks (one per gate).  When a dropout rate
    is inactive, the corresponding masks degenerate to the scalar 1.
    """
    def gate_masks(rate, ones):
        # One independently sampled mask per gate (i, f, c, o).
        def dropped_inputs():
            return K.dropout(ones, rate)
        return [K.in_train_phase(dropped_inputs, ones, training=training)
                for _ in range(4)]

    if 0. < self.dropout < 1:
        input_dim = K.int_shape(inputs)[-1]
        base = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        dp_mask = gate_masks(self.dropout,
                             K.tile(base, (1, int(input_dim))))
    else:
        dp_mask = [K.cast_to_floatx(1.) for _ in range(4)]

    if 0. < self.recurrent_dropout < 1:
        base = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        rec_dp_mask = gate_masks(self.recurrent_dropout,
                                 K.tile(base, (1, self.units)))
    else:
        rec_dp_mask = [K.cast_to_floatx(1.) for _ in range(4)]

    return [dp_mask, rec_dp_mask]
def step(self, inputs, states):
    """Run one mLSTM timestep.

    `states` holds [h_{t-1}, c_{t-1}, input-dropout masks,
    recurrent-dropout masks].  The multiplicative state
    m = (x W_mx) * (h_{t-1} W_mh) replaces h_{t-1} in the recurrent
    term of all four gates.
    """
    h_tm1 = states[0]
    c_tm1 = states[1]
    dp_mask = states[2]
    rec_dp_mask = states[3]
    x_m = K.dot(inputs * dp_mask[0], self.multiplicative_kernel)
    x_i = K.dot(inputs * dp_mask[0], self.kernel_i)
    x_f = K.dot(inputs * dp_mask[1], self.kernel_f)
    x_c = K.dot(inputs * dp_mask[2], self.kernel_c)
    x_o = K.dot(inputs * dp_mask[3], self.kernel_o)
    # Multiplicative state: elementwise product of the input and the
    # hidden-state projections.
    m = x_m * K.dot(h_tm1, self.multiplicative_recurrent_kernel)
    i = self.recurrent_activation(x_i + K.dot(m * rec_dp_mask[0],
                                              self.recurrent_kernel_i))
    f = self.recurrent_activation(x_f + K.dot(m * rec_dp_mask[1],
                                              self.recurrent_kernel_f))
    # NOTE(review): the candidate term has no activation applied, and h
    # below is activation(o * c) rather than the conventional
    # o * activation(c) -- confirm these deviations are intended.
    c = f * c_tm1 + i * (x_c + K.dot(m * rec_dp_mask[2],
                                     self.recurrent_kernel_c))
    o = self.recurrent_activation(x_o + K.dot(m * rec_dp_mask[3],
                                              self.recurrent_kernel_o))
    h = self.activation(o * c)
    if 0. < self.dropout + self.recurrent_dropout:
        # Mark the output as learning-phase dependent for Keras.
        h._uses_learning_phase = True
    return h, [h, c]
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'multiplicative_units': self.m_units,
'multiplicative__initializer': initializers.serialize(self.multiplicative_initializer),
'multiplicative__regularizer': regularizers.serialize(self.multiplicative_regularizer),
'multiplicative__constraint': constraints.serialize(self.multiplicative_constraint)}
base_config = super(mLSTM, self).get_config()
return dict(list(base_config.items()) + list(config.items())) | layers/mLSTM.py | import numpy as np
from keras import backend as K
from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.engine import Layer
from keras.engine import InputSpec
from keras.layers.recurrent import Recurrent, _time_distributed_dense
from keras.legacy import interfaces
class mLSTM(Recurrent):
"""Multiplicative Long Short-Term Memory (mLSTM) unit.
For a step-by-step description of the algorithm, see
[this tutorial](http://deeplearning.net/tutorial/lstm.html).
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
multiplicative_units: Positive integer, number of multiplicative
units.
multiplicative_initializer: Initializer for the `multiplicative_kernel`
weights matrix, used for the linear transformation of the inputs.
multiplicative_regularizer: Regularizer function applied to
the `multiplicative_kernel` weights matrix
multiplicative_constraint: Constraint function applied to
the `multiplicative_kernel` weights matrix
# References
- [Multiplicative LSTM for sequence modelling](https://arxiv.org/pdf/1609.07959.pdf)
"""
@interfaces.legacy_recurrent_support
def __init__(self, units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
multiplicative_units=None,
multiplicative_initializer='glorot_uniform',
multiplicative_regularizer=None,
multiplicative_constraint=None,
**kwargs):
super(mLSTM, self).__init__(**kwargs)
# Number of hidden units for the layer
self.units = units
# Outer activation function
self.activation = activations.get(activation)
# Internal Activation function
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.multiplicative_initializer = initializers.get(multiplicative_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.multiplicative_regularizer = regularizers.get(multiplicative_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.multiplicative_constraint = constraints.get(multiplicative_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
# In mLSTM, the dimension of m can be arbitrary, however we default it to being equal to the number
# of hidden units
if multiplicative_units:
self.m_units = multiplicative_units
else:
self.m_units = self.units
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
batch_size = input_shape[0] if self.stateful else None
self.input_dim = input_shape[2]
self.input_spec = InputSpec(shape=(batch_size, None, self.input_dim))
self.state_spec = [InputSpec(shape=(batch_size, self.units)),
InputSpec(shape=(batch_size, self.units))]
self.states = [None, None]
if self.stateful:
self.reset_states()
self.kernel = self.add_weight((self.input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
(self.m_units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.multiplicative_kernel = self.add_weight((self.input_dim, self.m_units),
name='multiplicative_kernel',
initializer=self.multiplicative_initializer,
regularizer=self.multiplicative_regularizer,
constraint=self.multiplicative_constraint)
self.multiplicative_recurrent_kernel = self.add_weight((self.units, self.m_units),
name='multiplicative_recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.kernel_i = self.kernel[:, :self.units]
self.kernel_f = self.kernel[:, self.units: self.units * 2]
self.kernel_c = self.kernel[:, self.units * 2: self.units * 3]
self.kernel_o = self.kernel[:, self.units * 3:]
self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]
self.recurrent_kernel_f = self.recurrent_kernel[:, self.units: self.units * 2]
self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2: self.units * 3]
self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3:]
self.built = True
def preprocess_input(self, inputs, training=None):
return inputs
def get_constants(self, inputs, training=None):
constants = []
if 0. < self.dropout < 1:
input_shape = K.int_shape(inputs)
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, int(input_dim)))
def dropped_inputs():
return K.dropout(ones, self.dropout)
dp_mask = [K.in_train_phase(dropped_inputs,
ones,
training=training) for _ in range(4)]
constants.append(dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
if 0. < self.recurrent_dropout < 1:
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.units))
def dropped_inputs():
return K.dropout(ones, self.recurrent_dropout)
rec_dp_mask = [K.in_train_phase(dropped_inputs,
ones,
training=training) for _ in range(4)]
constants.append(rec_dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
return constants
def step(self, inputs, states):
h_tm1 = states[0]
c_tm1 = states[1]
dp_mask = states[2]
rec_dp_mask = states[3]
x_m = K.dot(inputs * dp_mask[0], self.multiplicative_kernel)
x_i = K.dot(inputs * dp_mask[0], self.kernel_i)
x_f = K.dot(inputs * dp_mask[1], self.kernel_f)
x_c = K.dot(inputs * dp_mask[2], self.kernel_c)
x_o = K.dot(inputs * dp_mask[3], self.kernel_o)
m = x_m * K.dot(h_tm1, self.multiplicative_recurrent_kernel)
i = self.recurrent_activation(x_i + K.dot(m * rec_dp_mask[0],
self.recurrent_kernel_i))
f = self.recurrent_activation(x_f + K.dot(m * rec_dp_mask[1],
self.recurrent_kernel_f))
c = f * c_tm1 + i * (x_c + K.dot(m * rec_dp_mask[2],
self.recurrent_kernel_c))
o = self.recurrent_activation(x_o + K.dot(m * rec_dp_mask[3],
self.recurrent_kernel_o))
h = self.activation(o * c)
if 0. < self.dropout + self.recurrent_dropout:
h._uses_learning_phase = True
return h, [h, c]
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'multiplicative_units': self.m_units,
'multiplicative__initializer': initializers.serialize(self.multiplicative_initializer),
'multiplicative__regularizer': regularizers.serialize(self.multiplicative_regularizer),
'multiplicative__constraint': constraints.serialize(self.multiplicative_constraint)}
base_config = super(mLSTM, self).get_config()
return dict(list(base_config.items()) + list(config.items())) | 0.928555 | 0.54577 |
from typing import Any, cast, List, Optional, Union
from gitlab import cli
from gitlab import exceptions as exc
from gitlab import types
from gitlab.base import RequiredOptional, RESTManager, RESTObject
from gitlab.mixins import (
CreateMixin,
CRUDMixin,
DeleteMixin,
ListMixin,
ObjectDeleteMixin,
SaveMixin,
)
__all__ = [
"RunnerJob",
"RunnerJobManager",
"Runner",
"RunnerManager",
"GroupRunner",
"GroupRunnerManager",
"ProjectRunner",
"ProjectRunnerManager",
]
class RunnerJob(RESTObject):
    """A job processed by a runner (read-only list item)."""

    pass
class RunnerJobManager(ListMixin, RESTManager):
    """List the jobs handled by a runner (/runners/:id/jobs)."""

    _path = "/runners/{runner_id}/jobs"
    _obj_cls = RunnerJob
    # The parent Runner object supplies the runner_id path parameter.
    _from_parent_attrs = {"runner_id": "id"}
    # Server-side filter accepted by list().
    _list_filters = ("status",)
class Runner(SaveMixin, ObjectDeleteMixin, RESTObject):
    """A GitLab runner; supports save() and delete()."""

    jobs: RunnerJobManager
class RunnerManager(CRUDMixin, RESTManager):
    """CRUD manager for instance-level runners (the /runners endpoints)."""

    _path = "/runners"
    _obj_cls = Runner
    # Attributes accepted by create(); only the registration token is
    # required.
    _create_attrs = RequiredOptional(
        required=("token",),
        optional=(
            "description",
            "info",
            "active",
            "locked",
            "run_untagged",
            "tag_list",
            "access_level",
            "maximum_timeout",
        ),
    )
    # Attributes accepted by update(); all optional.
    _update_attrs = RequiredOptional(
        optional=(
            "description",
            "active",
            "tag_list",
            "run_untagged",
            "locked",
            "access_level",
            "maximum_timeout",
        ),
    )
    _list_filters = ("scope", "tag_list")
    # tag_list is serialized as a comma-separated string on the wire.
    _types = {"tag_list": types.CommaSeparatedListAttribute}

    @cli.register_custom_action("RunnerManager", (), ("scope",))
    @exc.on_http_error(exc.GitlabListError)
    def all(self, scope: Optional[str] = None, **kwargs: Any) -> List[Runner]:
        """List all the runners.

        Args:
            scope: The scope of runners to show, one of: specific,
                shared, active, paused, online
            all: If True, return all the items, without pagination
            per_page: Number of items to retrieve per request
            page: ID of the page to return (starts with page 1)
            as_list: If set to False and no pagination option is
                defined, return a generator instead of a list
            **kwargs: Extra options to send to the server (e.g. sudo)

        Raises:
            GitlabAuthenticationError: If authentication is not correct
            GitlabListError: If the server failed to perform the request

        Returns:
            A list of runners matching the scope.
        """
        path = "/runners/all"
        query_data = {}
        if scope is not None:
            query_data["scope"] = scope
        obj = self.gitlab.http_list(path, query_data, **kwargs)
        return [self._obj_cls(self, item) for item in obj]

    @cli.register_custom_action("RunnerManager", ("token",))
    @exc.on_http_error(exc.GitlabVerifyError)
    def verify(self, token: str, **kwargs: Any) -> None:
        """Validates authentication credentials for a registered Runner.

        Args:
            token: The runner's authentication token
            **kwargs: Extra options to send to the server (e.g. sudo)

        Raises:
            GitlabAuthenticationError: If authentication is not correct
            GitlabVerifyError: If the server failed to verify the token
        """
        path = "/runners/verify"
        post_data = {"token": token}
        self.gitlab.http_post(path, post_data=post_data, **kwargs)

    def get(self, id: Union[str, int], lazy: bool = False, **kwargs: Any) -> Runner:
        # Overridden only to narrow the return type for static checkers.
        return cast(Runner, super().get(id=id, lazy=lazy, **kwargs))
class GroupRunner(RESTObject):
    """A runner visible at the group level (read-only list item)."""

    pass
class GroupRunnerManager(ListMixin, RESTManager):
    """List the runners available to a group.

    NOTE(review): _create_attrs is declared but no CreateMixin is mixed
    in, so create() is unavailable -- confirm whether this is intended.
    """

    _path = "/groups/{group_id}/runners"
    _obj_cls = GroupRunner
    # The parent Group object supplies the group_id path parameter.
    _from_parent_attrs = {"group_id": "id"}
    _create_attrs = RequiredOptional(required=("runner_id",))
    _list_filters = ("scope", "tag_list")
    # tag_list is serialized as a comma-separated string on the wire.
    _types = {"tag_list": types.CommaSeparatedListAttribute}
class ProjectRunner(ObjectDeleteMixin, RESTObject):
    """A runner enabled for a project; supports delete()."""

    pass
class ProjectRunnerManager(CreateMixin, DeleteMixin, ListMixin, RESTManager):
_path = "/projects/{project_id}/runners"
_obj_cls = ProjectRunner
_from_parent_attrs = {"project_id": "id"}
_create_attrs = RequiredOptional(required=("runner_id",))
_list_filters = ("scope", "tag_list")
_types = {"tag_list": types.CommaSeparatedListAttribute} | venv/Lib/site-packages/gitlab/v4/objects/runners.py | from typing import Any, cast, List, Optional, Union
from gitlab import cli
from gitlab import exceptions as exc
from gitlab import types
from gitlab.base import RequiredOptional, RESTManager, RESTObject
from gitlab.mixins import (
CreateMixin,
CRUDMixin,
DeleteMixin,
ListMixin,
ObjectDeleteMixin,
SaveMixin,
)
__all__ = [
"RunnerJob",
"RunnerJobManager",
"Runner",
"RunnerManager",
"GroupRunner",
"GroupRunnerManager",
"ProjectRunner",
"ProjectRunnerManager",
]
class RunnerJob(RESTObject):
pass
class RunnerJobManager(ListMixin, RESTManager):
_path = "/runners/{runner_id}/jobs"
_obj_cls = RunnerJob
_from_parent_attrs = {"runner_id": "id"}
_list_filters = ("status",)
class Runner(SaveMixin, ObjectDeleteMixin, RESTObject):
jobs: RunnerJobManager
class RunnerManager(CRUDMixin, RESTManager):
_path = "/runners"
_obj_cls = Runner
_create_attrs = RequiredOptional(
required=("token",),
optional=(
"description",
"info",
"active",
"locked",
"run_untagged",
"tag_list",
"access_level",
"maximum_timeout",
),
)
_update_attrs = RequiredOptional(
optional=(
"description",
"active",
"tag_list",
"run_untagged",
"locked",
"access_level",
"maximum_timeout",
),
)
_list_filters = ("scope", "tag_list")
_types = {"tag_list": types.CommaSeparatedListAttribute}
@cli.register_custom_action("RunnerManager", (), ("scope",))
@exc.on_http_error(exc.GitlabListError)
def all(self, scope: Optional[str] = None, **kwargs: Any) -> List[Runner]:
"""List all the runners.
Args:
scope: The scope of runners to show, one of: specific,
shared, active, paused, online
all: If True, return all the items, without pagination
per_page: Number of items to retrieve per request
page: ID of the page to return (starts with page 1)
as_list: If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the server failed to perform the request
Returns:
A list of runners matching the scope.
"""
path = "/runners/all"
query_data = {}
if scope is not None:
query_data["scope"] = scope
obj = self.gitlab.http_list(path, query_data, **kwargs)
return [self._obj_cls(self, item) for item in obj]
@cli.register_custom_action("RunnerManager", ("token",))
@exc.on_http_error(exc.GitlabVerifyError)
def verify(self, token: str, **kwargs: Any) -> None:
"""Validates authentication credentials for a registered Runner.
Args:
token: The runner's authentication token
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabVerifyError: If the server failed to verify the token
"""
path = "/runners/verify"
post_data = {"token": token}
self.gitlab.http_post(path, post_data=post_data, **kwargs)
def get(self, id: Union[str, int], lazy: bool = False, **kwargs: Any) -> Runner:
return cast(Runner, super().get(id=id, lazy=lazy, **kwargs))
class GroupRunner(RESTObject):
pass
class GroupRunnerManager(ListMixin, RESTManager):
_path = "/groups/{group_id}/runners"
_obj_cls = GroupRunner
_from_parent_attrs = {"group_id": "id"}
_create_attrs = RequiredOptional(required=("runner_id",))
_list_filters = ("scope", "tag_list")
_types = {"tag_list": types.CommaSeparatedListAttribute}
class ProjectRunner(ObjectDeleteMixin, RESTObject):
pass
class ProjectRunnerManager(CreateMixin, DeleteMixin, ListMixin, RESTManager):
_path = "/projects/{project_id}/runners"
_obj_cls = ProjectRunner
_from_parent_attrs = {"project_id": "id"}
_create_attrs = RequiredOptional(required=("runner_id",))
_list_filters = ("scope", "tag_list")
_types = {"tag_list": types.CommaSeparatedListAttribute} | 0.905803 | 0.105441 |
import sys, sqlite3
from collections import namedtuple
import MeCab
import random
import Vocabulary1
conn = sqlite3.connect("./wnjpn.db", check_same_thread = False)
Word = namedtuple('Word', 'wordid lang lemma pron pos')
def getWords(lemma):
    """Look up every Word row whose lemma matches exactly."""
    rows = conn.execute("select * from word where lemma=?", (lemma,))
    return [Word(*fields) for fields in rows]
Sense = namedtuple('Sense', 'synset wordid lang rank lexid freq src')
def getSenses(word):
    """Return all Sense rows attached to the given Word."""
    rows = conn.execute("select * from sense where wordid=?", (word.wordid,))
    return [Sense(*fields) for fields in rows]
Synset = namedtuple('Synset', 'synset pos name src')
def getSynset(synset):
    """Fetch the single Synset row with the given synset id."""
    row = conn.execute(
        "select * from synset where synset=?", (synset,)).fetchone()
    return Synset(*row)
def getWordsFromSynset(synset, lang):
    """Return the words of a synset, restricted to one language."""
    rows = conn.execute(
        "select word.* from sense, word where synset=? and word.lang=? and sense.wordid = word.wordid;",
        (synset, lang))
    return [Word(*fields) for fields in rows]
def getWordsFromSenses(sense, lang="jpn"):
    """Map synset name -> list of lemmas (in `lang`) for each sense.

    If two senses share a synset name, the later one overwrites the
    earlier entry (unchanged from the original behaviour).
    """
    synonym = {}
    for s in sense:
        # Idiom: collect the lemmas with a comprehension instead of a
        # manual append loop.
        lemmas = [w.lemma for w in getWordsFromSynset(s.synset, lang)]
        synonym[getSynset(s.synset).name] = lemmas
    return synonym
def getSynonym(word):
    """Return {synset name: [lemmas]} covering every sense of `word`.

    Returns an empty dict when the lemma is not in WordNet.
    """
    synonym = {}
    for w in getWords(word):
        # dict.update replaces the original quadratic
        # dict(list(a.items()) + list(b.items())) merge; later senses
        # still overwrite earlier ones on key collisions, as before.
        synonym.update(getWordsFromSenses(getSenses(w)))
    return synonym
def synonymlist(sentence):
    """Pick the vocabulary word best matching a synonym of `sentence`.

    WordNet synonyms of `sentence` are filtered to nouns (MeCab POS
    output contains '名詞'), intersected with Vocabulary1.vocabulary,
    and the candidate sharing the most characters with `sentence` is
    returned.  Falls back to `sentence` itself when nothing matches.
    """
    synonym = getSynonym(sentence)
    # Flatten {synset name: [lemmas]} into one space-separated string,
    # then back into a flat word list (preserves original behaviour).
    flat = ' '.join(' '.join(lemmas) for lemmas in synonym.values())
    wordlist = flat.rstrip(" \n").split(" ")
    # Perf fix: build the Tagger once instead of once per word.
    tagger = MeCab.Tagger("mecabrc")
    nouns = [w for w in wordlist if '名詞' in tagger.parse(w)]
    # Keep only candidates present in the application vocabulary.
    vocabulary_list = list(set(nouns) & set(Vocabulary1.vocabulary))
    if len(vocabulary_list) == 0:
        # No match at all: fall back to the input itself.
        word = sentence
    elif len(vocabulary_list) == 1:
        word = vocabulary_list[-1]
    else:
        # Score each candidate by how many of the sentence's characters
        # it contains (counted with multiplicity); ties keep the first
        # maximal candidate, as in the original dict-based max.
        scores = {}
        for cand in vocabulary_list:
            scores[cand] = sum(cand.count(ch) for ch in sentence)
        word = max(scores.items(), key=lambda kv: kv[1])[0]
    return word
def synonymwords(sentence):
    """Return all WordNet synonyms of `sentence` as one space-joined string."""
    groups = getSynonym(sentence).values()
    return ' '.join(' '.join(lemmas) for lemmas in groups)
def sentence_generator(speech):
sentenceInput = speech
wordlist2 = []
t = MeCab.Tagger("-Owakati")
m = t.parse(sentenceInput)
result = m.rstrip(" \n").split(" ")
for (i, sen) in enumerate(result):
t2 = MeCab.Tagger("mecabrc")
m2 = t2.parse(sen)
if '名詞' in m2:
synonym6 = synonymlist(sen)
if synonym6 != '':
result[i] = synonym6
sentence2 = ''.join(result)
# print(sentence2)
return sentence2 | SentenceGenerator.py |
import sys, sqlite3
from collections import namedtuple
import MeCab
import random
import Vocabulary1
conn = sqlite3.connect("./wnjpn.db", check_same_thread = False)
Word = namedtuple('Word', 'wordid lang lemma pron pos')
def getWords(lemma):
cur = conn.execute("select * from word where lemma=?", (lemma,))
return [Word(*row) for row in cur]
Sense = namedtuple('Sense', 'synset wordid lang rank lexid freq src')
def getSenses(word):
cur = conn.execute("select * from sense where wordid=?", (word.wordid,))
return [Sense(*row) for row in cur]
Synset = namedtuple('Synset', 'synset pos name src')
def getSynset(synset):
cur = conn.execute("select * from synset where synset=?", (synset,))
return Synset(*cur.fetchone())
def getWordsFromSynset(synset, lang):
cur = conn.execute("select word.* from sense, word where synset=? and word.lang=? and sense.wordid = word.wordid;", (synset,lang))
return [Word(*row) for row in cur]
def getWordsFromSenses(sense, lang="jpn"):
synonym = {}
for s in sense:
lemmas = []
syns = getWordsFromSynset(s.synset, lang)
for sy in syns:
lemmas.append(sy.lemma)
synonym[getSynset(s.synset).name] = lemmas
return synonym
def getSynonym (word):
synonym = {}
words = getWords(word)
if words:
for w in words:
sense = getSenses(w)
s = getWordsFromSenses(sense)
synonym = dict(list(synonym.items()) + list(s.items()))
return synonym
def synonymlist(sentence):
wordwrite = sentence
synonym = getSynonym(wordwrite)
synonym2 = list(synonym.values())
synonym3 = []
for syno in range(len(synonym2)):
synonym3.append(' '.join(synonym2[syno]))
synonym4 = ' '.join(synonym3)
wordlist = synonym4.rstrip(" \n").split(" ")
wordlist2 = []
for w in wordlist:
t4 = MeCab.Tagger("mecabrc")
m3 = t4.parse(w)
if '名詞' in m3:
wordlist2.append(w)
# wordlist2と語彙リストを比較して一致する単語を返す
# print(wordlist2)
wordlist2_set = set(wordlist2)
vocabulary_set = set(Vocabulary1.vocabulary)
vocabulary_list = list(wordlist2_set & vocabulary_set)
# print(vocabulary_list)
max_list = []
if len(vocabulary_list) == 0:
vocabulary_list.append(sentence)
word = vocabulary_list[-1]
elif len(vocabulary_list) == 1:
word = vocabulary_list[-1]
else:
for w1 in vocabulary_list:
number = 0
w2 = ()
for n1 in range(len(sentence)):
number += w1.count(sentence[n1])
w2 = (w1, number)
max_list.append(w2)
max_dic = dict(max_list)
# print("----------類義語の一致度:", max_list)
word_max = max(max_dic.items(), key = lambda x:x[1])[0]
word = word_max
# \\\\\\\\\\
# print("----------語彙リストとの比較結果:", vocabulary_list)
return word
def synonymwords(sentence):
wordwrite = sentence
synonym = getSynonym(wordwrite)
synonym2 = list(synonym.values())
synonym3 = []
for syno in range(len(synonym2)):
synonym3.append(' '.join(synonym2[syno]))
synonym4 = ' '.join(synonym3)
return synonym4
def sentence_generator(speech):
sentenceInput = speech
wordlist2 = []
t = MeCab.Tagger("-Owakati")
m = t.parse(sentenceInput)
result = m.rstrip(" \n").split(" ")
for (i, sen) in enumerate(result):
t2 = MeCab.Tagger("mecabrc")
m2 = t2.parse(sen)
if '名詞' in m2:
synonym6 = synonymlist(sen)
if synonym6 != '':
result[i] = synonym6
sentence2 = ''.join(result)
# print(sentence2)
return sentence2 | 0.141756 | 0.146026 |
from unittest.mock import MagicMock
import copy
from scan.fetchers.cli.cli_fetch_vservice_vnics import CliFetchVserviceVnics
from scan.test.fetch.cli_fetch.test_data.cli_fetch_vservice_vnics import *
from scan.test.fetch.test_fetch import TestFetch
class TestCliFetchVserviceVnics(TestFetch):
def setUp(self):
    """Create a CliFetchVserviceVnics fetcher bound to the test env."""
    super().setUp()
    self.configure_environment()
    self.fetcher = CliFetchVserviceVnics()
    self.fetcher.set_env(self.env)
def test_get(self):
# store original methods
original_get_by_id = self.fetcher.inv.get_by_id
original_run_fetch_lines = self.fetcher.run_fetch_lines
original_handle_service = self.fetcher.handle_service
# mock methods
self.fetcher.inv.get_by_id = MagicMock(return_value=NETWORK_NODE)
self.fetcher.run_fetch_lines = MagicMock(return_value=NAME_SPACES)
self.fetcher.handle_service = MagicMock(return_value=SERVICES)
result = self.fetcher.get(NETWORK_NODE['id'])
# reset methods
self.fetcher.inv.get_by_id = original_get_by_id
self.fetcher.run_fetch_lines = original_run_fetch_lines
self.fetcher.handle_service = original_handle_service
self.assertNotEqual(result, [], "Can't get vnics")
def test_get_with_error_host(self):
# store original methods
original_get_by_id = self.fetcher.inv.get_by_id
# mock methods
self.fetcher.inv.get_by_id = MagicMock(return_value=ERROR_NODE)
result = self.fetcher.get(NETWORK_NODE['id'])
# reset methods
self.fetcher.inv.get_by_id = original_get_by_id
self.assertEqual(result, [],
"Can't get empty array when the host "
"doesn't contain host_type")
def test_get_with_compute_host(self):
# store original methods
original_get_by_id = self.fetcher.inv.get_by_id
# mock methods
self.fetcher.inv.get_by_id = MagicMock(return_value=COMPUTE_NODE)
result = self.fetcher.get(NETWORK_NODE['id'])
# reset methods
self.fetcher.inv.get_by_id = original_get_by_id
self.assertEqual(result, [],
"Can't get empty array when the host type "
"doesn't contain network")
def test_handle_service(self):
# store original method
original_run_fetch_lines = self.fetcher.run_fetch_lines
original_set_interface_data = self.fetcher.set_interface_data
# mock the method
self.fetcher.run_fetch_lines = \
MagicMock(return_value=IP_ADDRESS_SHOW_RESULT)
self.fetcher.set_interface_data = MagicMock()
result = self.fetcher.handle_service(NETWORK_NODE['id'], SERVICE_ID)
# reset method
self.fetcher.run_fetch_lines = original_run_fetch_lines
self.fetcher.set_interface_data = original_set_interface_data
self.assertNotEqual(result, [], "Can't get interfaces data")
self.assertEqual(result[0].get("IPv6 Address"), IPV6_ADDRESS,
"incorrect IPv6 address")
def test_set_interface_data(self):
# store original methods
original_get_by_field = self.fetcher.inv.get_by_field
original_get_by_id = self.fetcher.inv.get_by_id
original_set = self.fetcher.inv.set
# mock the methods
self.fetcher.inv.get_by_field = MagicMock(return_value=NETWORK)
self.fetcher.inv.get_by_id = MagicMock(return_value=VSERVICE)
self.fetcher.inv.set = MagicMock()
vnic = copy.deepcopy(VNIC)
self.fetcher.set_interface_data(vnic)
# reset methods
self.fetcher.inv.get_by_field = original_get_by_field
self.fetcher.inv.get_by_id = original_get_by_id
self.fetcher.inv.set = original_set
self.assertIn("data", vnic, "Can't set data")
self.assertIn("cidr", vnic, "Can't set cidr")
self.assertIn("network", vnic, "Can't set network")
def test_handle_mac_address_line(self):
self.fetcher.handle_line(RAW_VNIC, MAC_ADDRESS_LINE)
self.assertEqual(RAW_VNIC['mac_address'], MAC_ADDRESS,
"Can't get the correct mac address from the line")
def test_handle_ipv4_address_line(self):
self.fetcher.handle_line(RAW_VNIC, IPV4_ADDRESS_LINE)
self.assertEqual(RAW_VNIC['IP Address'], IPV4_ADDRESS,
"Can't get the correct ipv4 address from the line")
def test_handle_ipv6_address_line(self):
self.fetcher.handle_line(RAW_VNIC, IPV6_ADDRESS_LINE)
self.assertEqual(RAW_VNIC['IPv6 Address'], IPV6_ADDRESS,
"Can't get the correct ipv6 address from the line")
def test_get_net_size(self):
size = self.fetcher.get_net_size(NET_MASK_ARRAY)
self.assertEqual(size, SIZE, "Can't get the size of network by netmask")
def test_get_cidr_for_vnic(self):
vnic = copy.deepcopy(VNIC)
cidr = self.fetcher.get_cidr_for_vnic(vnic)
self.assertEqual(cidr, CIDR, "the cidr info is wrong") | scan/test/fetch/cli_fetch/test_cli_fetch_vservice_vnics.py | from unittest.mock import MagicMock
import copy
from scan.fetchers.cli.cli_fetch_vservice_vnics import CliFetchVserviceVnics
from scan.test.fetch.cli_fetch.test_data.cli_fetch_vservice_vnics import *
from scan.test.fetch.test_fetch import TestFetch
class TestCliFetchVserviceVnics(TestFetch):
def setUp(self):
super().setUp()
self.configure_environment()
self.fetcher = CliFetchVserviceVnics()
self.fetcher.set_env(self.env)
def test_get(self):
# store original methods
original_get_by_id = self.fetcher.inv.get_by_id
original_run_fetch_lines = self.fetcher.run_fetch_lines
original_handle_service = self.fetcher.handle_service
# mock methods
self.fetcher.inv.get_by_id = MagicMock(return_value=NETWORK_NODE)
self.fetcher.run_fetch_lines = MagicMock(return_value=NAME_SPACES)
self.fetcher.handle_service = MagicMock(return_value=SERVICES)
result = self.fetcher.get(NETWORK_NODE['id'])
# reset methods
self.fetcher.inv.get_by_id = original_get_by_id
self.fetcher.run_fetch_lines = original_run_fetch_lines
self.fetcher.handle_service = original_handle_service
self.assertNotEqual(result, [], "Can't get vnics")
def test_get_with_error_host(self):
# store original methods
original_get_by_id = self.fetcher.inv.get_by_id
# mock methods
self.fetcher.inv.get_by_id = MagicMock(return_value=ERROR_NODE)
result = self.fetcher.get(NETWORK_NODE['id'])
# reset methods
self.fetcher.inv.get_by_id = original_get_by_id
self.assertEqual(result, [],
"Can't get empty array when the host "
"doesn't contain host_type")
def test_get_with_compute_host(self):
# store original methods
original_get_by_id = self.fetcher.inv.get_by_id
# mock methods
self.fetcher.inv.get_by_id = MagicMock(return_value=COMPUTE_NODE)
result = self.fetcher.get(NETWORK_NODE['id'])
# reset methods
self.fetcher.inv.get_by_id = original_get_by_id
self.assertEqual(result, [],
"Can't get empty array when the host type "
"doesn't contain network")
def test_handle_service(self):
# store original method
original_run_fetch_lines = self.fetcher.run_fetch_lines
original_set_interface_data = self.fetcher.set_interface_data
# mock the method
self.fetcher.run_fetch_lines = \
MagicMock(return_value=IP_ADDRESS_SHOW_RESULT)
self.fetcher.set_interface_data = MagicMock()
result = self.fetcher.handle_service(NETWORK_NODE['id'], SERVICE_ID)
# reset method
self.fetcher.run_fetch_lines = original_run_fetch_lines
self.fetcher.set_interface_data = original_set_interface_data
self.assertNotEqual(result, [], "Can't get interfaces data")
self.assertEqual(result[0].get("IPv6 Address"), IPV6_ADDRESS,
"incorrect IPv6 address")
def test_set_interface_data(self):
# store original methods
original_get_by_field = self.fetcher.inv.get_by_field
original_get_by_id = self.fetcher.inv.get_by_id
original_set = self.fetcher.inv.set
# mock the methods
self.fetcher.inv.get_by_field = MagicMock(return_value=NETWORK)
self.fetcher.inv.get_by_id = MagicMock(return_value=VSERVICE)
self.fetcher.inv.set = MagicMock()
vnic = copy.deepcopy(VNIC)
self.fetcher.set_interface_data(vnic)
# reset methods
self.fetcher.inv.get_by_field = original_get_by_field
self.fetcher.inv.get_by_id = original_get_by_id
self.fetcher.inv.set = original_set
self.assertIn("data", vnic, "Can't set data")
self.assertIn("cidr", vnic, "Can't set cidr")
self.assertIn("network", vnic, "Can't set network")
def test_handle_mac_address_line(self):
self.fetcher.handle_line(RAW_VNIC, MAC_ADDRESS_LINE)
self.assertEqual(RAW_VNIC['mac_address'], MAC_ADDRESS,
"Can't get the correct mac address from the line")
def test_handle_ipv4_address_line(self):
self.fetcher.handle_line(RAW_VNIC, IPV4_ADDRESS_LINE)
self.assertEqual(RAW_VNIC['IP Address'], IPV4_ADDRESS,
"Can't get the correct ipv4 address from the line")
def test_handle_ipv6_address_line(self):
self.fetcher.handle_line(RAW_VNIC, IPV6_ADDRESS_LINE)
self.assertEqual(RAW_VNIC['IPv6 Address'], IPV6_ADDRESS,
"Can't get the correct ipv6 address from the line")
def test_get_net_size(self):
size = self.fetcher.get_net_size(NET_MASK_ARRAY)
self.assertEqual(size, SIZE, "Can't get the size of network by netmask")
def test_get_cidr_for_vnic(self):
vnic = copy.deepcopy(VNIC)
cidr = self.fetcher.get_cidr_for_vnic(vnic)
self.assertEqual(cidr, CIDR, "the cidr info is wrong") | 0.76291 | 0.282116 |
import html_generators as h
def assert_equal(a, b):
assert a == b, f'This:\n{a}\nIs not equal to:\n{b}'
import django
from django.conf import settings
from django.http import StreamingHttpResponse
from django.template import Template, Context
from django.template.engine import Engine
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.test_django_settings")
django.setup()
prerendered_br = str(h.Br())
# Ensure we don't escape django safe string
assert_equal(str(h.Fragment(mark_safe('<i>'))), '<i>')
# Ensure django doesn't escape us
assert_equal(conditional_escape(h.I()), '<i></i>')
assert_equal(conditional_escape(prerendered_br), '<br>')
assert_equal(
format_html('{}', h.Br()),
'<br>',
)
assert_equal(
format_html('{}', prerendered_br),
'<br>',
)
assert_equal(
Template('{{a}}').render(
Context({'a': h.Br()})
),
'<br>',
)
assert_equal(
Template('{{a}}').render(
Context({'a': prerendered_br})
),
'<br>',
)
# Ensure custom template tags can return us, and we won't be escaped
assert_equal(
Template(
'''{% load hg_tests %}{% a_br %}{% a_prerendered_br %}''',
).render(
Context({})
),
'<br><br>',
)
# "Infinite streaming response"
from itertools import count, islice
infinite_doc = h.Document(h.Div(x) for x in count())
bits = islice(StreamingHttpResponse(infinite_doc), 100)
assert_equal(''.join(b.decode() for b in bits), '''<!DOCTYPE html>
<div>0</div><div>1</div><div>2</div><div>3</div><div>4</div><div>5</div><div>6</div><div>7</div><div>8</div><div>9</div><div>10</div><div>11</div><div>12</div><div>13</div><div>14</div><div>15</div><div>16</div><div>17</div><div>18</div><div>19</div><div>20</div><div>21</div><div>22</div><div>23</div><div>24''')
from django.utils import timezone
import html_generators.django as hd
now = timezone.now()
assert hd.date(now, 'Y') == str(now.year)
assert hd.static('foo.js') == '/static/foo.js'
assert str(hd.Template('foo.html', context=dict(foo='FOO'))) == 'FOO'
print('Django tests passed.')
# TODO - test django submodule! | tests/test_django.py | import html_generators as h
def assert_equal(a, b):
assert a == b, f'This:\n{a}\nIs not equal to:\n{b}'
import django
from django.conf import settings
from django.http import StreamingHttpResponse
from django.template import Template, Context
from django.template.engine import Engine
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.test_django_settings")
django.setup()
prerendered_br = str(h.Br())
# Ensure we don't escape django safe string
assert_equal(str(h.Fragment(mark_safe('<i>'))), '<i>')
# Ensure django doesn't escape us
assert_equal(conditional_escape(h.I()), '<i></i>')
assert_equal(conditional_escape(prerendered_br), '<br>')
assert_equal(
format_html('{}', h.Br()),
'<br>',
)
assert_equal(
format_html('{}', prerendered_br),
'<br>',
)
assert_equal(
Template('{{a}}').render(
Context({'a': h.Br()})
),
'<br>',
)
assert_equal(
Template('{{a}}').render(
Context({'a': prerendered_br})
),
'<br>',
)
# Ensure custom template tags can return us, and we won't be escaped
assert_equal(
Template(
'''{% load hg_tests %}{% a_br %}{% a_prerendered_br %}''',
).render(
Context({})
),
'<br><br>',
)
# "Infinite streaming response"
from itertools import count, islice
infinite_doc = h.Document(h.Div(x) for x in count())
bits = islice(StreamingHttpResponse(infinite_doc), 100)
assert_equal(''.join(b.decode() for b in bits), '''<!DOCTYPE html>
<div>0</div><div>1</div><div>2</div><div>3</div><div>4</div><div>5</div><div>6</div><div>7</div><div>8</div><div>9</div><div>10</div><div>11</div><div>12</div><div>13</div><div>14</div><div>15</div><div>16</div><div>17</div><div>18</div><div>19</div><div>20</div><div>21</div><div>22</div><div>23</div><div>24''')
from django.utils import timezone
import html_generators.django as hd
now = timezone.now()
assert hd.date(now, 'Y') == str(now.year)
assert hd.static('foo.js') == '/static/foo.js'
assert str(hd.Template('foo.html', context=dict(foo='FOO'))) == 'FOO'
print('Django tests passed.')
# TODO - test django submodule! | 0.290276 | 0.277865 |
import pandas as pd
import streamlit as st
import re
import pydeck as pdk
import numpy as np
import altair as alt
applicants = pd.read_csv('./applicants.csv')
grants = pd.read_csv('./grants.csv')
# lat_midpoint = grants['lat'].median()
# lon_midpoint = grants['lon'].median()
min_grant, max_grant, med_grant = int(grants.Amount.min()), int(grants.Amount.max()), int(grants.Amount.median())
min_app, max_app = int(applicants['Funding Request'].min()), int(applicants['Funding Request'].max())
app_25, app_75 = int(applicants['Funding Request'].quantile(.25)), int(applicants['Funding Request'].quantile(.75))
st.set_page_config(layout='wide')
app_or_grant = st.selectbox(
'Show Applications or Grants?',
('Applications', 'Grants'))
if app_or_grant == 'Applications':
st.title('TIGER Applications')
st.subheader('Applicants')
applicant_entities = list(applicants.State.unique())
def entity_select():
return st.multiselect('Show Applications From:', options=applicant_entities, default=applicant_entities[0])
def slider_select(min_v, max_v, range_v):
return st.slider('Select a Range of Values', min_v, max_v, range_v)
def show_select():
return st.selectbox('Show Sum of Total:', options=['Funding Request', 'Project Cost'])
with st.sidebar:
entity_list = entity_select()
slider = slider_select(0, 3500, (0, 250))
# grant_range = st.slider('Select a Range of Values',\
# min_app, max_app, (app_25, app_75))
filtered = applicants[applicants.State.isin(entity_list)]
st.write(f'There are {len(filtered)} applications from the State(s) you selected. This represents {round(100*len(filtered)/len(applicants), 2)} percent of all applications.')
left_column, right_column = st.beta_columns((1, 2))
with left_column:
show_variable = show_select()
hist_values = filtered.groupby(['State', 'Round']).agg('sum')[show_variable].reset_index()
st.write(hist_values)
with right_column:
alt_chart = alt.Chart(hist_values).\
mark_bar().encode(
x='State',
y=show_variable,
color='State',
column='Round:O'
)
st.subheader(f'Total {show_variable} by Year')
st.altair_chart(alt_chart)
with st.beta_expander('Raw Data'):
st.write(applicants[applicants.State.isin(entity_list)])
# st.bar_chart(data=filtered['Applicant Name'].value_counts())
# st.map(filtered)
elif app_or_grant == 'Grants':
st.title('TIGER Grants Awarded')
min_grant_size = st.slider('Minimum Grant Size', min_grant, max_grant, med_grant, step=int((max_grant - min_grant)/100))
n_grants = len(grants[grants.Amount >= min_grant_size])
prop_grants = round((1 - (n_grants/len(grants))) * 100, 2)
st.write(f'{n_grants} grants awarded in amounts of at least {min_grant_size}. {prop_grants} percent of all grants awarded were less than {min_grant_size}.')
st.subheader('Grants Awarded Map (Guam Excluded)')
st.map(grants[(grants.lon < 0) & (grants.Amount >= min_grant_size)])
with st.beta_expander('Raw Data'):
st.write(grants)
# st.map(applicants[(grants.lon < 0) & (grants.Amount >= min_grant_size)])
# st.pydeck_chart(pdk.Deck(
# map_style='mapbox://styles/mapbox/light-v9',
# layers=[
# pdk.Layer(
# 'HexagonLayer',
# data=grants,
# get_position='[lon, lat]',
# radius=25000,
# elevation_scale=5000,
# elevation_range=[0, 1000],
# pickable=True,
# extruded=True,
# ),
# pdk.Layer(
# 'ScatterplotLayer',
# data=grants,
# get_position='[lon, lat]',
# get_color='[200, 30, 0, 160]',
# get_radius=200,
# ),
# ],
# )) | tiger.py | import pandas as pd
import streamlit as st
import re
import pydeck as pdk
import numpy as np
import altair as alt
applicants = pd.read_csv('./applicants.csv')
grants = pd.read_csv('./grants.csv')
# lat_midpoint = grants['lat'].median()
# lon_midpoint = grants['lon'].median()
min_grant, max_grant, med_grant = int(grants.Amount.min()), int(grants.Amount.max()), int(grants.Amount.median())
min_app, max_app = int(applicants['Funding Request'].min()), int(applicants['Funding Request'].max())
app_25, app_75 = int(applicants['Funding Request'].quantile(.25)), int(applicants['Funding Request'].quantile(.75))
st.set_page_config(layout='wide')
app_or_grant = st.selectbox(
'Show Applications or Grants?',
('Applications', 'Grants'))
if app_or_grant == 'Applications':
st.title('TIGER Applications')
st.subheader('Applicants')
applicant_entities = list(applicants.State.unique())
def entity_select():
return st.multiselect('Show Applications From:', options=applicant_entities, default=applicant_entities[0])
def slider_select(min_v, max_v, range_v):
return st.slider('Select a Range of Values', min_v, max_v, range_v)
def show_select():
return st.selectbox('Show Sum of Total:', options=['Funding Request', 'Project Cost'])
with st.sidebar:
entity_list = entity_select()
slider = slider_select(0, 3500, (0, 250))
# grant_range = st.slider('Select a Range of Values',\
# min_app, max_app, (app_25, app_75))
filtered = applicants[applicants.State.isin(entity_list)]
st.write(f'There are {len(filtered)} applications from the State(s) you selected. This represents {round(100*len(filtered)/len(applicants), 2)} percent of all applications.')
left_column, right_column = st.beta_columns((1, 2))
with left_column:
show_variable = show_select()
hist_values = filtered.groupby(['State', 'Round']).agg('sum')[show_variable].reset_index()
st.write(hist_values)
with right_column:
alt_chart = alt.Chart(hist_values).\
mark_bar().encode(
x='State',
y=show_variable,
color='State',
column='Round:O'
)
st.subheader(f'Total {show_variable} by Year')
st.altair_chart(alt_chart)
with st.beta_expander('Raw Data'):
st.write(applicants[applicants.State.isin(entity_list)])
# st.bar_chart(data=filtered['Applicant Name'].value_counts())
# st.map(filtered)
elif app_or_grant == 'Grants':
st.title('TIGER Grants Awarded')
min_grant_size = st.slider('Minimum Grant Size', min_grant, max_grant, med_grant, step=int((max_grant - min_grant)/100))
n_grants = len(grants[grants.Amount >= min_grant_size])
prop_grants = round((1 - (n_grants/len(grants))) * 100, 2)
st.write(f'{n_grants} grants awarded in amounts of at least {min_grant_size}. {prop_grants} percent of all grants awarded were less than {min_grant_size}.')
st.subheader('Grants Awarded Map (Guam Excluded)')
st.map(grants[(grants.lon < 0) & (grants.Amount >= min_grant_size)])
with st.beta_expander('Raw Data'):
st.write(grants)
# st.map(applicants[(grants.lon < 0) & (grants.Amount >= min_grant_size)])
# st.pydeck_chart(pdk.Deck(
# map_style='mapbox://styles/mapbox/light-v9',
# layers=[
# pdk.Layer(
# 'HexagonLayer',
# data=grants,
# get_position='[lon, lat]',
# radius=25000,
# elevation_scale=5000,
# elevation_range=[0, 1000],
# pickable=True,
# extruded=True,
# ),
# pdk.Layer(
# 'ScatterplotLayer',
# data=grants,
# get_position='[lon, lat]',
# get_color='[200, 30, 0, 160]',
# get_radius=200,
# ),
# ],
# )) | 0.234319 | 0.187114 |
import copy
from Engine import BaseEngine
from GTP import Move
# want policy network to influence evaluation????
# could modify score by policy probability, possibly in a depth-dependent way
def get_board_after_move(board, move):
ret = copy.deepcopy(board)
ret.play_stone(move[0], move[1], board.color_to_play)
return ret
def minimax_eval(board, policy, value, depth):
if depth == 0:
score = value.evaluate(board)
print " "*(3-depth), "leaf node, score =", score
return score
moves = policy.suggest_moves(board)
assert len(moves) > 0
best_score = -99
for move in moves:
next_board = get_board_after_move(board, move)
print " "*(3-depth), "trying move", move
score = -1 * minimax_eval(next_board, policy, value, depth-1)
print " "*(3-depth), "move", move, "has score", score
if score > best_score:
best_score = score
return best_score
def choose_move_minimax(board, policy, value, depth):
assert depth > 0
moves = policy.suggest_moves(board)
best_score = -99
best_move = None
for move in moves:
next_board = get_board_after_move(board, move)
print "minimax root node: trying (%d,%d)..." % (move[0], move[1])
score = -1 * minimax_eval(next_board, policy, value, depth-1)
print "minimax root node: (%d,%d) gives score %f" % (move[0], move[1], score)
if score > best_score:
best_score, best_move = score, move
return best_move
# Return value of position if it's between lower and upper.
# If it's <= lower, return lower; if it's >= upper return upper.
def alphabeta_eval(board, policy, value, lower, upper, depth):
if depth == 0:
score = value.evaluate(board)
print " "*(3-depth), "leaf node, score =", score
return score
moves = policy.suggest_moves(board)
assert len(moves) > 0
for move in moves:
next_board = get_board_after_move(board, move)
print " "*(3-depth), "trying move", move
score = -1 * alphabeta_eval(next_board, policy, value, -upper, -lower, depth-1)
print " "*(3-depth), "move", move, "has score", score
if score >= upper:
print " "*(3-depth), "fail high!"
return upper
if score > lower:
lower = score
return lower
def choose_move_alphabeta(board, policy, value, depth):
assert depth > 0
moves = policy.suggest_moves(board)
lower = -1
upper = +1
best_move = None
for move in moves:
next_board = get_board_after_move(board, move)
print "alpha-beta root node: trying (%d,%d)..." % (move[0], move[1])
score = -1 * alphabeta_eval(next_board, policy, value, -upper, -lower, depth-1)
print "alpha-beta root node: (%d,%d) gives score %f" % (move[0], move[1], score)
if score > lower:
lower, best_move = score, move
return best_move
class TreeSearchEngine(BaseEngine):
def __init__(self, policy, value):
self.policy = policy
self.value = value
def name(self):
return "TreeSearch"
def version(self):
return "1.0"
def pick_move(self, color):
x,y = choose_move_alphabeta(self.board, self.policy, self.value, depth=3)
return Move(x,y)
def get_position_eval(self):
return self.value.evaluate(self.board)
if __name__ == '__main__':
import GTP
fclient = GTP.redirect_all_output("log_engine.txt")
import Policy
import MoveModels
import Eval
import EvalModels
#policy = Policy.AllPolicy()
policy = Policy.TFPolicy(model=MoveModels.Conv12PosDepELU(N=19, Nfeat=21), threshold_prob=0.8, softmax_temp=1.0)
value = Eval.TFEval(EvalModels.Conv11PosDepFC1ELU(N=19, Nfeat=21))
engine = TreeSearchEngine(policy, value)
gtp = GTP.GTP(engine, fclient)
gtp.loop() | support/go-NN-master/engine/TreeSearch.py | import copy
from Engine import BaseEngine
from GTP import Move
# want policy network to influence evaluation????
# could modify score by policy probability, possibly in a depth-dependent way
def get_board_after_move(board, move):
ret = copy.deepcopy(board)
ret.play_stone(move[0], move[1], board.color_to_play)
return ret
def minimax_eval(board, policy, value, depth):
if depth == 0:
score = value.evaluate(board)
print " "*(3-depth), "leaf node, score =", score
return score
moves = policy.suggest_moves(board)
assert len(moves) > 0
best_score = -99
for move in moves:
next_board = get_board_after_move(board, move)
print " "*(3-depth), "trying move", move
score = -1 * minimax_eval(next_board, policy, value, depth-1)
print " "*(3-depth), "move", move, "has score", score
if score > best_score:
best_score = score
return best_score
def choose_move_minimax(board, policy, value, depth):
assert depth > 0
moves = policy.suggest_moves(board)
best_score = -99
best_move = None
for move in moves:
next_board = get_board_after_move(board, move)
print "minimax root node: trying (%d,%d)..." % (move[0], move[1])
score = -1 * minimax_eval(next_board, policy, value, depth-1)
print "minimax root node: (%d,%d) gives score %f" % (move[0], move[1], score)
if score > best_score:
best_score, best_move = score, move
return best_move
# Return value of position if it's between lower and upper.
# If it's <= lower, return lower; if it's >= upper return upper.
def alphabeta_eval(board, policy, value, lower, upper, depth):
if depth == 0:
score = value.evaluate(board)
print " "*(3-depth), "leaf node, score =", score
return score
moves = policy.suggest_moves(board)
assert len(moves) > 0
for move in moves:
next_board = get_board_after_move(board, move)
print " "*(3-depth), "trying move", move
score = -1 * alphabeta_eval(next_board, policy, value, -upper, -lower, depth-1)
print " "*(3-depth), "move", move, "has score", score
if score >= upper:
print " "*(3-depth), "fail high!"
return upper
if score > lower:
lower = score
return lower
def choose_move_alphabeta(board, policy, value, depth):
assert depth > 0
moves = policy.suggest_moves(board)
lower = -1
upper = +1
best_move = None
for move in moves:
next_board = get_board_after_move(board, move)
print "alpha-beta root node: trying (%d,%d)..." % (move[0], move[1])
score = -1 * alphabeta_eval(next_board, policy, value, -upper, -lower, depth-1)
print "alpha-beta root node: (%d,%d) gives score %f" % (move[0], move[1], score)
if score > lower:
lower, best_move = score, move
return best_move
class TreeSearchEngine(BaseEngine):
def __init__(self, policy, value):
self.policy = policy
self.value = value
def name(self):
return "TreeSearch"
def version(self):
return "1.0"
def pick_move(self, color):
x,y = choose_move_alphabeta(self.board, self.policy, self.value, depth=3)
return Move(x,y)
def get_position_eval(self):
return self.value.evaluate(self.board)
if __name__ == '__main__':
import GTP
fclient = GTP.redirect_all_output("log_engine.txt")
import Policy
import MoveModels
import Eval
import EvalModels
#policy = Policy.AllPolicy()
policy = Policy.TFPolicy(model=MoveModels.Conv12PosDepELU(N=19, Nfeat=21), threshold_prob=0.8, softmax_temp=1.0)
value = Eval.TFEval(EvalModels.Conv11PosDepFC1ELU(N=19, Nfeat=21))
engine = TreeSearchEngine(policy, value)
gtp = GTP.GTP(engine, fclient)
gtp.loop() | 0.582254 | 0.487368 |
print "=================================="
# 5-1
age = 20
if age >= 18:
print 'your age is', age # Python代码的缩进规则
print 'adult'
# 退出缩进需要多敲一行回车
print 'END'
score = 75
if score >= 60:
print 'passed'
print "=================================="
# 5-2
if age >= 18:
print 'adult'
else:
print 'teenager'
score = 55
if score >= 60:
print 'passed'
else:
print 'failed'
if age >= 18:
print 'adult'
else:
if age >= 6:
print 'teenager'
else:
print 'kid'
age = 5
if age >= 18:
print 'adult'
else:
if age >= 6:
print 'teenager'
else:
if age >= 3:
print 'kid'
else:
print 'baby'
if age >= 18:
print 'adult'
elif age >= 6:
print 'teenager'
elif age >= 3:
print 'kid'
else:
print 'baby'
age = 8
if age >= 6:
print 'teenager'
elif age >= 18:
print 'adult'
else:
print 'kid'
age = 20
if age >= 6 and age < 18:
print 'teenager'
elif age >= 18:
print 'adult'
else:
print 'kid'
score = 53
if score >= 90:
print 'excellent'
elif score >= 80:
print 'good'
elif score >= 60:
print 'passed'
else:
print 'failed'
print "=================================="
# 5-3
l = ['Adam', 'Lisa', 'Bart']
for name in l:
print name
l = [75, 92, 59, 68]
sum = 0.0
for x in l:
sum = sum + x
print sum / 4
print "=================================="
# 5-4
N = 10
x = 0
while x < N:
print x
x = x + 1
N = 100
x = 1
sum = 0
while x < N:
x = x + 2
sum = sum + x
print sum
print "=================================="
# 5-5
sum = 0
x = 1
while True:
sum = sum + x
x = x + 1
if x > 100:
break
print sum
sum = 0
y = x = 1
while True:
sum = sum + y
y = y * 2
if x == 2:
break
x = x + 1
print sum
print "=================================="
# 5-6
L = [75, 98, 59, 81, 66, 43, 69, 85]
sum = 0.0
n = 0
for x in L:
if x < 60:
continue
sum = sum + x
n = n + 1
print sum / n
sum = 0
x = 0
while True:
x = x + 1
if x > 100:
break
if x % 2 == 0:
continue
sum = sum + x
print sum
print "=================================="
# 5-7
for x in ['A', 'B', 'C']:
for y in ['1', '2', '3']:
print x + y
print
for x in [1, 2, 3, 4, 5, 6, 7, 8, 9]:
for y in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
if x < y:
print x * 10 + y | imooc/1rumen/5.py | print "=================================="
# 5-1
age = 20
if age >= 18:
print 'your age is', age # Python代码的缩进规则
print 'adult'
# 退出缩进需要多敲一行回车
print 'END'
score = 75
if score >= 60:
print 'passed'
print "=================================="
# 5-2
if age >= 18:
print 'adult'
else:
print 'teenager'
score = 55
if score >= 60:
print 'passed'
else:
print 'failed'
if age >= 18:
print 'adult'
else:
if age >= 6:
print 'teenager'
else:
print 'kid'
age = 5
if age >= 18:
print 'adult'
else:
if age >= 6:
print 'teenager'
else:
if age >= 3:
print 'kid'
else:
print 'baby'
if age >= 18:
print 'adult'
elif age >= 6:
print 'teenager'
elif age >= 3:
print 'kid'
else:
print 'baby'
age = 8
if age >= 6:
print 'teenager'
elif age >= 18:
print 'adult'
else:
print 'kid'
age = 20
if age >= 6 and age < 18:
print 'teenager'
elif age >= 18:
print 'adult'
else:
print 'kid'
score = 53
if score >= 90:
print 'excellent'
elif score >= 80:
print 'good'
elif score >= 60:
print 'passed'
else:
print 'failed'
print "=================================="
# 5-3
l = ['Adam', 'Lisa', 'Bart']
for name in l:
print name
l = [75, 92, 59, 68]
sum = 0.0
for x in l:
sum = sum + x
print sum / 4
print "=================================="
# 5-4
N = 10
x = 0
while x < N:
print x
x = x + 1
N = 100
x = 1
sum = 0
while x < N:
x = x + 2
sum = sum + x
print sum
print "=================================="
# 5-5
sum = 0
x = 1
while True:
sum = sum + x
x = x + 1
if x > 100:
break
print sum
sum = 0
y = x = 1
while True:
sum = sum + y
y = y * 2
if x == 2:
break
x = x + 1
print sum
print "=================================="
# 5-6
L = [75, 98, 59, 81, 66, 43, 69, 85]
sum = 0.0
n = 0
for x in L:
if x < 60:
continue
sum = sum + x
n = n + 1
print sum / n
sum = 0
x = 0
while True:
x = x + 1
if x > 100:
break
if x % 2 == 0:
continue
sum = sum + x
print sum
print "=================================="
# 5-7
for x in ['A', 'B', 'C']:
for y in ['1', '2', '3']:
print x + y
print
for x in [1, 2, 3, 4, 5, 6, 7, 8, 9]:
for y in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
if x < y:
print x * 10 + y | 0.095513 | 0.316455 |
import pytest
import os
from src.syn_reports.commands.user_project_access_report import UserProjectAccessReport
@pytest.fixture(scope='session')
def syn_user(syn_client):
return syn_client.getUserProfile(os.environ.get('SYNAPSE_USERNAME'))
def assert_user_success_from_print(capsys, *users):
captured = capsys.readouterr()
assert captured.err == ''
for user in users:
assert 'Username: {0} ({1})'.format(user.userName, user.ownerId) in captured.out
def assert_project_success_from_print(capsys, *projects):
captured = capsys.readouterr()
assert captured.err == ''
for project in projects:
'Project: {0} ({1}) [{2}]'.format(project.name, project.id, 'Adminitrator') in captured.out
def assert_success_from_csv(csv_full_path, user, *entities):
assert os.path.isfile(csv_full_path)
with open(csv_full_path, mode='r') as f:
contents = f.read()
assert user.ownerId in contents
assert user.userName in contents
for entity in entities:
assert entity.id in contents
assert entity.name in contents
def test_it_reports_by_user_id(capsys, syn_user, syn_project):
UserProjectAccessReport(syn_user.ownerId).execute()
assert_user_success_from_print(capsys, syn_user)
assert_project_success_from_print(capsys, syn_project)
def test_it_reports_by_username(capsys, syn_user, syn_project):
UserProjectAccessReport(syn_user.userName).execute()
assert_user_success_from_print(capsys, syn_user)
assert_project_success_from_print(capsys, syn_project)
def test_it_does_not_blowup_if_user_not_found(capsys, syn_test_helper):
username = syn_test_helper.uniq_name(prefix='Invalid-User')
UserProjectAccessReport(username).execute()
captured = capsys.readouterr()
assert 'Could not find user matching: {0}'.format(username) in captured.err
def test_it_outputs_csv_to_dir(capsys, syn_user, syn_project, mk_tempdir):
out_dir = mk_tempdir()
report = UserProjectAccessReport(syn_user.userName, out_path=out_dir)
report.execute()
assert_user_success_from_print(capsys, syn_user)
assert_project_success_from_print(capsys, syn_project)
assert_success_from_csv(report._csv_full_path, syn_user, syn_project)
def test_it_outputs_csv_to_file(capsys, syn_user, syn_project, mk_tempdir):
out_file = os.path.join(mk_tempdir(), 'outfile.csv')
report = UserProjectAccessReport(syn_user.userName, out_path=out_file)
report.execute()
assert report._csv_full_path == out_file
assert_user_success_from_print(capsys, syn_user)
assert_project_success_from_print(capsys, syn_project)
assert_success_from_csv(report._csv_full_path, syn_user, syn_project) | tests/syn_reports/commands/user_project_access_report/test_user_project_access_report.py | import pytest
import os
from src.syn_reports.commands.user_project_access_report import UserProjectAccessReport
@pytest.fixture(scope='session')
def syn_user(syn_client):
return syn_client.getUserProfile(os.environ.get('SYNAPSE_USERNAME'))
def assert_user_success_from_print(capsys, *users):
captured = capsys.readouterr()
assert captured.err == ''
for user in users:
assert 'Username: {0} ({1})'.format(user.userName, user.ownerId) in captured.out
def assert_project_success_from_print(capsys, *projects):
captured = capsys.readouterr()
assert captured.err == ''
for project in projects:
'Project: {0} ({1}) [{2}]'.format(project.name, project.id, 'Adminitrator') in captured.out
def assert_success_from_csv(csv_full_path, user, *entities):
assert os.path.isfile(csv_full_path)
with open(csv_full_path, mode='r') as f:
contents = f.read()
assert user.ownerId in contents
assert user.userName in contents
for entity in entities:
assert entity.id in contents
assert entity.name in contents
def test_it_reports_by_user_id(capsys, syn_user, syn_project):
UserProjectAccessReport(syn_user.ownerId).execute()
assert_user_success_from_print(capsys, syn_user)
assert_project_success_from_print(capsys, syn_project)
def test_it_reports_by_username(capsys, syn_user, syn_project):
UserProjectAccessReport(syn_user.userName).execute()
assert_user_success_from_print(capsys, syn_user)
assert_project_success_from_print(capsys, syn_project)
def test_it_does_not_blowup_if_user_not_found(capsys, syn_test_helper):
username = syn_test_helper.uniq_name(prefix='Invalid-User')
UserProjectAccessReport(username).execute()
captured = capsys.readouterr()
assert 'Could not find user matching: {0}'.format(username) in captured.err
def test_it_outputs_csv_to_dir(capsys, syn_user, syn_project, mk_tempdir):
out_dir = mk_tempdir()
report = UserProjectAccessReport(syn_user.userName, out_path=out_dir)
report.execute()
assert_user_success_from_print(capsys, syn_user)
assert_project_success_from_print(capsys, syn_project)
assert_success_from_csv(report._csv_full_path, syn_user, syn_project)
def test_it_outputs_csv_to_file(capsys, syn_user, syn_project, mk_tempdir):
out_file = os.path.join(mk_tempdir(), 'outfile.csv')
report = UserProjectAccessReport(syn_user.userName, out_path=out_file)
report.execute()
assert report._csv_full_path == out_file
assert_user_success_from_print(capsys, syn_user)
assert_project_success_from_print(capsys, syn_project)
assert_success_from_csv(report._csv_full_path, syn_user, syn_project) | 0.314787 | 0.267686 |
class RubiksCube:
    """Rubik's cube stored as a flat list of sticker values.

    The rest of the class (isFinished/show/rotate) addresses stickers
    as cube[1]..cube[54], so the list is kept effectively 1-based.
    """

    # init a Rubicks Cube as a list of 54 ints (plus an index-0 sentinel)
    def __init__(self):
        # Bug fix: the original built a 0-based 54-element list while every
        # other method indexes 1..54, which made cube[54] raise IndexError
        # and isFinished compare cube[i] (== i+1 when solved) against i.
        # Index 0 holds an unused padding value.
        self.cube = [0] + list(range(1, 55))
# check if cube is finished
def isFinished(self):
    """Return True when every sticker is on its home position.

    NOTE(review): iterates 1..54, i.e. assumes self.cube is a 1-based
    list (sentinel at index 0) where a solved cube has cube[i] == i;
    with a plain 54-element list, cube[54] would raise IndexError.
    """
    for i in range(1, 55):
        if self.cube[i] != i:
            return False
    return True
# count how many stickers are on the wrong side
"""def getErrorCount(self):
errors = 0
for i in range(1, 55):
rightColor = (self.cube[i] - 1) // 9
actualColor = (i - 1) // 9
if rightColor != actualColor:
errors += 1
return errors"""
# prints a text based representation of the cube
def show(self):
    """Print a colored, face-by-face text rendering of the cube.

    NOTE(review): indexes self.cube[1..54], assuming the 1-based list
    layout used by the rest of the class.
    """
    # IMPORTANT: By this color order, Up is White, and Front is Orange
    sideNames = ["Up", "Front", "Left", "Right", "Down", "Back"]
    # Hoisted out of the loop: the import and the palette are
    # loop-invariant (the original re-ran both on all 54 iterations).
    # V1 colors - check cubeV1.py
    # V2 colors
    import colored  # https://pypi.org/project/colored/
    colors = [15, 215, 4, 2, 11, 1]  # white, orange, blue, green, yellow, red
    reset = colored.attr('reset')
    for i in range(1, 55):
        # print sideName at the start of each 9-sticker face
        if (i - 1) % 9 == 0:
            sideName = sideNames[(i - 1) // 9]
            print(sideName + ":")
        # format sticker number if single digit (only consumed by the
        # commented-out digit-printing variant kept below for debugging)
        if self.cube[i] < 10:
            digitsFormated = " " + str(self.cube[i]) + " "
        else:
            digitsFormated = str(self.cube[i]) + " "
        # print colored sticker (number or ▒▒)
        color = colored.fg(colors[(self.cube[i] - 1) // 9])
        # print(color + digitsFormated + reset, end='')  # print digits
        print(color + "▒▒ " + reset, end='')  # print ▒▒
        # print end line or end side
        if i % 3 == 0:  # Next line
            print("")
        if i % 9 == 0:  # Next side
            print("--------")
def rotate(self, move):
# Workaround for not have to write "self.cube[42]" a thousand times: init a short named var
c = []
for cubie in self.cube:
c.append(cubie)
""" Clockwise moves: """
if move == "U":
c[1], c[2], c[3], c[4], c[5], c[6], c[7], c[8], c[9] = c[7], c[4], c[1], c[8], c[5], c[2], c[9], c[6], c[3]
c[10], c[11], c[12], c[28], c[29], c[30], c[46], c[47], c[48], c[19], c[20], c[21] = c[28], c[29], c[30], c[46], c[47], c[48], c[19], c[20], c[21], c[10], c[11], c[12]
if move == "F":
c[10], c[11], c[12], c[13], c[14], c[15], c[16], c[17], c[18] = c[16], c[13], c[10], c[17], c[14], c[11], c[18], c[15], c[12]
c[7], c[8], c[9], c[28], c[31], c[34], c[39], c[38], c[37], c[27], c[24], c[21] = c[27], c[24], c[21], c[7], c[8], c[9], c[28], c[31], c[34], c[39], c[38], c[37]
if move == "L":
c[19], c[20], c[21], c[22], c[23], c[24], c[25], c[26], c[27] = c[25], c[22], c[19], c[26], c[23], c[20], c[27], c[24], c[21]
c[1], c[4], c[7], c[10], c[13], c[16], c[37], c[40], c[43], c[54], c[51], c[48] = c[54], c[51], c[48], c[1], c[4], c[7], c[10], c[13], c[16], c[37], c[40], c[43]
if move == "R":
c[28], c[29], c[30], c[31], c[32], c[33], c[34], c[35], c[36] = c[34], c[31], c[28], c[35], c[32], c[29], c[36], c[33], c[30]
c[9], c[6], c[3], c[46], c[49], c[52], c[45], c[42], c[39], c[18], c[15], c[12] = c[18], c[15], c[12], c[9], c[6], c[3], c[46], c[49], c[52], c[45], c[42], c[39]
if move == "D":
c[37], c[38], c[39], c[40], c[41], c[42], c[43], c[44], c[45] = c[43], c[40], c[37], c[44], c[41], c[38], c[45], c[42], c[39]
c[16], c[17], c[18], c[34], c[35], c[36], c[52], c[53], c[54], c[25], c[26], c[27] = c[25], c[26], c[27], c[16], c[17], c[18], c[34], c[35], c[36], c[52], c[53], c[54]
if move == "B":
c[46], c[47], c[48], c[49], c[50], c[51], c[52], c[53], c[54] = c[52], c[49], c[46], c[53], c[50], c[47], c[54], c[51], c[48]
c[3], c[2], c[1], c[19], c[22], c[25], c[43], c[44], c[45], c[36], c[33], c[30] = c[36], c[33], c[30], c[3], c[2], c[1], c[19], c[22], c[25], c[43], c[44], c[45]
""" Counter clockwise moves: """
if move == "Ui":
for _ in range(3):
c[1], c[2], c[3], c[4], c[5], c[6], c[7], c[8], c[9] = c[7], c[4], c[1], c[8], c[5], c[2], c[9], c[6], c[3]
c[10], c[11], c[12], c[28], c[29], c[30], c[46], c[47], c[48], c[19], c[20], c[21] = c[28], c[29], c[30], c[46], c[47], c[48], c[19], c[20], c[21], c[10], c[11], c[12]
if move == "Fi":
for _ in range(3):
c[10], c[11], c[12], c[13], c[14], c[15], c[16], c[17], c[18] = c[16], c[13], c[10], c[17], c[14], c[11], c[18], c[15], c[12]
c[7], c[8], c[9], c[28], c[31], c[34], c[39], c[38], c[37], c[27], c[24], c[21] = c[27], c[24], c[21], c[7], c[8], c[9], c[28], c[31], c[34], c[39], c[38], c[37]
if move == "Li":
for _ in range(3):
c[19], c[20], c[21], c[22], c[23], c[24], c[25], c[26], c[27] = c[25], c[22], c[19], c[26], c[23], c[20], c[27], c[24], c[21]
c[1], c[4], c[7], c[10], c[13], c[16], c[37], c[40], c[43], c[54], c[51], c[48] = c[54], c[51], c[48], c[1], c[4], c[7], c[10], c[13], c[16], c[37], c[40], c[43]
if move == "Ri":
for _ in range(3):
c[28], c[29], c[30], c[31], c[32], c[33], c[34], c[35], c[36] = c[34], c[31], c[28], c[35], c[32], c[29], c[36], c[33], c[30]
c[9], c[6], c[3], c[46], c[49], c[52], c[45], c[42], c[39], c[18], c[15], c[12] = c[18], c[15], c[12], c[9], c[6], c[3], c[46], c[49], c[52], c[45], c[42], c[39]
if move == "Di":
for _ in range(3):
c[37], c[38], c[39], c[40], c[41], c[42], c[43], c[44], c[45] = c[43], c[40], c[37], c[44], c[41], c[38], c[45], c[42], c[39]
c[16], c[17], c[18], c[34], c[35], c[36], c[52], c[53], c[54], c[25], c[26], c[27] = c[25], c[26], c[27], c[16], c[17], c[18], c[34], c[35], c[36], c[52], c[53], c[54]
if move == "Bi":
for _ in range(3):
c[46], c[47], c[48], c[49], c[50], c[51], c[52], c[53], c[54] = c[52], c[49], c[46], c[53], c[50], c[47], c[54], c[51], c[48]
c[3], c[2], c[1], c[19], c[22], c[25], c[43], c[44], c[45], c[36], c[33], c[30] = c[36], c[33], c[30], c[3], c[2], c[1], c[19], c[22], c[25], c[43], c[44], c[45]
self.cube = c | cube.py | class RubiksCube:
# init a Rubicks Cube as a list of 54 ints
def __init__(self):
cube = []
for i in range(1, 55):
cube.append(i)
self.cube = cube
# check if cube is finished
def isFinished(self):
for i in range(1, 55):
if self.cube[i] != i:
return False
return True
# count how many stickers are on the wrong side
"""def getErrorCount(self):
errors = 0
for i in range(1, 55):
rightColor = (self.cube[i] - 1) // 9
actualColor = (i - 1) // 9
if rightColor != actualColor:
errors += 1
return errors"""
# prints a text based representation of the cube
def show(self):
# IMPORTANT: By this color order, Up is White, and Front is Orange
sideNames = ["Up", "Front", "Left", "Right", "Down", "Back"]
for i in range(1, 55):
# print sideName
if (i - 1) % 9 == 0:
sideName = sideNames[(i - 1) // 9]
print(sideName + ":")
# format sticker number if single digit
if self.cube[i] < 10:
digitsFormated = " " + str(self.cube[i]) + " "
else:
digitsFormated = str(self.cube[i]) + " "
# print colored sticker (number or ▒▒)
# V1 colors - check cubeV1.py
# V2 colors
import colored #https://pypi.org/project/colored/
colors = [15, 215, 4, 2, 11, 1] # white, orange, blue, green, yellow, red
color = colored.fg(colors[(self.cube[i] - 1) // 9])
reset = colored.attr('reset')
#print(color + digitsFormated + reset, end='') #print digits
print(color + "▒▒ " + reset, end='') #print ▒▒
#print end line or end side
if i % 3 == 0: # Next line
print("")
if i % 9 == 0: # Next side
print("--------")
def rotate(self, move):
    """Apply one face turn to the cube in place.

    `move` is one of "U","F","L","R","D","B" (clockwise quarter turn) or
    the same letter suffixed with "i" (counter-clockwise, implemented as
    three clockwise quarter turns). Any other string leaves the cube
    unchanged.

    For each move, the first tuple assignment rotates the face's own 9
    stickers and the second cycles the adjacent edge strips of the four
    neighbouring faces.

    NOTE(review): sticker indices run 1..54, assuming the 1-based cube
    list layout (sentinel at index 0) used by the rest of the class.
    """
    # Workaround for not have to write "self.cube[42]" a thousand times: init a short named var
    c = []
    for cubie in self.cube:
        c.append(cubie)
    """ Clockwise moves: """
    if move == "U":
        c[1], c[2], c[3], c[4], c[5], c[6], c[7], c[8], c[9] = c[7], c[4], c[1], c[8], c[5], c[2], c[9], c[6], c[3]
        c[10], c[11], c[12], c[28], c[29], c[30], c[46], c[47], c[48], c[19], c[20], c[21] = c[28], c[29], c[30], c[46], c[47], c[48], c[19], c[20], c[21], c[10], c[11], c[12]
    if move == "F":
        c[10], c[11], c[12], c[13], c[14], c[15], c[16], c[17], c[18] = c[16], c[13], c[10], c[17], c[14], c[11], c[18], c[15], c[12]
        c[7], c[8], c[9], c[28], c[31], c[34], c[39], c[38], c[37], c[27], c[24], c[21] = c[27], c[24], c[21], c[7], c[8], c[9], c[28], c[31], c[34], c[39], c[38], c[37]
    if move == "L":
        c[19], c[20], c[21], c[22], c[23], c[24], c[25], c[26], c[27] = c[25], c[22], c[19], c[26], c[23], c[20], c[27], c[24], c[21]
        c[1], c[4], c[7], c[10], c[13], c[16], c[37], c[40], c[43], c[54], c[51], c[48] = c[54], c[51], c[48], c[1], c[4], c[7], c[10], c[13], c[16], c[37], c[40], c[43]
    if move == "R":
        c[28], c[29], c[30], c[31], c[32], c[33], c[34], c[35], c[36] = c[34], c[31], c[28], c[35], c[32], c[29], c[36], c[33], c[30]
        c[9], c[6], c[3], c[46], c[49], c[52], c[45], c[42], c[39], c[18], c[15], c[12] = c[18], c[15], c[12], c[9], c[6], c[3], c[46], c[49], c[52], c[45], c[42], c[39]
    if move == "D":
        c[37], c[38], c[39], c[40], c[41], c[42], c[43], c[44], c[45] = c[43], c[40], c[37], c[44], c[41], c[38], c[45], c[42], c[39]
        c[16], c[17], c[18], c[34], c[35], c[36], c[52], c[53], c[54], c[25], c[26], c[27] = c[25], c[26], c[27], c[16], c[17], c[18], c[34], c[35], c[36], c[52], c[53], c[54]
    if move == "B":
        c[46], c[47], c[48], c[49], c[50], c[51], c[52], c[53], c[54] = c[52], c[49], c[46], c[53], c[50], c[47], c[54], c[51], c[48]
        c[3], c[2], c[1], c[19], c[22], c[25], c[43], c[44], c[45], c[36], c[33], c[30] = c[36], c[33], c[30], c[3], c[2], c[1], c[19], c[22], c[25], c[43], c[44], c[45]
    """ Counter clockwise moves: """
    # Each "Xi" move repeats the clockwise permutation three times.
    if move == "Ui":
        for _ in range(3):
            c[1], c[2], c[3], c[4], c[5], c[6], c[7], c[8], c[9] = c[7], c[4], c[1], c[8], c[5], c[2], c[9], c[6], c[3]
            c[10], c[11], c[12], c[28], c[29], c[30], c[46], c[47], c[48], c[19], c[20], c[21] = c[28], c[29], c[30], c[46], c[47], c[48], c[19], c[20], c[21], c[10], c[11], c[12]
    if move == "Fi":
        for _ in range(3):
            c[10], c[11], c[12], c[13], c[14], c[15], c[16], c[17], c[18] = c[16], c[13], c[10], c[17], c[14], c[11], c[18], c[15], c[12]
            c[7], c[8], c[9], c[28], c[31], c[34], c[39], c[38], c[37], c[27], c[24], c[21] = c[27], c[24], c[21], c[7], c[8], c[9], c[28], c[31], c[34], c[39], c[38], c[37]
    if move == "Li":
        for _ in range(3):
            c[19], c[20], c[21], c[22], c[23], c[24], c[25], c[26], c[27] = c[25], c[22], c[19], c[26], c[23], c[20], c[27], c[24], c[21]
            c[1], c[4], c[7], c[10], c[13], c[16], c[37], c[40], c[43], c[54], c[51], c[48] = c[54], c[51], c[48], c[1], c[4], c[7], c[10], c[13], c[16], c[37], c[40], c[43]
    if move == "Ri":
        for _ in range(3):
            c[28], c[29], c[30], c[31], c[32], c[33], c[34], c[35], c[36] = c[34], c[31], c[28], c[35], c[32], c[29], c[36], c[33], c[30]
            c[9], c[6], c[3], c[46], c[49], c[52], c[45], c[42], c[39], c[18], c[15], c[12] = c[18], c[15], c[12], c[9], c[6], c[3], c[46], c[49], c[52], c[45], c[42], c[39]
    if move == "Di":
        for _ in range(3):
            c[37], c[38], c[39], c[40], c[41], c[42], c[43], c[44], c[45] = c[43], c[40], c[37], c[44], c[41], c[38], c[45], c[42], c[39]
            c[16], c[17], c[18], c[34], c[35], c[36], c[52], c[53], c[54], c[25], c[26], c[27] = c[25], c[26], c[27], c[16], c[17], c[18], c[34], c[35], c[36], c[52], c[53], c[54]
    if move == "Bi":
        for _ in range(3):
            c[46], c[47], c[48], c[49], c[50], c[51], c[52], c[53], c[54] = c[52], c[49], c[46], c[53], c[50], c[47], c[54], c[51], c[48]
            c[3], c[2], c[1], c[19], c[22], c[25], c[43], c[44], c[45], c[36], c[33], c[30] = c[36], c[33], c[30], c[3], c[2], c[1], c[19], c[22], c[25], c[43], c[44], c[45]
    self.cube = c
from flask import Flask
import redis
import json
from ...service.entity.book import Book
from ...exception.exception import BookAlreadyExistsException
app = Flask(__name__)
BOOK_COUNTER = "book_counter"
BOOK_ID_PREFIX = "book_"
class BookRepository:
    """Redis-backed repository for Book entities.

    Books are stored as JSON strings under keys "book_<id>"; the integer
    key "book_counter" holds the highest id ever issued.
    """

    def __init__(self):
        self.db = redis.Redis(host="redis", port=6379, decode_responses=True)
        # Idiom fix: `is None` (identity) instead of `== None`.
        # Seed the id counter only on the first ever start-up.
        if self.db.get(BOOK_COUNTER) is None:
            self.db.set(BOOK_COUNTER, 0)
def save(self, book_req):
    """Persist a new book and return its generated id.

    Raises BookAlreadyExistsException when a book with the same title
    already exists.
    """
    app.logger.debug("Saving new book: {0}.".format(book_req))
    book = self.find_book_by_title(book_req.title)
    # Idiom fix: `is not None` (identity) instead of `!= None`.
    if book is not None:
        raise BookAlreadyExistsException("Book title \"{0}\" already exist.".format(book_req.title))
    # INCR is atomic, so concurrent saves get distinct ids.
    book = Book(self.db.incr(BOOK_COUNTER), book_req.author_id, book_req.title, book_req.year)
    book_id = BOOK_ID_PREFIX + str(book.id)
    book_json = json.dumps(book.__dict__)
    self.db.set(book_id, book_json)
    app.logger.debug("Saved new book: (id: {0}).".format(book.id))
    return book.id
def find_book_by_title(self, title):
    """Linear scan over all stored book keys; return the first Book whose
    title matches `title`, or None when no book has that title."""
    highest_id = int(self.db.get(BOOK_COUNTER))
    for candidate_id in range(1, highest_id + 1):
        key = BOOK_ID_PREFIX + str(candidate_id)
        # Deleted books leave gaps in the id sequence.
        if not self.db.exists(key):
            continue
        candidate = Book.from_json(json.loads(self.db.get(key)))
        if candidate.title == title:
            return candidate
    return None
def count_all(self):
    """Count the book keys currently present in Redis (ids may have gaps)."""
    app.logger.debug("Starting counting all books")
    highest_id = int(self.db.get(BOOK_COUNTER))
    n_of_books = sum(
        1
        for i in range(1, highest_id + 1)
        if self.db.exists(BOOK_ID_PREFIX + str(i))
    )
    app.logger.debug("Counted all books (n: {0})".format(n_of_books))
    return n_of_books
def find_n_books(self, start, limit):
app.logger.debug("Finding n of books (start: {0}, limit: {1}".format(start, limit))
n = int(self.db.get(BOOK_COUNTER))
books = []
counter = 1
for i in range(1, n + 1):
book_id = BOOK_ID_PREFIX + str(i)
if not self.db.exists(book_id):
continue
if counter < start:
counter += 1
continue
book_json = self.db.get(book_id)
book = Book.from_json(json.loads(book_json))
books.append(book)
if len(books) >= limit:
break
app.logger.debug("Found {0} books.".format(len(books)))
return books | Aplikacja_Webowa_Etap_3/sixth_app/src/service/repositories/book_repository.py | from flask import Flask
import redis
import json
from ...service.entity.book import Book
from ...exception.exception import BookAlreadyExistsException
app = Flask(__name__)
BOOK_COUNTER = "book_counter"
BOOK_ID_PREFIX = "book_"
class BookRepository:
def __init__(self):
self.db = redis.Redis(host = "redis", port = 6379, decode_responses = True)
if self.db.get(BOOK_COUNTER) == None:
self.db.set(BOOK_COUNTER, 0)
def save(self, book_req):
app.logger.debug("Saving new book: {0}.".format(book_req))
book = self.find_book_by_title(book_req.title)
if book != None:
raise BookAlreadyExistsException("Book title \"{0}\" already exist.".format(book_req.title))
book = Book(self.db.incr(BOOK_COUNTER), book_req.author_id, book_req.title, book_req.year)
book_id = BOOK_ID_PREFIX + str(book.id)
book_json = json.dumps(book.__dict__)
self.db.set(book_id, book_json)
app.logger.debug("Saved new book: (id: {0}).".format(book.id))
return book.id
def find_book_by_title(self, title):
n = int(self.db.get(BOOK_COUNTER))
for i in range(1, n + 1):
book_id = BOOK_ID_PREFIX + str(i)
if not self.db.exists(book_id):
continue
book_json = self.db.get(book_id)
book = Book.from_json(json.loads(book_json))
if book.title == title:
return book
return None
def count_all(self):
app.logger.debug("Starting counting all books")
n = int(self.db.get(BOOK_COUNTER))
n_of_books = 0
for i in range(1, n + 1):
book_id = BOOK_ID_PREFIX + str(i)
if self.db.exists(book_id):
n_of_books += 1
app.logger.debug("Counted all books (n: {0})".format(n_of_books))
return n_of_books
def find_n_books(self, start, limit):
    """Paginate over surviving book keys: skip the first `start` - 1
    existing books, then collect up to `limit` Book objects."""
    app.logger.debug("Finding n of books (start: {0}, limit: {1}".format(start, limit))
    highest_id = int(self.db.get(BOOK_COUNTER))
    books = []
    seen = 0
    for i in range(1, highest_id + 1):
        key = BOOK_ID_PREFIX + str(i)
        if not self.db.exists(key):
            continue
        seen += 1
        # Books before the requested page are skipped, not loaded.
        if seen < start:
            continue
        books.append(Book.from_json(json.loads(self.db.get(key))))
        if len(books) >= limit:
            break
    app.logger.debug("Found {0} books.".format(len(books)))
    return books
import torch
import torch.nn as nn
# locals
from .utils import OneHotEncode
from .encoders import SENNEncoder, StyleEncoder, VAEEncoder
from .decoders import SENNDecoder
class SENNConceptizer(nn.Module):
    """Class to reproduce Senn conceptizer architecture
    Args:
        n_concepts: number of concepts
        dataset: MNIST or CIFAR10. Defaults to "MNIST".
    Input:
        x: image (b, n_channels, h, w)
    Output:
        z: vector of concepts (b, n_concepts)
        x_tilde: reconstructed image (b, n_channels, h, w)
    """
    def __init__(self, n_concepts, dataset = "MNIST"):
        super(SENNConceptizer, self).__init__()
        self.n_concepts = n_concepts
        # CIFAR10 is RGB; any other dataset string is treated as grayscale.
        self.n_channels = 3 if dataset == "CIFAR10" else 1
        self.encoder = SENNEncoder(self.n_concepts, self.n_channels)
        self.decoder = SENNDecoder(self.n_concepts, self.n_channels)

    def forward(self, x):
        # Encode to a concept vector, then reconstruct the input from it.
        z = self.encoder(x)
        x_tilde = self.decoder(z)
        return z, x_tilde.view_as(x)
class VAEConceptizer(nn.Module):
    """Conceptizer for vaesenn
    Args:
        n_concepts: number of concepts
        n_styles: number of styles
        n_classes: number of classes for classification task. Defaults to 10.
        dataset: dataset. Defaults to MNIST.
    Returns
        vaesenn conceptizer module
    """
    def __init__(self, n_concepts, n_styles, n_classes = 10, dataset = "MNIST"):
        super(VAEConceptizer, self).__init__()
        self.n_concepts = n_concepts
        self.n_classes = n_classes
        self.n_styles = n_styles
        self.n_channels = 3 if dataset == "CIFAR10" else 1
        # Separate VAE encoders for the concept and style latents; each
        # decoder reconstructs the image from a concatenation of two codes.
        self.encoder_concepts = VAEEncoder(self.n_concepts, self.n_channels)
        self.decoder_concepts = SENNDecoder(self.n_concepts+self.n_styles, self.n_channels)
        self.encoder_styles = VAEEncoder(self.n_styles, self.n_channels)
        self.decoder_styles = SENNDecoder(self.n_classes+self.n_styles, self.n_channels)

    def forward_styles(self, x, targets):
        """Encode the style latent and decode conditioned on the class label."""
        one_hot = OneHotEncode(self.n_classes)(targets)
        mean, log_var = self.encoder_styles(x)
        if self.training:
            # Reparameterization trick: z = mu + sigma * eps keeps the
            # stochastic node differentiable during training.
            std = torch.exp(0.5 * log_var)
            epsilon = torch.randn_like(std)
            z = mean + std * epsilon
        else:
            # Deterministic at eval time: use the posterior mean.
            z = mean
        x_decoded = self.decoder_styles(torch.cat([z, one_hot], axis=-1))
        return z, mean, log_var, x_decoded.view_as(x)

    def forward(self, x):
        """Encode the concept latent; decode from [concepts, style mean]."""
        mean, log_var = self.encoder_concepts(x)
        # Styles are NOT resampled here: only their posterior mean is used.
        mean_styles, _ = self.encoder_styles(x)
        if self.training:
            # Reparameterization trick (see forward_styles).
            std = torch.exp(0.5 * log_var)
            epsilon = torch.randn_like(std)
            z = mean + std * epsilon
        else:
            z = mean
        x_decoded = self.decoder_concepts(torch.cat([z, mean_styles], axis=-1))
        return z, mean, log_var, x_decoded.view_as(x)
class InvarConceptizer(SENNConceptizer):
    """Conceptizer for invarsenn
    Args:
        n_concepts: number of concepts
        n_e2: number of noise variables
        dataset: dataset
        dropout_rate: dropout rate
    Returns:
        conceptizer module for invarsenn
    """
    def __init__(self, n_concepts, n_e2, dataset, dropout_rate = 0.5):
        # Parent encoder/decoder are sized for concepts + noise variables.
        super(InvarConceptizer, self).__init__(n_concepts + n_e2, dataset)
        self.n_e2 = n_e2
        # Dropout acts as the noise source applied to the concept code.
        self.noise = nn.Dropout(p=dropout_rate)
        # Two linear heads split the shared encoding into e1 (concepts) and e2.
        self.fc_e1 = nn.Linear(n_concepts+n_e2, n_concepts)
        self.fc_e2 = nn.Linear(n_concepts+n_e2, n_e2)

    def forward(self, x):
        out = self.encoder(x)
        concepts = self.fc_e1(out)
        e2 = self.fc_e2(out)
        # Reconstruct from noisy concepts + e2 (dropout is a no-op in eval).
        concepts_noisy = self.noise(concepts)
        reconstructed_x = self.decoder(torch.cat((concepts_noisy, e2), axis=-1))
        return concepts, e2, reconstructed_x.view_as(x)
if __name__ == "__main__":
pass | SENN/conceptizers.py | import torch
import torch.nn as nn
# locals
from .utils import OneHotEncode
from .encoders import SENNEncoder, StyleEncoder, VAEEncoder
from .decoders import SENNDecoder
class SENNConceptizer(nn.Module):
"""Class to reproduce Senn conceptizer architecture
Args:
n_concepts: number of concepts
dataset: MNIST or CIFAR10. Defaults to "MNIST".
Inout:
x: image (b, n_channels, h, w)
Output:
z: vector of concepts (b, n_concepts)
x_tilde: reconstructed image (b, n_channels, h, w)
"""
def __init__(self, n_concepts, dataset = "MNIST"):
super(SENNConceptizer, self).__init__()
self.n_concepts = n_concepts
self.n_channels = 3 if dataset == "CIFAR10" else 1
self.encoder = SENNEncoder(self.n_concepts, self.n_channels)
self.decoder = SENNDecoder(self.n_concepts, self.n_channels)
def forward(self, x):
z = self.encoder(x)
x_tilde = self.decoder(z)
return z, x_tilde.view_as(x)
class VAEConceptizer(nn.Module):
"""Conzeptizer for vaesenn
Args:
n_concepts: number of concepts
n_styles: number of styles
n_classes: number of classes for classification task. Defaults to 10.
dataset: dataset. Defaults to MNIST.
Returns
vaesenn conceptizer module
"""
def __init__(self, n_concepts, n_styles, n_classes = 10, dataset = "MNIST"):
super(VAEConceptizer, self).__init__()
self.n_concepts = n_concepts
self.n_classes = n_classes
self.n_styles = n_styles
self.n_channels = 3 if dataset == "CIFAR10" else 1
self.encoder_concepts = VAEEncoder(self.n_concepts, self.n_channels)
self.decoder_concepts = SENNDecoder(self.n_concepts+self.n_styles, self.n_channels)
self.encoder_styles = VAEEncoder(self.n_styles, self.n_channels)
self.decoder_styles = SENNDecoder(self.n_classes+self.n_styles, self.n_channels)
def forward_styles(self, x, targets):
one_hot = OneHotEncode(self.n_classes)(targets)
mean, log_var = self.encoder_styles(x)
if self.training:
std = torch.exp(0.5 * log_var)
epsilon = torch.randn_like(std)
z = mean + std * epsilon
else:
z = mean
x_decoded = self.decoder_styles(torch.cat([z, one_hot], axis=-1))
return z, mean, log_var, x_decoded.view_as(x)
def forward(self, x):
mean, log_var = self.encoder_concepts(x)
mean_styles, _ = self.encoder_styles(x)
if self.training:
std = torch.exp(0.5 * log_var)
epsilon = torch.randn_like(std)
z = mean + std * epsilon
else:
z = mean
x_decoded = self.decoder_concepts(torch.cat([z, mean_styles], axis=-1))
return z, mean, log_var, x_decoded.view_as(x)
class InvarConceptizer(SENNConceptizer):
"""Conceptizer for invarsenn
Args:
n_concepts: number of concepts
n_e2: number of noise variables
dataset: datset
dropout_rate: dropout rate
Returns:
conceptizer module for invarseen
"""
def __init__(self, n_concepts, n_e2, dataset, dropout_rate = 0.5):
super(InvarConceptizer, self).__init__(n_concepts + n_e2, dataset)
self.n_e2 = n_e2
self.noise = nn.Dropout(p=dropout_rate)
self.fc_e1 = nn.Linear(n_concepts+n_e2, n_concepts)
self.fc_e2 = nn.Linear(n_concepts+n_e2, n_e2)
def forward(self, x):
out = self.encoder(x)
concepts = self.fc_e1(out)
e2 = self.fc_e2(out)
concepts_noisy = self.noise(concepts)
reconstructed_x = self.decoder(torch.cat((concepts_noisy, e2), axis=-1))
return concepts, e2, reconstructed_x.view_as(x)
if __name__ == "__main__":
pass | 0.933484 | 0.427397 |
from django import forms
from allauth.account.forms import SignupForm
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.core.validators import MaxValueValidator, MinValueValidator
from .models import CustomUser
from .models import Booking
from .models import Contact
from .models import Service
class DateInput(forms.DateInput):
    """Date widget rendered as an HTML5 <input type="date"> picker."""
    input_type = 'date'
class CustomUserCreationForm(UserCreationForm):
    """Minimal account-creation form for CustomUser (username + email)."""
    class Meta:
        model = CustomUser
        fields = ('username', 'email')
class CustomUserChangeForm(UserChangeForm):
    """Profile-editing form for CustomUser."""
    # Setting password to None removes UserChangeForm's password hash field.
    password = None
    # NOTE(review): this list is duplicated in CustomSignupForm — consider
    # extracting a single module-level constant.
    HOUSING_TYPE = [
        ('ap', 'Apartment'),
        ('condo', 'Condo'),
        ('villa', 'Villa'),
        ('single', 'Single-family'),
        ('mansion', 'Mansion'),
        ('cottage', 'Cottage'),
        ('tiny', 'Tiny House'),
    ]
    housing_type = forms.ChoiceField(choices=HOUSING_TYPE)

    class Meta:
        model = CustomUser
        fields = ('username', 'first_name', 'last_name', 'email',
                  'phone', 'address', 'housing_type', 'surface_sqm')
class CustomSignupForm(SignupForm):
    """allauth signup form extended with the CustomUser profile fields."""
    # NOTE(review): duplicated in CustomUserChangeForm — consider sharing.
    HOUSING_TYPE = [
        ('ap', 'Apartment'),
        ('condo', 'Condo'),
        ('villa', 'Villa'),
        ('single', 'Single-family'),
        ('mansion', 'Mansion'),
        ('cottage', 'Cottage'),
        ('tiny', 'Tiny House'),
    ]
    first_name = forms.CharField(max_length=30, label='First Name')
    last_name = forms.CharField(max_length=30, label='Last Name')
    phone = forms.CharField(
        max_length=12, label='Phone number')
    address = forms.CharField(max_length=100, label='Address')
    city = forms.CharField(max_length=60, label='City')
    postcode = forms.CharField(max_length=5, label='Postcode')
    housing_type = forms.ChoiceField(choices=HOUSING_TYPE)
    # Habitable surface in square metres, bounded to a sane range.
    surface_sqm = forms.IntegerField(
        validators=[MinValueValidator(20),
                    MaxValueValidator(500)]
    )

    def save(self, request):
        """Create the allauth user, then persist the extra profile fields."""
        user = super(CustomSignupForm, self).save(request)
        user.first_name = self.cleaned_data['first_name']
        user.last_name = self.cleaned_data['last_name']
        user.phone = self.cleaned_data['phone']
        user.address = self.cleaned_data['address']
        user.city = self.cleaned_data['city']
        user.postcode = self.cleaned_data['postcode']
        user.housing_type = self.cleaned_data['housing_type']
        user.surface_sqm = self.cleaned_data['surface_sqm']
        user.save()
        return user

    # NOTE(review): SignupForm is not a ModelForm; this Meta looks unused.
    class Meta:
        model = CustomUser
class ServiceModelChoiceField(forms.ModelChoiceField):
    """ModelChoiceField that labels each option with the service's name."""
    def label_from_instance(self, obj):
        # Shown in the <select> options instead of the default str(obj).
        return obj.name
class BookingForm(forms.ModelForm):
    """Booking creation form; service options are labelled by name."""
    service = ServiceModelChoiceField(queryset=Service.objects.all())

    class Meta:
        model = Booking
        fields = ['service', 'date', 'mentions']
        widgets = {
            # HTML5 date picker instead of the default text input.
            'date': DateInput(),
        }
class ContactForm(forms.ModelForm):
    """Public contact-page form backed by the Contact model."""
    class Meta:
        model = Contact
        fields = ['name', 'email', 'telephone', 'title', 'message']
from allauth.account.forms import SignupForm
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.core.validators import MaxValueValidator, MinValueValidator
from .models import CustomUser
from .models import Booking
from .models import Contact
from .models import Service
class DateInput(forms.DateInput):
input_type = 'date'
class CustomUserCreationForm(UserCreationForm):
class Meta:
model = CustomUser
fields = ('username', 'email')
class CustomUserChangeForm(UserChangeForm):
password = None
HOUSING_TYPE = [
('ap', 'Apartment'),
('condo', 'Condo'),
('villa', 'Villa'),
('single', 'Single-family'),
('mansion', 'Mansion'),
('cottage', 'Cottage'),
('tiny', 'Tiny House'),
]
housing_type = forms.ChoiceField(choices=HOUSING_TYPE)
class Meta:
model = CustomUser
fields = ('username', 'first_name', 'last_name', 'email',
'phone', 'address', 'housing_type', 'surface_sqm')
class CustomSignupForm(SignupForm):
HOUSING_TYPE = [
('ap', 'Apartment'),
('condo', 'Condo'),
('villa', 'Villa'),
('single', 'Single-family'),
('mansion', 'Mansion'),
('cottage', 'Cottage'),
('tiny', 'Tiny House'),
]
first_name = forms.CharField(max_length=30, label='First Name')
last_name = forms.CharField(max_length=30, label='Last Name')
phone = forms.CharField(
max_length=12, label='Phone number')
address = forms.CharField(max_length=100, label='Address')
city = forms.CharField(max_length=60, label='City')
postcode = forms.CharField(max_length=5, label='Postcode')
housing_type = forms.ChoiceField(choices=HOUSING_TYPE)
surface_sqm = forms.IntegerField(
validators=[MinValueValidator(20),
MaxValueValidator(500)]
)
def save(self, request):
user = super(CustomSignupForm, self).save(request)
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
user.phone = self.cleaned_data['phone']
user.address = self.cleaned_data['address']
user.city = self.cleaned_data['city']
user.postcode = self.cleaned_data['postcode']
user.housing_type = self.cleaned_data['housing_type']
user.surface_sqm = self.cleaned_data['surface_sqm']
user.save()
return user
class Meta:
model = CustomUser
class ServiceModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return obj.name
class BookingForm(forms.ModelForm):
service = ServiceModelChoiceField(queryset=Service.objects.all())
class Meta:
model = Booking
fields = ['service', 'date', 'mentions']
widgets = {
'date': DateInput(),
}
class ContactForm(forms.ModelForm):
class Meta:
model = Contact
fields = ['name', 'email', 'telephone', 'title', 'message'] | 0.538498 | 0.081703 |
import torch.nn as nn
import torch
import torchvision.models as models
class TotalGenLoss(nn.Module):
    """Generator loss: 0.7 * per-image MAE + 0.3 * VGG perceptual term.

    Args:
        is_cuda: move the VGG feature extractor to the GPU when True.
    """
    def __init__(self, is_cuda):
        super(TotalGenLoss, self).__init__()
        self.vgg = VGGContent()
        if is_cuda:
            self.vgg = self.vgg.cuda()

    def forward(self, org_image, gen_image):
        # Perceptual term: squared distance between VGG feature vectors,
        # averaged over the feature dimension (one value per image).
        vgg_org_image = self.vgg(org_image)
        vgg_gen_image = self.vgg(gen_image)
        bs = org_image.size(0)
        content_loss = ((vgg_org_image - vgg_gen_image) ** 2).mean(1)
        # Pixel term: mean absolute error per image.
        mae_gen_loss = (torch.abs(org_image - gen_image)).view(bs, -1).mean(1)
        # Weighted sum, then averaged over the batch to a scalar.
        return (0.7 * mae_gen_loss + 0.3 * content_loss).mean()
class VGGContent(nn.Module):
    """VGG-19 (batch-norm) convolutional features, flattened per image."""
    def __init__(self):
        super(VGGContent, self).__init__()
        # Pretrained weights are downloaded by torchvision on first use.
        self.vgg = models.vgg19_bn(pretrained=True).features

    def forward(self, x):
        bs = x.size(0)
        # (b, C, H, W) feature map flattened to one vector per image.
        return self.vgg(x).view(bs, -1)
def build_conv_block(in_chans, out_chans, kernel_size=3, stride=2, padding=1, use_bn=True, bn_momentum=0.8, use_leaky=False):
    """Conv2d -> (Leaky)ReLU -> optional BatchNorm2d, as an nn.Sequential."""
    activation = (nn.LeakyReLU(negative_slope=0.2, inplace=True)
                  if use_leaky else nn.ReLU(inplace=True))
    modules = [
        nn.Conv2d(in_chans, out_chans, kernel_size, stride, padding),
        activation,
    ]
    if use_bn:
        modules.append(nn.BatchNorm2d(out_chans, momentum=bn_momentum))
    return nn.Sequential(*modules)
def build_deconv_block(in_chans, out_chans, use_bn=True):
    """Upsample(x2, bilinear) -> Conv2d(3x3) -> ReLU -> optional BatchNorm2d."""
    modules = [
        nn.Upsample(scale_factor=2,
                    mode="bilinear", align_corners=True),
        nn.Conv2d(in_chans, out_chans, 3, 1, 1),
        nn.ReLU(inplace=True),
    ]
    if use_bn:
        # Same momentum the downsampling blocks use by default.
        modules.append(nn.BatchNorm2d(out_chans, momentum=0.8))
    return nn.Sequential(*modules)
class FUnIEGeneratorV1(nn.Module):
    """U-Net style generator: 5 strided-conv downsamples, 5 upsamples with
    skip connections, tanh output.

    Shape comments below assume a 256x256 RGB input — TODO confirm.
    """
    def __init__(self, n_feats=32):
        super(FUnIEGeneratorV1, self).__init__()
        # Encoder: first block has no BatchNorm and a wider 5x5 kernel.
        self.conv1 = build_conv_block(
            3, n_feats, 5, padding=2, use_bn=False)
        self.conv2 = build_conv_block(n_feats, n_feats*4, 4)
        self.conv3 = build_conv_block(n_feats*4, n_feats*8, 4)
        self.conv4 = build_conv_block(n_feats*8, n_feats*8)
        self.conv5 = build_conv_block(n_feats*8, n_feats*8)
        # Decoder: input widths double where skip connections concatenate.
        self.deconv1 = build_deconv_block(n_feats*8, n_feats*8)
        self.deconv2 = build_deconv_block(n_feats*16, n_feats*8)
        self.deconv3 = build_deconv_block(n_feats*16, n_feats*4)
        self.deconv4 = build_deconv_block(n_feats*8, n_feats*1)
        self.deconv5 = nn.Upsample(
            scale_factor=2, mode="bilinear", align_corners=True)
        # In this work, kernel size is 3 instead of 4
        self.final = nn.Conv2d(n_feats*2, 3, 3, 1, 1)
        # Tanh bounds the output image to [-1, 1].
        self.act = nn.Tanh()

    def forward(self, x):
        # Downsample
        d1 = self.conv1(x)   # (B, 32, 128, 128)
        d2 = self.conv2(d1)  # (B, 128, 64, 64)
        d3 = self.conv3(d2)  # (B, 256, 32, 32)
        d4 = self.conv4(d3)  # (B, 256, 16, 16)
        d5 = self.conv5(d4)  # (B, 256, 8, 8)
        # Upsample, concatenating the mirrored encoder feature (skip link)
        u1 = torch.cat([self.deconv1(d5), d4], dim=1)  # (B, 512, 16, 16)
        u2 = torch.cat([self.deconv2(u1), d3], dim=1)  # (B, 512, 32, 32)
        u3 = torch.cat([self.deconv3(u2), d2], dim=1)  # (B, 256, 64, 64)
        u4 = torch.cat([self.deconv4(u3), d1], dim=1)  # (B, 64, 128, 128)
        u5 = self.deconv5(u4)                          # (B, 64, 256, 256)
        return self.act(self.final(u5))
class FUnIEGeneratorV2(nn.Module):
def __init__(self, n_feats=32):
super(FUnIEGeneratorV2, self).__init__()
self.conv1 = build_conv_block(
3, n_feats, 5, stride=1, padding=2, use_bn=False)
# In this work, kernel size is 3 instead of 4
self.conv2 = build_conv_block(
n_feats, n_feats*2, stride=1, bn_momentum=0.75)
# In this work, kernel size is 3 instead of 4
self.conv3 = build_conv_block(
n_feats*2, n_feats*2, stride=1, bn_momentum=0.75)
self.conv4 = build_conv_block(
n_feats*2, n_feats*4, stride=1, bn_momentum=0.75)
self.conv5 = build_conv_block(
n_feats*4, n_feats*4, stride=1, bn_momentum=0.75)
self.conv6 = build_conv_block(
n_feats*4, n_feats*8, stride=1, bn_momentum=0.75)
self.pool = nn.MaxPool2d(2, 2)
self.deconv1 = build_deconv_block(n_feats*8, n_feats*8)
self.deconv2 = build_deconv_block(n_feats*12, n_feats*8)
self.deconv3 = build_deconv_block(n_feats*10, n_feats*4)
self.out1 = build_conv_block(
n_feats*5, n_feats*4, stride=1, bn_momentum=0.75)
self.out2 = build_conv_block(
n_feats*4, n_feats*8, stride=1, bn_momentum=0.75)
# In this work, kernel size is 3 instead of 4
self.final = nn.Conv2d(n_feats*8, 3, 3, 1, 1)
self.act = nn.Tanh()
def forward(self, x):
# Downsample
d1 = self.conv1(x)
d1a = self.pool(d1) # (B, 32, 128, 128)
d2 = self.conv2(d1a)
d3 = self.conv3(d2)
d3a = self.pool(d3) # (B, 64, 64, 64)
d4 = self.conv4(d3a)
d5 = self.conv5(d4)
d5a = self.pool(d5) # (B, 128, 32, 32)
d6 = self.conv6(d5a) # (B, 256, 32, 32)
# Upsample
u1 = torch.cat([self.deconv1(d6), d5], dim=1) # (B, 384, 64, 64)
u2 = torch.cat([self.deconv2(u1), d3], dim=1) # (B, 320, 128, 128)
u3 = torch.cat([self.deconv3(u2), d1], dim=1) # (B, 160, 256, 256)
return self.act(self.final(self.out2(self.out1(u3))))
class FUnIEDiscriminator(nn.Module):
def __init__(self, n_feats=32):
super(FUnIEDiscriminator, self).__init__()
# Build discriminator blocks
self.block1 = self._block(3*2, n_feats, False)
self.block2 = self._block(n_feats, n_feats*2)
self.block3 = self._block(n_feats*2, n_feats*4)
self.block4 = self._block(n_feats*4, n_feats*8)
# Validility block
# In this work, kernel size is 3 instead of 4
self.validility = nn.Conv2d(n_feats*8, 1, 3, 1, 1)
def _block(self, in_chans, out_chans, use_bn=True):
layers = []
layers.append(nn.Conv2d(in_chans, out_chans, 3, 2, 1))
layers.append(nn.ReLU(inplace=True))
if use_bn:
layers.append(nn.BatchNorm2d(out_chans, momentum=0.8))
return nn.Sequential(*layers)
def forward(self, x1, x2):
x = torch.cat([x1, x2], dim=1) # (B, 6, 256, 256)
x = self.block1(x) # (B, 32, 128, 128)
x = self.block2(x) # (B, 64, 64, 64)
x = self.block3(x) # (B, 128, 32, 32)
x = self.block4(x) # (B, 256, 16, 16)
valid = self.validility(x) # (B, 1, 16, 16)
return valid.squeeze(1)
class ResidualBlock(nn.Module):
def __init__(self, n_feats=64):
super(ResidualBlock, self).__init__()
layers = []
layers.append(nn.Conv2d(n_feats, n_feats, 3, stride=1, padding=1))
layers.append(nn.BatchNorm2d(n_feats, momentum=0.8))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(n_feats, n_feats, 3, stride=1, padding=1))
layers.append(nn.BatchNorm2d(n_feats, momentum=0.8))
self.block = nn.Sequential(*layers)
def forward(self, x):
identity = x
x = self.block(x)
return x + identity
class FUnIEUpGenerator(nn.Module):
def __init__(self, n_feats=32):
super(FUnIEUpGenerator, self).__init__()
# Conv blocks
self.conv1 = build_conv_block(
3, n_feats, 5, padding=2, use_bn=False, use_leaky=True)
self.conv2 = build_conv_block(n_feats, n_feats*4, 4, use_leaky=True)
self.conv3 = build_conv_block(n_feats*4, n_feats*8, 4, use_leaky=True)
self.conv4 = build_conv_block(n_feats*8, n_feats*8, use_leaky=True)
self.conv5 = build_conv_block(n_feats*8, n_feats*8, use_leaky=True)
# Three additional conv layers
self.add_conv1 = nn.Conv2d(n_feats*8, 64, 3, stride=1, padding=1)
self.add_conv2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.add_conv3 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.relu = nn.ReLU(inplace=True)
# Residual blocks
self.res_block1 = ResidualBlock()
self.res_block2 = ResidualBlock()
self.res_block3 = ResidualBlock()
self.res_block4 = ResidualBlock()
self.res_block5 = ResidualBlock()
# Deconv blocks
self.deconv1 = self._deconv_block(n_feats*2, n_feats*8)
self.deconv2 = self._deconv_block(n_feats*(8+8), n_feats*8)
self.deconv3 = self._deconv_block(n_feats*(8+8), n_feats*4)
self.deconv4 = self._deconv_block(n_feats*(4+4), n_feats*1)
self.up = nn.Upsample(
scale_factor=2, mode="bilinear", align_corners=True)
# In this work, kernel size is 3 instead of 4
self.final = nn.Conv2d(n_feats*2, 3, 3, stride=1, padding=1)
self.act = nn.Tanh()
def _deconv_block(self, in_chans, out_chans, use_bn=True):
layers = []
layers.append(nn.Upsample(scale_factor=2,
mode="bilinear", align_corners=True))
layers.append(nn.Conv2d(in_chans, out_chans, 3, stride=1, padding=1))
layers.append(nn.ReLU(inplace=True))
if use_bn:
layers.append(nn.BatchNorm2d(out_chans, momentum=0.8))
return nn.Sequential(*layers)
def forward(self, x):
# Downsample
d1 = self.conv1(x) # (B, 32, 128, 128)
d2 = self.conv2(d1) # (B, 128, 64, 64)
d3 = self.conv3(d2) # (B, 256, 32, 32)
d4 = self.conv4(d3) # (B, 256, 16, 16)
d5 = self.conv5(d4) # (B, 256, 8, 8)
# Additional conv layers
a1 = self.relu(self.add_conv1(d5)) # (B, 64, 8, 8)
a2 = self.relu(self.add_conv2(a1))
bridge = self.relu(self.add_conv3(a2))
# Residual blocks
bridge = self.res_block1(bridge)
bridge = self.res_block2(bridge)
bridge = self.res_block3(bridge)
bridge = self.res_block4(bridge)
bridge = self.res_block5(bridge)
bridge += a1
# Upsample
u1 = torch.cat([self.deconv1(bridge), d4], dim=1) # (B, 512, 16, 16)
u2 = torch.cat([self.deconv2(u1), d3], dim=1) # (B, 512, 32, 32)
u3 = torch.cat([self.deconv3(u2), d2], dim=1) # (B, 256, 64, 64)
u4 = torch.cat([self.deconv4(u3), d1], dim=1) # (B, 64, 128, 128)
u4 = self.up(u4) # (B, 64, 256, 256)
return self.act(self.final(u4))
class FUnIEUpDiscriminator(nn.Module):
def __init__(self, n_feats=32):
super(FUnIEUpDiscriminator, self).__init__()
# Build discriminator blocks
self.block1 = build_conv_block(
3, n_feats, use_bn=False, use_leaky=True)
self.block2 = build_conv_block(n_feats, n_feats*2, use_leaky=True)
self.block3 = build_conv_block(n_feats*2, n_feats*4, use_leaky=True)
self.block4 = build_conv_block(n_feats*4, n_feats*8, use_leaky=True)
self.block5 = build_conv_block(
n_feats*8, n_feats*8, stride=1, use_leaky=True)
# Validility block
# In this work, kernel size is 3 instead of 4
self.validility = nn.Conv2d(n_feats*8, 1, 3, stride=1, padding=1)
def forward(self, x):
x = self.block1(x) # (B, 32, 128, 128)
x = self.block2(x) # (B, 64, 64, 64)
x = self.block3(x) # (B, 128, 32, 32)
x = self.block4(x) # (B, 256, 16, 16)
x = self.block5(x) # (B, 256, 16, 16)
valid = self.validility(x) # (B, 1, 16, 16)
return valid.squeeze(1)
if __name__ == "__main__":
model = FUnIEGeneratorV1()
x = torch.rand(1, 3, 256, 256)
print(model(x).size())
model = FUnIEGeneratorV2()
x = torch.rand(1, 3, 256, 256)
print(model(x).size())
model = FUnIEDiscriminator()
x1 = torch.rand(1, 3, 256, 256)
x2 = torch.rand(1, 3, 256, 256)
print(model(x1, x2).size())
model = VGGContent()
x = torch.rand(1, 3, 256, 256)
print(model(x).size())
model = FUnIEUpGenerator()
x = torch.rand(1, 3, 256, 256)
print(model(x).size())
model = FUnIEUpDiscriminator()
x = torch.rand(1, 3, 256, 256)
print(model(x).size()) | models.py | import torch.nn as nn
import torch
import torchvision.models as models
class TotalGenLoss(nn.Module):
def __init__(self, is_cuda):
super(TotalGenLoss, self).__init__()
self.vgg = VGGContent()
if is_cuda:
self.vgg = self.vgg.cuda()
def forward(self, org_image, gen_image):
vgg_org_image = self.vgg(org_image)
vgg_gen_image = self.vgg(gen_image)
bs = org_image.size(0)
content_loss = ((vgg_org_image - vgg_gen_image) ** 2).mean(1)
mae_gen_loss = (torch.abs(org_image - gen_image)).view(bs, -1).mean(1)
return (0.7 * mae_gen_loss + 0.3 * content_loss).mean()
class VGGContent(nn.Module):
def __init__(self):
super(VGGContent, self).__init__()
self.vgg = models.vgg19_bn(pretrained=True).features
def forward(self, x):
bs = x.size(0)
return self.vgg(x).view(bs, -1)
def build_conv_block(in_chans, out_chans, kernel_size=3, stride=2, padding=1, use_bn=True, bn_momentum=0.8, use_leaky=False):
layers = []
layers.append(nn.Conv2d(in_chans, out_chans, kernel_size, stride, padding))
if use_leaky:
layers.append(nn.LeakyReLU(negative_slope=0.2, inplace=True))
else:
layers.append(nn.ReLU(inplace=True))
if use_bn:
layers.append(nn.BatchNorm2d(out_chans, momentum=bn_momentum))
return nn.Sequential(*layers)
def build_deconv_block(in_chans, out_chans, use_bn=True):
layers = []
layers.append(nn.Upsample(scale_factor=2,
mode="bilinear", align_corners=True))
layers.append(nn.Conv2d(in_chans, out_chans, 3, 1, 1))
layers.append(nn.ReLU(inplace=True))
if use_bn:
layers.append(nn.BatchNorm2d(out_chans, momentum=0.8))
return nn.Sequential(*layers)
class FUnIEGeneratorV1(nn.Module):
def __init__(self, n_feats=32):
super(FUnIEGeneratorV1, self).__init__()
self.conv1 = build_conv_block(
3, n_feats, 5, padding=2, use_bn=False)
self.conv2 = build_conv_block(n_feats, n_feats*4, 4)
self.conv3 = build_conv_block(n_feats*4, n_feats*8, 4)
self.conv4 = build_conv_block(n_feats*8, n_feats*8)
self.conv5 = build_conv_block(n_feats*8, n_feats*8)
self.deconv1 = build_deconv_block(n_feats*8, n_feats*8)
self.deconv2 = build_deconv_block(n_feats*16, n_feats*8)
self.deconv3 = build_deconv_block(n_feats*16, n_feats*4)
self.deconv4 = build_deconv_block(n_feats*8, n_feats*1)
self.deconv5 = nn.Upsample(
scale_factor=2, mode="bilinear", align_corners=True)
# In this work, kernel size is 3 instead of 4
self.final = nn.Conv2d(n_feats*2, 3, 3, 1, 1)
self.act = nn.Tanh()
def forward(self, x):
# Downsample
d1 = self.conv1(x) # (B, 32, 128, 128)
d2 = self.conv2(d1) # (B, 128, 64, 64)
d3 = self.conv3(d2) # (B, 256, 32, 32)
d4 = self.conv4(d3) # (B, 256, 16, 16)
d5 = self.conv5(d4) # (B, 256, 8, 8)
# Upsample
u1 = torch.cat([self.deconv1(d5), d4], dim=1) # (B, 512, 16, 16)
u2 = torch.cat([self.deconv2(u1), d3], dim=1) # (B, 512, 32, 32)
u3 = torch.cat([self.deconv3(u2), d2], dim=1) # (B, 256, 64, 64)
u4 = torch.cat([self.deconv4(u3), d1], dim=1) # (B, 64, 128, 128)
u5 = self.deconv5(u4) # (B, 64, 256, 256)
return self.act(self.final(u5))
class FUnIEGeneratorV2(nn.Module):
def __init__(self, n_feats=32):
super(FUnIEGeneratorV2, self).__init__()
self.conv1 = build_conv_block(
3, n_feats, 5, stride=1, padding=2, use_bn=False)
# In this work, kernel size is 3 instead of 4
self.conv2 = build_conv_block(
n_feats, n_feats*2, stride=1, bn_momentum=0.75)
# In this work, kernel size is 3 instead of 4
self.conv3 = build_conv_block(
n_feats*2, n_feats*2, stride=1, bn_momentum=0.75)
self.conv4 = build_conv_block(
n_feats*2, n_feats*4, stride=1, bn_momentum=0.75)
self.conv5 = build_conv_block(
n_feats*4, n_feats*4, stride=1, bn_momentum=0.75)
self.conv6 = build_conv_block(
n_feats*4, n_feats*8, stride=1, bn_momentum=0.75)
self.pool = nn.MaxPool2d(2, 2)
self.deconv1 = build_deconv_block(n_feats*8, n_feats*8)
self.deconv2 = build_deconv_block(n_feats*12, n_feats*8)
self.deconv3 = build_deconv_block(n_feats*10, n_feats*4)
self.out1 = build_conv_block(
n_feats*5, n_feats*4, stride=1, bn_momentum=0.75)
self.out2 = build_conv_block(
n_feats*4, n_feats*8, stride=1, bn_momentum=0.75)
# In this work, kernel size is 3 instead of 4
self.final = nn.Conv2d(n_feats*8, 3, 3, 1, 1)
self.act = nn.Tanh()
def forward(self, x):
# Downsample
d1 = self.conv1(x)
d1a = self.pool(d1) # (B, 32, 128, 128)
d2 = self.conv2(d1a)
d3 = self.conv3(d2)
d3a = self.pool(d3) # (B, 64, 64, 64)
d4 = self.conv4(d3a)
d5 = self.conv5(d4)
d5a = self.pool(d5) # (B, 128, 32, 32)
d6 = self.conv6(d5a) # (B, 256, 32, 32)
# Upsample
u1 = torch.cat([self.deconv1(d6), d5], dim=1) # (B, 384, 64, 64)
u2 = torch.cat([self.deconv2(u1), d3], dim=1) # (B, 320, 128, 128)
u3 = torch.cat([self.deconv3(u2), d1], dim=1) # (B, 160, 256, 256)
return self.act(self.final(self.out2(self.out1(u3))))
class FUnIEDiscriminator(nn.Module):
def __init__(self, n_feats=32):
super(FUnIEDiscriminator, self).__init__()
# Build discriminator blocks
self.block1 = self._block(3*2, n_feats, False)
self.block2 = self._block(n_feats, n_feats*2)
self.block3 = self._block(n_feats*2, n_feats*4)
self.block4 = self._block(n_feats*4, n_feats*8)
# Validility block
# In this work, kernel size is 3 instead of 4
self.validility = nn.Conv2d(n_feats*8, 1, 3, 1, 1)
def _block(self, in_chans, out_chans, use_bn=True):
layers = []
layers.append(nn.Conv2d(in_chans, out_chans, 3, 2, 1))
layers.append(nn.ReLU(inplace=True))
if use_bn:
layers.append(nn.BatchNorm2d(out_chans, momentum=0.8))
return nn.Sequential(*layers)
def forward(self, x1, x2):
x = torch.cat([x1, x2], dim=1) # (B, 6, 256, 256)
x = self.block1(x) # (B, 32, 128, 128)
x = self.block2(x) # (B, 64, 64, 64)
x = self.block3(x) # (B, 128, 32, 32)
x = self.block4(x) # (B, 256, 16, 16)
valid = self.validility(x) # (B, 1, 16, 16)
return valid.squeeze(1)
class ResidualBlock(nn.Module):
def __init__(self, n_feats=64):
super(ResidualBlock, self).__init__()
layers = []
layers.append(nn.Conv2d(n_feats, n_feats, 3, stride=1, padding=1))
layers.append(nn.BatchNorm2d(n_feats, momentum=0.8))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(n_feats, n_feats, 3, stride=1, padding=1))
layers.append(nn.BatchNorm2d(n_feats, momentum=0.8))
self.block = nn.Sequential(*layers)
def forward(self, x):
identity = x
x = self.block(x)
return x + identity
class FUnIEUpGenerator(nn.Module):
def __init__(self, n_feats=32):
super(FUnIEUpGenerator, self).__init__()
# Conv blocks
self.conv1 = build_conv_block(
3, n_feats, 5, padding=2, use_bn=False, use_leaky=True)
self.conv2 = build_conv_block(n_feats, n_feats*4, 4, use_leaky=True)
self.conv3 = build_conv_block(n_feats*4, n_feats*8, 4, use_leaky=True)
self.conv4 = build_conv_block(n_feats*8, n_feats*8, use_leaky=True)
self.conv5 = build_conv_block(n_feats*8, n_feats*8, use_leaky=True)
# Three additional conv layers
self.add_conv1 = nn.Conv2d(n_feats*8, 64, 3, stride=1, padding=1)
self.add_conv2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.add_conv3 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.relu = nn.ReLU(inplace=True)
# Residual blocks
self.res_block1 = ResidualBlock()
self.res_block2 = ResidualBlock()
self.res_block3 = ResidualBlock()
self.res_block4 = ResidualBlock()
self.res_block5 = ResidualBlock()
# Deconv blocks
self.deconv1 = self._deconv_block(n_feats*2, n_feats*8)
self.deconv2 = self._deconv_block(n_feats*(8+8), n_feats*8)
self.deconv3 = self._deconv_block(n_feats*(8+8), n_feats*4)
self.deconv4 = self._deconv_block(n_feats*(4+4), n_feats*1)
self.up = nn.Upsample(
scale_factor=2, mode="bilinear", align_corners=True)
# In this work, kernel size is 3 instead of 4
self.final = nn.Conv2d(n_feats*2, 3, 3, stride=1, padding=1)
self.act = nn.Tanh()
def _deconv_block(self, in_chans, out_chans, use_bn=True):
layers = []
layers.append(nn.Upsample(scale_factor=2,
mode="bilinear", align_corners=True))
layers.append(nn.Conv2d(in_chans, out_chans, 3, stride=1, padding=1))
layers.append(nn.ReLU(inplace=True))
if use_bn:
layers.append(nn.BatchNorm2d(out_chans, momentum=0.8))
return nn.Sequential(*layers)
def forward(self, x):
# Downsample
d1 = self.conv1(x) # (B, 32, 128, 128)
d2 = self.conv2(d1) # (B, 128, 64, 64)
d3 = self.conv3(d2) # (B, 256, 32, 32)
d4 = self.conv4(d3) # (B, 256, 16, 16)
d5 = self.conv5(d4) # (B, 256, 8, 8)
# Additional conv layers
a1 = self.relu(self.add_conv1(d5)) # (B, 64, 8, 8)
a2 = self.relu(self.add_conv2(a1))
bridge = self.relu(self.add_conv3(a2))
# Residual blocks
bridge = self.res_block1(bridge)
bridge = self.res_block2(bridge)
bridge = self.res_block3(bridge)
bridge = self.res_block4(bridge)
bridge = self.res_block5(bridge)
bridge += a1
# Upsample
u1 = torch.cat([self.deconv1(bridge), d4], dim=1) # (B, 512, 16, 16)
u2 = torch.cat([self.deconv2(u1), d3], dim=1) # (B, 512, 32, 32)
u3 = torch.cat([self.deconv3(u2), d2], dim=1) # (B, 256, 64, 64)
u4 = torch.cat([self.deconv4(u3), d1], dim=1) # (B, 64, 128, 128)
u4 = self.up(u4) # (B, 64, 256, 256)
return self.act(self.final(u4))
class FUnIEUpDiscriminator(nn.Module):
def __init__(self, n_feats=32):
super(FUnIEUpDiscriminator, self).__init__()
# Build discriminator blocks
self.block1 = build_conv_block(
3, n_feats, use_bn=False, use_leaky=True)
self.block2 = build_conv_block(n_feats, n_feats*2, use_leaky=True)
self.block3 = build_conv_block(n_feats*2, n_feats*4, use_leaky=True)
self.block4 = build_conv_block(n_feats*4, n_feats*8, use_leaky=True)
self.block5 = build_conv_block(
n_feats*8, n_feats*8, stride=1, use_leaky=True)
# Validility block
# In this work, kernel size is 3 instead of 4
self.validility = nn.Conv2d(n_feats*8, 1, 3, stride=1, padding=1)
def forward(self, x):
x = self.block1(x) # (B, 32, 128, 128)
x = self.block2(x) # (B, 64, 64, 64)
x = self.block3(x) # (B, 128, 32, 32)
x = self.block4(x) # (B, 256, 16, 16)
x = self.block5(x) # (B, 256, 16, 16)
valid = self.validility(x) # (B, 1, 16, 16)
return valid.squeeze(1)
if __name__ == "__main__":
model = FUnIEGeneratorV1()
x = torch.rand(1, 3, 256, 256)
print(model(x).size())
model = FUnIEGeneratorV2()
x = torch.rand(1, 3, 256, 256)
print(model(x).size())
model = FUnIEDiscriminator()
x1 = torch.rand(1, 3, 256, 256)
x2 = torch.rand(1, 3, 256, 256)
print(model(x1, x2).size())
model = VGGContent()
x = torch.rand(1, 3, 256, 256)
print(model(x).size())
model = FUnIEUpGenerator()
x = torch.rand(1, 3, 256, 256)
print(model(x).size())
model = FUnIEUpDiscriminator()
x = torch.rand(1, 3, 256, 256)
print(model(x).size()) | 0.938039 | 0.453262 |
import pprint
import re # noqa: F401
import six
class Intervention(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'intervention_type': 'str',
'intervention_name': 'str',
'description': 'str',
'arm_group_label': 'list[str]',
'other_name': 'list[str]'
}
attribute_map = {
'intervention_type': 'intervention_type',
'intervention_name': 'intervention_name',
'description': 'description',
'arm_group_label': 'arm_group_label',
'other_name': 'other_name'
}
def __init__(self, intervention_type='Other', intervention_name=None, description=None, arm_group_label=None, other_name=None): # noqa: E501
"""Intervention - a model defined in Swagger""" # noqa: E501
self._intervention_type = None
self._intervention_name = None
self._description = None
self._arm_group_label = None
self._other_name = None
self.discriminator = None
if intervention_type is not None:
self.intervention_type = intervention_type
self.intervention_name = intervention_name
if description is not None:
self.description = description
if arm_group_label is not None:
self.arm_group_label = arm_group_label
if other_name is not None:
self.other_name = other_name
@property
def intervention_type(self):
"""Gets the intervention_type of this Intervention. # noqa: E501
For each intervention studied in the clinical study, the general type of intervention. # noqa: E501
:return: The intervention_type of this Intervention. # noqa: E501
:rtype: str
"""
return self._intervention_type
@intervention_type.setter
def intervention_type(self, intervention_type):
"""Sets the intervention_type of this Intervention.
For each intervention studied in the clinical study, the general type of intervention. # noqa: E501
:param intervention_type: The intervention_type of this Intervention. # noqa: E501
:type: str
"""
allowed_values = ["Behavioral", "Biological", "Combination Product", "Device", "Diagnostic Test", "Dietary Supplement", "Drug", "Genetic", "Procedure", "Radiation", "Other"] # noqa: E501
if intervention_type not in allowed_values:
raise ValueError(
"Invalid value for `intervention_type` ({0}), must be one of {1}" # noqa: E501
.format(intervention_type, allowed_values)
)
self._intervention_type = intervention_type
@property
def intervention_name(self):
"""Gets the intervention_name of this Intervention. # noqa: E501
A brief descriptive name used to refer to the intervention(s) studied in each arm of the clinical study. # noqa: E501
:return: The intervention_name of this Intervention. # noqa: E501
:rtype: str
"""
return self._intervention_name
@intervention_name.setter
def intervention_name(self, intervention_name):
"""Sets the intervention_name of this Intervention.
A brief descriptive name used to refer to the intervention(s) studied in each arm of the clinical study. # noqa: E501
:param intervention_name: The intervention_name of this Intervention. # noqa: E501
:type: str
"""
if intervention_name is None:
raise ValueError("Invalid value for `intervention_name`, must not be `None`") # noqa: E501
self._intervention_name = intervention_name
@property
def description(self):
"""Gets the description of this Intervention. # noqa: E501
Details that can be made public about the intervention, other than the Intervention Name(s) and Other Intervention Name(s), sufficient to distinguish the intervention from other, similar interventions studied in the same or another clinical study. For example, interventions involving drugs may include dosage form, dosage, frequency, and duration. # noqa: E501
:return: The description of this Intervention. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Intervention.
Details that can be made public about the intervention, other than the Intervention Name(s) and Other Intervention Name(s), sufficient to distinguish the intervention from other, similar interventions studied in the same or another clinical study. For example, interventions involving drugs may include dosage form, dosage, frequency, and duration. # noqa: E501
:param description: The description of this Intervention. # noqa: E501
:type: str
"""
self._description = description
@property
def arm_group_label(self):
"""Gets the arm_group_label of this Intervention. # noqa: E501
If multiple Arms or Groups have been specified, indicate which Arm Groups this intervention applies to. # noqa: E501
:return: The arm_group_label of this Intervention. # noqa: E501
:rtype: list[str]
"""
return self._arm_group_label
@arm_group_label.setter
def arm_group_label(self, arm_group_label):
"""Sets the arm_group_label of this Intervention.
If multiple Arms or Groups have been specified, indicate which Arm Groups this intervention applies to. # noqa: E501
:param arm_group_label: The arm_group_label of this Intervention. # noqa: E501
:type: list[str]
"""
self._arm_group_label = arm_group_label
@property
def other_name(self):
"""Gets the other_name of this Intervention. # noqa: E501
:return: The other_name of this Intervention. # noqa: E501
:rtype: list[str]
"""
return self._other_name
@other_name.setter
def other_name(self, other_name):
"""Sets the other_name of this Intervention.
:param other_name: The other_name of this Intervention. # noqa: E501
:type: list[str]
"""
self._other_name = other_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Intervention, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Intervention):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | mm_power_sdk_python/models/intervention.py | import pprint
import re # noqa: F401
import six
class Intervention(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'intervention_type': 'str',
'intervention_name': 'str',
'description': 'str',
'arm_group_label': 'list[str]',
'other_name': 'list[str]'
}
attribute_map = {
'intervention_type': 'intervention_type',
'intervention_name': 'intervention_name',
'description': 'description',
'arm_group_label': 'arm_group_label',
'other_name': 'other_name'
}
def __init__(self, intervention_type='Other', intervention_name=None, description=None, arm_group_label=None, other_name=None): # noqa: E501
"""Intervention - a model defined in Swagger""" # noqa: E501
self._intervention_type = None
self._intervention_name = None
self._description = None
self._arm_group_label = None
self._other_name = None
self.discriminator = None
if intervention_type is not None:
self.intervention_type = intervention_type
self.intervention_name = intervention_name
if description is not None:
self.description = description
if arm_group_label is not None:
self.arm_group_label = arm_group_label
if other_name is not None:
self.other_name = other_name
@property
def intervention_type(self):
"""Gets the intervention_type of this Intervention. # noqa: E501
For each intervention studied in the clinical study, the general type of intervention. # noqa: E501
:return: The intervention_type of this Intervention. # noqa: E501
:rtype: str
"""
return self._intervention_type
@intervention_type.setter
def intervention_type(self, intervention_type):
"""Sets the intervention_type of this Intervention.
For each intervention studied in the clinical study, the general type of intervention. # noqa: E501
:param intervention_type: The intervention_type of this Intervention. # noqa: E501
:type: str
"""
allowed_values = ["Behavioral", "Biological", "Combination Product", "Device", "Diagnostic Test", "Dietary Supplement", "Drug", "Genetic", "Procedure", "Radiation", "Other"] # noqa: E501
if intervention_type not in allowed_values:
raise ValueError(
"Invalid value for `intervention_type` ({0}), must be one of {1}" # noqa: E501
.format(intervention_type, allowed_values)
)
self._intervention_type = intervention_type
@property
def intervention_name(self):
"""Gets the intervention_name of this Intervention. # noqa: E501
A brief descriptive name used to refer to the intervention(s) studied in each arm of the clinical study. # noqa: E501
:return: The intervention_name of this Intervention. # noqa: E501
:rtype: str
"""
return self._intervention_name
@intervention_name.setter
def intervention_name(self, intervention_name):
"""Sets the intervention_name of this Intervention.
A brief descriptive name used to refer to the intervention(s) studied in each arm of the clinical study. # noqa: E501
:param intervention_name: The intervention_name of this Intervention. # noqa: E501
:type: str
"""
if intervention_name is None:
raise ValueError("Invalid value for `intervention_name`, must not be `None`") # noqa: E501
self._intervention_name = intervention_name
@property
def description(self):
"""Gets the description of this Intervention. # noqa: E501
Details that can be made public about the intervention, other than the Intervention Name(s) and Other Intervention Name(s), sufficient to distinguish the intervention from other, similar interventions studied in the same or another clinical study. For example, interventions involving drugs may include dosage form, dosage, frequency, and duration. # noqa: E501
:return: The description of this Intervention. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Intervention.
Details that can be made public about the intervention, other than the Intervention Name(s) and Other Intervention Name(s), sufficient to distinguish the intervention from other, similar interventions studied in the same or another clinical study. For example, interventions involving drugs may include dosage form, dosage, frequency, and duration. # noqa: E501
:param description: The description of this Intervention. # noqa: E501
:type: str
"""
self._description = description
@property
def arm_group_label(self):
"""Gets the arm_group_label of this Intervention. # noqa: E501
If multiple Arms or Groups have been specified, indicate which Arm Groups this intervention applies to. # noqa: E501
:return: The arm_group_label of this Intervention. # noqa: E501
:rtype: list[str]
"""
return self._arm_group_label
@arm_group_label.setter
def arm_group_label(self, arm_group_label):
"""Sets the arm_group_label of this Intervention.
If multiple Arms or Groups have been specified, indicate which Arm Groups this intervention applies to. # noqa: E501
:param arm_group_label: The arm_group_label of this Intervention. # noqa: E501
:type: list[str]
"""
self._arm_group_label = arm_group_label
@property
def other_name(self):
"""Gets the other_name of this Intervention. # noqa: E501
:return: The other_name of this Intervention. # noqa: E501
:rtype: list[str]
"""
return self._other_name
@other_name.setter
def other_name(self, other_name):
"""Sets the other_name of this Intervention.
:param other_name: The other_name of this Intervention. # noqa: E501
:type: list[str]
"""
self._other_name = other_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Intervention, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Intervention):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | 0.754282 | 0.226698 |
def rainRightJust():
    """Read city/rainfall pairs from rainfall.txt and write them
    right-justified (city in 25 columns, amount in 5) to
    rainfallRightJust.txt.

    Fix: use with-statements so both files are closed even if a
    malformed line makes split()/indexing raise mid-loop.
    """
    # NOTE(review): assumes each line is exactly "<city> <amount>" with a
    # one-word city name; a multi-word city would break values[1] -- confirm
    # against the real rainfall.txt format.
    with open("rainfall.txt", "r") as rainfile, \
         open("rainfallRightJust.txt", "w") as outfile:
        for aLine in rainfile:
            values = aLine.split()
            cityNames = values[0]
            numbers = values[1]
            outfile.write("%+25s %+5s \n" % (cityNames, numbers))
##rainRightJust()
###L09-02
def fahrToCels():
    """Write a Fahrenheit-to-Celsius conversion table to tempconv.txt.

    One header row, then one row per whole degree from -300 F up to
    211 F, each column 10 characters wide with 3 decimal places.

    Fix: use a with-statement so the output file is closed (and
    flushed) even if a write raises.
    """
    # NOTE(review): range(-300, 212) stops at 211; the author may have
    # intended to include 212 F (boiling point) -- behavior kept as-is.
    with open("tempconv.txt", "w") as outfile:
        outfile.write("%+10s %+10s \n" % ("Fahrenheit", "Celsius"))
        for fahrTemp in range(-300, 212):
            celsTemp = (fahrTemp - 32) * (5 / 9)
            outfile.write("%10.3f %10.3f \n" % (fahrTemp, celsTemp))
###L09-03
def readLines():
rainfile=open("rainfall.txt","r")
print(rainfile.readline())
print(rainfile.readline())
print(rainfile.readlines())
rainfile.close()
##readLines()
###L09-04
def readLines2():
rainfile=open("rainfall.txt","r")
print(rainfile.readlines())
rainfile.close()
##readLines2()
###L09-05
def PsalmUpper():
psalm=open("psalm112.txt","r")
PSALM=open("psalm112Upper.txt","w")
for aLine in psalm:
psalmRead=psalm.readlines()
PSALM.write(str([x.upper() for x in psalmRead]))
psalm.close()
PSALM.close()
##PsalmUpper()
###L09-06
def counting():
psalm=open("psalm112.txt","r")
lines=0
words=0
characters=0
for aLine in psalm:
lines+=1
words1=aLine.split()
for aWord in words1:
words+=1
for aChar in aWord:
characters+=1
characters+=1
print(lines," lines")
print(words," words")
print(characters," characters")
psalm.close()
##counting()
###L09-07
def concord():
textIn=open("textIn.txt","r")
concord=open("concord.txt","w")
D={}
linecount=0
for aLine in textIn:
linecount+=1
words=aLine.split()
for word in words:
if word in D:
D[word].append(linecount)
else:
D[word]=[linecount]
for keys in D:
concord.write("%+15s %s \n" % (keys,str(D[keys])))
textIn.close()
concord.close()
##concord()
###L09-08
def readStudentScores(fileName,pointsD,scoresD):
studentScores=open(fileName,"w")
keys=list(pointsD)
keys.sort()
for key in keys:
studentScores.write("%+3s %s" % (key,str(pointsD[key])))
studentScores.write("\n")
for key in scoresD:
studentScores.write("%s" % (key))
scores=list(scoresD[key])
scores.sort()
for score in scores:
studentScores.write("%+3s %i" % (score,scoresD[key][score]))
studentScores.write("\n")
studentScores.close()
PPD={'T1':100, 'T2':100, 'H1':10, 'H2':20}
SSD={'Jones':{'T1':100, 'T2':100, 'H1':10, 'H2':20},'Smith':{'T1':95, 'T2':100, 'H1':10, 'H2':12}, 'Armes': {'T1':100, 'T2':95, 'H1':0, 'H2':18}}
readStudentScores("students_scores.txt",PPD,SSD)
###L09-09
def printScoresMatrix(scoresD):
studentScores=open("scoresMatrix.txt","w")
keys=['T1', 'T2', 'H1', 'H2']
keys.sort()
studentScores.write(" ")
for key in keys:
studentScores.write("%+5s" % (key))
space=5
studentScores.write("\n")
for key in scoresD:
studentScores.write("%s" % (key))
scores=list(scoresD[key])
scores.sort()
for score in scores:
studentScores.write("%5i" % (scoresD[key][score]))
studentScores.write("\n")
studentScores.close()
printScoresMatrix(SSD)
###L09-10
def printScoresMatrix1(scoresD):
studentScores=open("scoresMatrixAverage.txt","w")
keys=['T1', 'T2', 'H1', 'H2']
keys.sort()
studentScores.write(" ")
H1=0
H2=0
T1=0
T2=0
for key in keys:
studentScores.write("%+5s" % (key))
space=5
studentScores.write("\n")
student1=0
student2=0
student3=0
for key in scoresD:
studentScores.write("%s" % (key))
scores=list(scoresD[key])
scores.sort()
counter=1
for score in scores:
studentScores.write("%5i" % (scoresD[key][score]))
## print(scoresD[key],scoresD[key][score])
if score=='H1':
H1=H1+scoresD[key][score]
## print(H1)
elif score=='H2':
H2=H2+scoresD[key][score]
## print(H2)
elif score=='T2':
T2=T2+scoresD[key][score]
## print(T2)
elif score=='T1':
T1=T1+scoresD[key][score]
## print(T1)
student1=student1+(scoresD[key][score])
student1=student1/4
studentScores.write("%5.1f" % (student1))
studentScores.write("\n")
H1A=H1/3
H2A=H2/3
T1A=T1/3
T2A=T2/3
## print(H1,H2,T1,T2,H1A,H2A,T1A,T2A)
studentScores.write(" ")
studentScores.write("%5.1f %5.1f %5.1f %5.1f" % (H1A,H2A,T1A,T2A))
studentScores.close()
printScoresMatrix1(SSD) | COS120/LABS/LAB09/LAB09.py | def rainRightJust():
rainfile = open("rainfall.txt","r")
outfile = open("rainfallRightJust.txt","w")
for aLine in rainfile:
values = aLine.split()
cityNames=values[0]
numbers=values[1]
outfile.write("%+25s %+5s \n" % (cityNames,numbers))
rainfile.close()
outfile.close()
##rainRightJust()
###L09-02
def fahrToCels():
outfile = open("tempconv.txt","w")
fahr="Fahrenheit"
cels="Celsius"
outfile.write("%+10s %+10s \n" % (fahr,cels))
for fahrTemp in range(-300,212,1):
celsTemp=(fahrTemp-32)*(5/9)
outfile.write("%10.3f %10.3f \n" % (fahrTemp,celsTemp))
outfile.close()
##fahrToCels()
###L09-03
def readLines():
rainfile=open("rainfall.txt","r")
print(rainfile.readline())
print(rainfile.readline())
print(rainfile.readlines())
rainfile.close()
##readLines()
###L09-04
def readLines2():
rainfile=open("rainfall.txt","r")
print(rainfile.readlines())
rainfile.close()
##readLines2()
###L09-05
def PsalmUpper():
psalm=open("psalm112.txt","r")
PSALM=open("psalm112Upper.txt","w")
for aLine in psalm:
psalmRead=psalm.readlines()
PSALM.write(str([x.upper() for x in psalmRead]))
psalm.close()
PSALM.close()
##PsalmUpper()
###L09-06
def counting():
psalm=open("psalm112.txt","r")
lines=0
words=0
characters=0
for aLine in psalm:
lines+=1
words1=aLine.split()
for aWord in words1:
words+=1
for aChar in aWord:
characters+=1
characters+=1
print(lines," lines")
print(words," words")
print(characters," characters")
psalm.close()
##counting()
###L09-07
def concord():
textIn=open("textIn.txt","r")
concord=open("concord.txt","w")
D={}
linecount=0
for aLine in textIn:
linecount+=1
words=aLine.split()
for word in words:
if word in D:
D[word].append(linecount)
else:
D[word]=[linecount]
for keys in D:
concord.write("%+15s %s \n" % (keys,str(D[keys])))
textIn.close()
concord.close()
##concord()
###L09-08
def readStudentScores(fileName,pointsD,scoresD):
studentScores=open(fileName,"w")
keys=list(pointsD)
keys.sort()
for key in keys:
studentScores.write("%+3s %s" % (key,str(pointsD[key])))
studentScores.write("\n")
for key in scoresD:
studentScores.write("%s" % (key))
scores=list(scoresD[key])
scores.sort()
for score in scores:
studentScores.write("%+3s %i" % (score,scoresD[key][score]))
studentScores.write("\n")
studentScores.close()
PPD={'T1':100, 'T2':100, 'H1':10, 'H2':20}
SSD={'Jones':{'T1':100, 'T2':100, 'H1':10, 'H2':20},'Smith':{'T1':95, 'T2':100, 'H1':10, 'H2':12}, 'Armes': {'T1':100, 'T2':95, 'H1':0, 'H2':18}}
readStudentScores("students_scores.txt",PPD,SSD)
###L09-09
def printScoresMatrix(scoresD):
studentScores=open("scoresMatrix.txt","w")
keys=['T1', 'T2', 'H1', 'H2']
keys.sort()
studentScores.write(" ")
for key in keys:
studentScores.write("%+5s" % (key))
space=5
studentScores.write("\n")
for key in scoresD:
studentScores.write("%s" % (key))
scores=list(scoresD[key])
scores.sort()
for score in scores:
studentScores.write("%5i" % (scoresD[key][score]))
studentScores.write("\n")
studentScores.close()
printScoresMatrix(SSD)
###L09-10
def printScoresMatrix1(scoresD):
studentScores=open("scoresMatrixAverage.txt","w")
keys=['T1', 'T2', 'H1', 'H2']
keys.sort()
studentScores.write(" ")
H1=0
H2=0
T1=0
T2=0
for key in keys:
studentScores.write("%+5s" % (key))
space=5
studentScores.write("\n")
student1=0
student2=0
student3=0
for key in scoresD:
studentScores.write("%s" % (key))
scores=list(scoresD[key])
scores.sort()
counter=1
for score in scores:
studentScores.write("%5i" % (scoresD[key][score]))
## print(scoresD[key],scoresD[key][score])
if score=='H1':
H1=H1+scoresD[key][score]
## print(H1)
elif score=='H2':
H2=H2+scoresD[key][score]
## print(H2)
elif score=='T2':
T2=T2+scoresD[key][score]
## print(T2)
elif score=='T1':
T1=T1+scoresD[key][score]
## print(T1)
student1=student1+(scoresD[key][score])
student1=student1/4
studentScores.write("%5.1f" % (student1))
studentScores.write("\n")
H1A=H1/3
H2A=H2/3
T1A=T1/3
T2A=T2/3
## print(H1,H2,T1,T2,H1A,H2A,T1A,T2A)
studentScores.write(" ")
studentScores.write("%5.1f %5.1f %5.1f %5.1f" % (H1A,H2A,T1A,T2A))
studentScores.close()
printScoresMatrix1(SSD) | 0.160266 | 0.194731 |
import mariadb
import hashlib
import os
# set by other programs
PASSWORD = ""
# sets up a connection to the db
def getconn():
connection = mariadb.connect(user="root", host="mariadb", password=PASSWORD, autocommit=True)
cur = connection.cursor()
cur.execute("USE TLIS;")
cur.close()
return connection
# executes query with data substituting ?s and then returns (if ret true)
def query(squery, qdata=None, ret=True):
if not qdata:
pass
elif type(qdata) == type([]) or type(qdata) == type(("i", "tuple")):
qdata = tuple(qdata)
else:
qdata = (qdata,)
connection = getconn()
print(squery)
with connection:
cur = connection.cursor()
if qdata:
cur.execute(squery, qdata)
else:
cur.execute(squery)
rows = []
if ret:
rows = cur.fetchall()
cur.close()
return(rows)
# object with all them types
types = {
"Asset":
{
"db":"assets",
"template":{"id": 0, "asset_type": 0, "asset_number": ""},
"create":"INSERT INTO assets (type, number) VALUES (?, ?) RETURNING id;",
"update": "UPDATE assets SET type = ?, number = ? WHERE id = ?;",
"get": "SELECT id, type, number FROM assets;"
},
"Customer":
{
"db":"customers",
"template":{"id": 0, "number": "", "first_name": "", "last_name": "", "email": "", "grade": 0, "staff": 0},
"create":"INSERT INTO customers (number, first_name, last_name, email, grade, staff) VALUES (?, ?, ?, ?, ?, ?) RETURNING id;",
"update": "UPDATE customers SET number = ?, first_name = ?, last_name = ?, email = ?, grade = ?, staff = ? WHERE id = ?",
"get": "SELECT id, number, first_name, last_name, email, grade, staff FROM customers;"
},
"Tech":
{
"db":"techs",
"template":{"id": 0, "customer_id": 0, "username":""},
"create":"INSERT INTO techs (customer_id, username, permission, password, salt) VALUES (?, ?, ?, ?, ?) RETURNING id;",
"update": "UPDATE techs SET customer_id = ?, username = ?, permission = ?, password = ?, salt = ? WHERE id = ?;",
"get": "SELECT id, customer_id, username FROM techs;",
"perms": ["tlis_sysadmin", "tlis_manager", "tlis_tech"]
},
"AssetType":
{
"db":"asset_types",
"template":{"id": 0, "name": "", "prefix": "", "default_duration": 0, "description": ""},
"create":"INSERT INTO asset_types (name, prefix, max_time_out, description) VALUES (?, ?, ?, ?) RETURNING id;",
"update": "UPDATE asset_types SET name = ?, prefix = ?, max_time_out = ?, description = ? WHERE id = ?;",
"get": "SELECT id, name, prefix, max_time_out, description FROM asset_types;"
},
"TransactionType":
{
"db":"transaction_types",
"template":{"id": 0, "name": "", "description": ""},
"create":"INSERT INTO transaction_types (name, description) VALUES (?, ?) RETURNING id;",
"update": "UPDATE transaction_types SET name = ?, description = ? WHERE id = ?;",
"get": "SELECT id, name, description FROM transaction_types;"
},
"Checkout":
{
"db":"transactions_out",
"template":{"id":0, "asset_id":0, "customer_id":0, "tech_id":0, "transaction_type":0, "time":0, "time_due":0, "notes":""},
"create":"INSERT INTO transactions_out (asset_id, customer_id, tech_id, type, time_now, time_due, notes) VALUES (?, ?, ?, ?, ?, ?, ?) RETURNING id;",
"update": "UPDATE transactions_out SET asset_id = ?, customer_id = ?, tech_id = ?, type = ?, time_now = ?, time_due = ?, notes = ? WHERE id = ?",
"get": "SELECT id, asset_id, customer_id, tech_id, type, time_now, time_due, notes FROM transactions_out;"
},
"Checkin":
{
"db":"transactions_in",
"template":{"id":0, "transaction_type":0, "tech_id":0, "time":0, "notes":""},
"create":"INSERT INTO transactions_in (id, type, tech_id, time_now, notes) VALUES (?, ?, ?, ?, ?) RETURNING id;",
"update": "UPDATE transactions_in SET type = ?, tech_id = ?, time_now = ?, notes = ? WHERE id = ?;",
"get": "SELECT id, type, tech_id, time_now, notes FROM transactions_in;"
},
}
# function that runs when a normal request is sent
def run(data):
if data["action"] == "ADD":
if(data["type"] == "Tech"):
ppassword = data["password"]
data["salt"] = os.urandom(32)
data["password"] = <PASSWORD>_<PASSWORD>('<PASSWORD>', data["password"].encode('utf-8'), salt, 100000)
query(f"CREATE USER ? INDENTIFIED BY '?'", (data['username'], ppassword), ret=False)
query(f"GRANT ? TO ?", (types['Tech']['perms'][data['permission']], data['username']), ret=False)
print(data.values())
data["id"] = query(types[data["type"]]["create"], list(data.values())[2:])[0][0]
elif data["action"] == "UPDATE":
id = data["id"]
del data["id"]
query(types[data["type"]]["update"], list(data.values())[2:] + [id], False)
data["id"] = id
if(data["type"] == "Tech"):
for role in types["Tech"]["perms"]:
users = query(f"SELECT user FROM mysql.user WHERE is_role='?'", (role))[0]
if(data["username"] in users):
query(f"REVOKE ? FROM ?", (role, username), ret=False)
query(f"GRANT ? TO ?", (types['Tech']['perms'][data['permission']], data['username']), ret=False)
elif data["action"] == "DELETE":
if(data["type"] == "Tech"):
username = query(f"SELECT username FROM techs WHERE id = ?", (data['id']))[0][0]
query(f"DROP USER ?", (username), ret=False)
query(f"DELETE FROM ? WHERE id = ?", (types[data['type']]['db'], data['id']), ret=False)
else:
data = {"type":"Error", "error_type":"TOM", "reason":"no action given"}
return data
# where user is authed on login
def auth(data):
print("auth has begun")
result = query("SELECT password, salt, permission FROM techs WHERE username = ?;", data["username"])[0]
key = result[0]
salt = result[1]
permission = result[2]
password_to_check = data["password"]
print(password_to_check)
new_key = hashlib.pbkdf2_hmac(
'sha256',
password_to_check.encode('utf-8'),
salt,
100000
)
key = key[:32]
print("auth has completed")
if new_key == key:
return True, permission
else:
return False, permission
# gets all record in db and sends to users
def login():
data = []
for obj in types:
result = query(types[obj]["get"])
for row in result:
final = types[obj]["template"].copy()
i = 0
for key in final.keys():
final[key] = row[i]
i += 1
data.append(final)
return data | app/manager.py |
import mariadb
import hashlib
import os
# set by other programs
PASSWORD = ""
# sets up a connection to the db
def getconn():
connection = mariadb.connect(user="root", host="mariadb", password=PASSWORD, autocommit=True)
cur = connection.cursor()
cur.execute("USE TLIS;")
cur.close()
return connection
# executes query with data substituting ?s and then returns (if ret true)
def query(squery, qdata=None, ret=True):
if not qdata:
pass
elif type(qdata) == type([]) or type(qdata) == type(("i", "tuple")):
qdata = tuple(qdata)
else:
qdata = (qdata,)
connection = getconn()
print(squery)
with connection:
cur = connection.cursor()
if qdata:
cur.execute(squery, qdata)
else:
cur.execute(squery)
rows = []
if ret:
rows = cur.fetchall()
cur.close()
return(rows)
# object with all them types
types = {
"Asset":
{
"db":"assets",
"template":{"id": 0, "asset_type": 0, "asset_number": ""},
"create":"INSERT INTO assets (type, number) VALUES (?, ?) RETURNING id;",
"update": "UPDATE assets SET type = ?, number = ? WHERE id = ?;",
"get": "SELECT id, type, number FROM assets;"
},
"Customer":
{
"db":"customers",
"template":{"id": 0, "number": "", "first_name": "", "last_name": "", "email": "", "grade": 0, "staff": 0},
"create":"INSERT INTO customers (number, first_name, last_name, email, grade, staff) VALUES (?, ?, ?, ?, ?, ?) RETURNING id;",
"update": "UPDATE customers SET number = ?, first_name = ?, last_name = ?, email = ?, grade = ?, staff = ? WHERE id = ?",
"get": "SELECT id, number, first_name, last_name, email, grade, staff FROM customers;"
},
"Tech":
{
"db":"techs",
"template":{"id": 0, "customer_id": 0, "username":""},
"create":"INSERT INTO techs (customer_id, username, permission, password, salt) VALUES (?, ?, ?, ?, ?) RETURNING id;",
"update": "UPDATE techs SET customer_id = ?, username = ?, permission = ?, password = ?, salt = ? WHERE id = ?;",
"get": "SELECT id, customer_id, username FROM techs;",
"perms": ["tlis_sysadmin", "tlis_manager", "tlis_tech"]
},
"AssetType":
{
"db":"asset_types",
"template":{"id": 0, "name": "", "prefix": "", "default_duration": 0, "description": ""},
"create":"INSERT INTO asset_types (name, prefix, max_time_out, description) VALUES (?, ?, ?, ?) RETURNING id;",
"update": "UPDATE asset_types SET name = ?, prefix = ?, max_time_out = ?, description = ? WHERE id = ?;",
"get": "SELECT id, name, prefix, max_time_out, description FROM asset_types;"
},
"TransactionType":
{
"db":"transaction_types",
"template":{"id": 0, "name": "", "description": ""},
"create":"INSERT INTO transaction_types (name, description) VALUES (?, ?) RETURNING id;",
"update": "UPDATE transaction_types SET name = ?, description = ? WHERE id = ?;",
"get": "SELECT id, name, description FROM transaction_types;"
},
"Checkout":
{
"db":"transactions_out",
"template":{"id":0, "asset_id":0, "customer_id":0, "tech_id":0, "transaction_type":0, "time":0, "time_due":0, "notes":""},
"create":"INSERT INTO transactions_out (asset_id, customer_id, tech_id, type, time_now, time_due, notes) VALUES (?, ?, ?, ?, ?, ?, ?) RETURNING id;",
"update": "UPDATE transactions_out SET asset_id = ?, customer_id = ?, tech_id = ?, type = ?, time_now = ?, time_due = ?, notes = ? WHERE id = ?",
"get": "SELECT id, asset_id, customer_id, tech_id, type, time_now, time_due, notes FROM transactions_out;"
},
"Checkin":
{
"db":"transactions_in",
"template":{"id":0, "transaction_type":0, "tech_id":0, "time":0, "notes":""},
"create":"INSERT INTO transactions_in (id, type, tech_id, time_now, notes) VALUES (?, ?, ?, ?, ?) RETURNING id;",
"update": "UPDATE transactions_in SET type = ?, tech_id = ?, time_now = ?, notes = ? WHERE id = ?;",
"get": "SELECT id, type, tech_id, time_now, notes FROM transactions_in;"
},
}
# function that runs when a normal request is sent
def run(data):
if data["action"] == "ADD":
if(data["type"] == "Tech"):
ppassword = data["password"]
data["salt"] = os.urandom(32)
data["password"] = <PASSWORD>_<PASSWORD>('<PASSWORD>', data["password"].encode('utf-8'), salt, 100000)
query(f"CREATE USER ? INDENTIFIED BY '?'", (data['username'], ppassword), ret=False)
query(f"GRANT ? TO ?", (types['Tech']['perms'][data['permission']], data['username']), ret=False)
print(data.values())
data["id"] = query(types[data["type"]]["create"], list(data.values())[2:])[0][0]
elif data["action"] == "UPDATE":
id = data["id"]
del data["id"]
query(types[data["type"]]["update"], list(data.values())[2:] + [id], False)
data["id"] = id
if(data["type"] == "Tech"):
for role in types["Tech"]["perms"]:
users = query(f"SELECT user FROM mysql.user WHERE is_role='?'", (role))[0]
if(data["username"] in users):
query(f"REVOKE ? FROM ?", (role, username), ret=False)
query(f"GRANT ? TO ?", (types['Tech']['perms'][data['permission']], data['username']), ret=False)
elif data["action"] == "DELETE":
if(data["type"] == "Tech"):
username = query(f"SELECT username FROM techs WHERE id = ?", (data['id']))[0][0]
query(f"DROP USER ?", (username), ret=False)
query(f"DELETE FROM ? WHERE id = ?", (types[data['type']]['db'], data['id']), ret=False)
else:
data = {"type":"Error", "error_type":"TOM", "reason":"no action given"}
return data
# where user is authed on login
def auth(data):
print("auth has begun")
result = query("SELECT password, salt, permission FROM techs WHERE username = ?;", data["username"])[0]
key = result[0]
salt = result[1]
permission = result[2]
password_to_check = data["password"]
print(password_to_check)
new_key = hashlib.pbkdf2_hmac(
'sha256',
password_to_check.encode('utf-8'),
salt,
100000
)
key = key[:32]
print("auth has completed")
if new_key == key:
return True, permission
else:
return False, permission
# gets all record in db and sends to users
def login():
data = []
for obj in types:
result = query(types[obj]["get"])
for row in result:
final = types[obj]["template"].copy()
i = 0
for key in final.keys():
final[key] = row[i]
i += 1
data.append(final)
return data | 0.193948 | 0.186188 |
TWEET_EXPANSION = "attachments.poll_ids,attachments.media_keys,author_id,geo.place_id,in_reply_to_user_id,referenced_tweets.id,entities.mentions.username,referenced_tweets.id.author_id"
SPACE_EXPANSION = "invited_user_ids,speaker_ids,creator_id,host_ids"
LIST_EXPANSION = "owner_id"
PINNED_TWEET_EXPANSION = "pinned_tweet_id"
TWEET_FIELD = "attachments,author_id,context_annotations,conversation_id,created_at,geo,entities,in_reply_to_user_id,lang,possibly_sensitive,public_metrics,referenced_tweets,reply_settings,source,text,withheld"
USER_FIELD = "created_at,description,entities,id,location,name,profile_image_url,protected,public_metrics,url,username,verified,withheld,pinned_tweet_id"
SPACE_FIELD = "host_ids,created_at,creator_id,id,lang,invited_user_ids,participant_count,speaker_ids,started_at,state,title,updated_at,scheduled_start,is_ticketed"
MEDIA_FIELD = "duration_ms,height,media_key,preview_image_url,public_metrics,type,url,width"
PLACE_FIELD = "contained_within,country,country_code,full_name,geo,id,name,place_type"
POLL_FIELD = "duration_minutes,end_datetime,id,options,voting_status"
TOPIC_FIELD = "id,name,description"
LIST_FIELD = "created_at,follower_count,member_count,private,description,owner_id"
# Indicator for the return_when argument in wait_for_futures method.
FIRST_COMPLETED = "FIRST_COMPLETED"
FIRST_EXCEPTION = "FIRST_EXCEPTION"
ALL_COMPLETED = "ALL_COMPLETED"
# Language codes for subtitle that based on BCP47 style.
LANGUAGES_CODES = {
"ar-SA": "Arabic",
"bn-BD": "Bangla",
"bn-IN": "Bangla",
"cs-CZ": "Czech",
"da-DK": "Danish",
"de-AT": "German",
"de-CH": "German",
"de-DE": "German",
"el-GR": "Greek",
"en-AU": "English",
"en-CA": "English",
"en-GB": "English",
"en-IE": "English",
"en-IN": "English",
"en-NZ": "English",
"en-US": "English",
"en-ZA": "English",
"es-AR": "Spanish",
"es-CL": "Spanish",
"es-CO": "Spanish",
"es-ES": "Spanish",
"es-MX": "Spanish",
"es-US": "Spanish",
"fi-FI": "Finnish",
"fr-BE": "French",
"fr-CA": "French",
"fr-CH": "French",
"fr-FR": "French",
"he-IL": "Hebrew",
"hi-IN": "Hindi",
"hu-HU": "Hungarian",
"id-ID": "Indonesian",
"it-CH": "Italian",
"it-IT": "Italian",
"jp-JP": "Japanese",
"ko-KR": "Korean",
"nl-BE": "Dutch",
"nl-NL": "Dutch",
"no-NO": "Norwegian",
"pl-PL": "Polish",
"pt-BR": "Portugese",
"pt-PT": "Portugese",
"ro-RO": "Romanian",
"ru-RU": "Russian",
"sk-SK": "Slovak",
"sv-SE": "Swedish",
"ta-IN": "Tamil",
"ta-LK": "Tamil",
"th-TH": "Thai",
"tr-TR": "Turkish",
"zh-CN": "Chinese",
"zh-HK": "Chinese",
"zh-TW": "Chinese",
} | pytweet/constants.py | TWEET_EXPANSION = "attachments.poll_ids,attachments.media_keys,author_id,geo.place_id,in_reply_to_user_id,referenced_tweets.id,entities.mentions.username,referenced_tweets.id.author_id"
SPACE_EXPANSION = "invited_user_ids,speaker_ids,creator_id,host_ids"
LIST_EXPANSION = "owner_id"
PINNED_TWEET_EXPANSION = "pinned_tweet_id"
TWEET_FIELD = "attachments,author_id,context_annotations,conversation_id,created_at,geo,entities,in_reply_to_user_id,lang,possibly_sensitive,public_metrics,referenced_tweets,reply_settings,source,text,withheld"
USER_FIELD = "created_at,description,entities,id,location,name,profile_image_url,protected,public_metrics,url,username,verified,withheld,pinned_tweet_id"
SPACE_FIELD = "host_ids,created_at,creator_id,id,lang,invited_user_ids,participant_count,speaker_ids,started_at,state,title,updated_at,scheduled_start,is_ticketed"
MEDIA_FIELD = "duration_ms,height,media_key,preview_image_url,public_metrics,type,url,width"
PLACE_FIELD = "contained_within,country,country_code,full_name,geo,id,name,place_type"
POLL_FIELD = "duration_minutes,end_datetime,id,options,voting_status"
TOPIC_FIELD = "id,name,description"
LIST_FIELD = "created_at,follower_count,member_count,private,description,owner_id"
# Indicator for the return_when argument in wait_for_futures method.
FIRST_COMPLETED = "FIRST_COMPLETED"
FIRST_EXCEPTION = "FIRST_EXCEPTION"
ALL_COMPLETED = "ALL_COMPLETED"
# Language codes for subtitle that based on BCP47 style.
LANGUAGES_CODES = {
"ar-SA": "Arabic",
"bn-BD": "Bangla",
"bn-IN": "Bangla",
"cs-CZ": "Czech",
"da-DK": "Danish",
"de-AT": "German",
"de-CH": "German",
"de-DE": "German",
"el-GR": "Greek",
"en-AU": "English",
"en-CA": "English",
"en-GB": "English",
"en-IE": "English",
"en-IN": "English",
"en-NZ": "English",
"en-US": "English",
"en-ZA": "English",
"es-AR": "Spanish",
"es-CL": "Spanish",
"es-CO": "Spanish",
"es-ES": "Spanish",
"es-MX": "Spanish",
"es-US": "Spanish",
"fi-FI": "Finnish",
"fr-BE": "French",
"fr-CA": "French",
"fr-CH": "French",
"fr-FR": "French",
"he-IL": "Hebrew",
"hi-IN": "Hindi",
"hu-HU": "Hungarian",
"id-ID": "Indonesian",
"it-CH": "Italian",
"it-IT": "Italian",
"jp-JP": "Japanese",
"ko-KR": "Korean",
"nl-BE": "Dutch",
"nl-NL": "Dutch",
"no-NO": "Norwegian",
"pl-PL": "Polish",
"pt-BR": "Portugese",
"pt-PT": "Portugese",
"ro-RO": "Romanian",
"ru-RU": "Russian",
"sk-SK": "Slovak",
"sv-SE": "Swedish",
"ta-IN": "Tamil",
"ta-LK": "Tamil",
"th-TH": "Thai",
"tr-TR": "Turkish",
"zh-CN": "Chinese",
"zh-HK": "Chinese",
"zh-TW": "Chinese",
} | 0.337859 | 0.23456 |
from flask import Flask
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean
from database import Base
import settings
import stripe
import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = settings.DB_URL
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = settings.TRACK_MODIFICATIONS
stripe.api_key = settings.STRIPE_SECRET_KEY # Stripe's API key
class MerchantUserConnection(Base):
__tablename__ = 'merchantuserconnections'
id = Column(Integer, primary_key=True)
user_id = Column(String(length=50))
merchant_id = Column(String(length=50))
data = Column(String(length=1000))
def __init__(self, user_id, merchant_id, data):
self.user_id = user_id
self.merchant_id = merchant_id
self.data = data
def __repr__(self):
return '<user_id {} merchant_id {}>'.format(self.user_id, self.merchant_id)
class Expense(Base):
__tablename__ = 'expenses'
id = Column(Integer, primary_key=True)
user_id = Column(String(length=50))
amount = Column(Float())
description = Column(String(length=1000))
time = Column(DateTime())
def __init__(self, user_id, amount, description):
self.user_id = user_id
self.amount = amount
self.description = description
self.time = datetime.datetime.now()
def __repr__(self):
return '<user_id {} expense {}>'.format(self.user_id, self.amount)
class Income(Base):
__tablename__ = 'incomes'
id = Column(Integer, primary_key=True)
user_id = Column(String(length=50))
amount = Column(Float())
description = Column(String(length=1000))
time = Column(DateTime())
def __init__(self, user_id, amount, description):
self.user_id = user_id
self.amount = amount
self.description = description
self.time = datetime.datetime.now()
def __repr__(self):
return '<user_id {} income {}>'.format(self.user_id, self.amount)
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(length=100))
email = Column(String(length=120), unique=True)
password = Column(String(length=50))
verify = Column(Boolean())
stripe_customer_id = Column(String(length=100))
_customer = None
def __init__(self, name, email, password):
self.name = name
self.email = email
self.password = password
self.verify = True
def customer(self):
customer = stripe.Customer.retrieve(self.stripe_customer_id)
self._customer = customer
return customer
def __repr__(self):
return '<User email {}>'.format(self.email)
class Merchant(Base):
__tablename__ = 'merchants'
id = Column(Integer, primary_key=True)
name = Column(String(length=100))
email = Column(String(length=120), unique=True)
password = Column(String(length=50))
verify = Column(Boolean())
stripe_customer_id = Column(String(length=100))
_customer = None
def __init__(self, name, email, password):
self.name = name
self.email = email
self.password = password
self.verify = True
def customer(self):
customer = stripe.Customer.retrieve(self.stripe_customer_id)
self._customer = customer
return customer
def __repr__(self):
return '<Merchant email {}>'.format(self.email)
class StockPurchase(Base):
__tablename__ = 'stockpurchases'
id = Column(Integer, primary_key=True)
user_id = Column(String(length=50))
amount = Column(Float())
quantity = Column(Float())
time = Column(DateTime())
def __init__(self, user_id, amount):
self.user_id = user_id
self.amount = amount
self.time = datetime.datetime.now()
def __repr__(self):
return '<user_id {} expense {}>'.format(self.user_id, self.amount)
class CustomerPurchase(Base):
__tablename__ = 'customerpurchase'
id = Column(Integer, primary_key=True)
user_id = Column(String(length=50))
amount = Column(Float())
merchant_id = Column(String(length=50))
stripe_charge_id = Column(String(length=50))
stripe_payout_id = Column(String(length=50))
time = Column(DateTime())
def __init__(self, user_id, amount, merchant_id, stripe_charge_id, stripe_payout_id):
self.user_id = user_id
self.amount = amount
self.merchant_id = merchant_id
self.stripe_charge_id = stripe_charge_id
self.stripe_payout_id = stripe_payout_id
self.time = datetime.datetime.now()
def __repr__(self):
return '<customer_purchase {} customer {} merchant {}>'.format(self.id, self.user_id, self.merchant_id) | models.py | from flask import Flask
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean
from database import Base
import settings
import stripe
import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = settings.DB_URL
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = settings.TRACK_MODIFICATIONS
stripe.api_key = settings.STRIPE_SECRET_KEY # Stripe's API key
class MerchantUserConnection(Base):
__tablename__ = 'merchantuserconnections'
id = Column(Integer, primary_key=True)
user_id = Column(String(length=50))
merchant_id = Column(String(length=50))
data = Column(String(length=1000))
def __init__(self, user_id, merchant_id, data):
self.user_id = user_id
self.merchant_id = merchant_id
self.data = data
def __repr__(self):
return '<user_id {} merchant_id {}>'.format(self.user_id, self.merchant_id)
class Expense(Base):
__tablename__ = 'expenses'
id = Column(Integer, primary_key=True)
user_id = Column(String(length=50))
amount = Column(Float())
description = Column(String(length=1000))
time = Column(DateTime())
def __init__(self, user_id, amount, description):
self.user_id = user_id
self.amount = amount
self.description = description
self.time = datetime.datetime.now()
def __repr__(self):
return '<user_id {} expense {}>'.format(self.user_id, self.amount)
class Income(Base):
__tablename__ = 'incomes'
id = Column(Integer, primary_key=True)
user_id = Column(String(length=50))
amount = Column(Float())
description = Column(String(length=1000))
time = Column(DateTime())
def __init__(self, user_id, amount, description):
self.user_id = user_id
self.amount = amount
self.description = description
self.time = datetime.datetime.now()
def __repr__(self):
return '<user_id {} income {}>'.format(self.user_id, self.amount)
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(length=100))
email = Column(String(length=120), unique=True)
password = Column(String(length=50))
verify = Column(Boolean())
stripe_customer_id = Column(String(length=100))
_customer = None
def __init__(self, name, email, password):
self.name = name
self.email = email
self.password = password
self.verify = True
def customer(self):
customer = stripe.Customer.retrieve(self.stripe_customer_id)
self._customer = customer
return customer
def __repr__(self):
return '<User email {}>'.format(self.email)
class Merchant(Base):
__tablename__ = 'merchants'
id = Column(Integer, primary_key=True)
name = Column(String(length=100))
email = Column(String(length=120), unique=True)
password = Column(String(length=50))
verify = Column(Boolean())
stripe_customer_id = Column(String(length=100))
_customer = None
def __init__(self, name, email, password):
self.name = name
self.email = email
self.password = password
self.verify = True
def customer(self):
customer = stripe.Customer.retrieve(self.stripe_customer_id)
self._customer = customer
return customer
def __repr__(self):
return '<Merchant email {}>'.format(self.email)
class StockPurchase(Base):
__tablename__ = 'stockpurchases'
id = Column(Integer, primary_key=True)
user_id = Column(String(length=50))
amount = Column(Float())
quantity = Column(Float())
time = Column(DateTime())
def __init__(self, user_id, amount):
self.user_id = user_id
self.amount = amount
self.time = datetime.datetime.now()
def __repr__(self):
return '<user_id {} expense {}>'.format(self.user_id, self.amount)
class CustomerPurchase(Base):
    """A purchase a customer made at a merchant, with Stripe charge/payout ids."""
    __tablename__ = 'customerpurchase'

    id = Column(Integer, primary_key=True)
    user_id = Column(String(length=50))
    amount = Column(Float())
    merchant_id = Column(String(length=50))
    stripe_charge_id = Column(String(length=50))
    stripe_payout_id = Column(String(length=50))
    time = Column(DateTime())

    def __init__(self, user_id, amount, merchant_id, stripe_charge_id, stripe_payout_id):
        self.user_id = user_id
        self.amount = amount
        self.merchant_id = merchant_id
        self.stripe_charge_id = stripe_charge_id
        self.stripe_payout_id = stripe_payout_id
        # Purchase timestamp taken at object creation.
        self.time = datetime.datetime.now()

    def __repr__(self):
        return '<customer_purchase {} customer {} merchant {}>'.format(self.id, self.user_id, self.merchant_id)
## The script can be run with Python 3.6 or higher version.
## The script requires 'requests' library to make the API calls.
import time
import common
headers = {"Content-Type" : "application/vnd.netbackup+json;version=4.0"}
# Perform bulk restore
def perform_bulk_restore(baseurl, token, bulk_backup_job_id, workload_type, vcenter_name, client_restore_vm_prefix):
    """Create and verify an instant-access VM for every successful child
    snapshot job of *bulk_backup_job_id*.

    Returns a comma-separated string of created mount ids.  Raises Exception
    with an aggregated message if any create or verify step failed.
    """
    headers.update({'Authorization': token})
    jobid_list = []
    mount_id_list = []
    # jobid -> (mount_id, backup_id); consumed by the verification pass below.
    job_mount_dict = {}
    error_msg = ''
    is_error = False
    url = f"{baseurl}admin/jobs/?filter=parentJobId eq {str(bulk_backup_job_id)} and "\
        f"jobId ne {str(bulk_backup_job_id)} and jobType eq 'SNAPSHOT' and "\
        f"state eq 'DONE' and (status eq 0 or status eq 1)"
    status_code, response_text = common.rest_request('GET', url, headers)
    common.validate_response(status_code, 200, response_text)
    for data in response_text["data"]:
        jobid_list.append(data["id"])
    print(f"Snapshot jobid list:[{(','.join(jobid_list))}]")
    for jobid in jobid_list:
        mount_id = ''
        url = f"{baseurl}admin/jobs/?filter=parentJobId eq {str(jobid)} and state eq 'DONE'"
        status_code, response_text = common.rest_request('GET', url, headers)
        common.validate_response(status_code, 200, response_text)
        backup_id = response_text['data'][0]['attributes']['backupId']
        asset_id = response_text['data'][0]['attributes']['assetID']
        asset_name = response_text['data'][0]['attributes']['assetDisplayableName']
        print(f"Backup id for job:[{jobid}] is:[{backup_id}]")
        print(f"asset id for job:[{jobid}] is:[{asset_id}]")
        print(f"asset display name for job:[{jobid}] is:[{asset_name}]")
        # Get asset info
        asset_id, _, exsi_host = common.get_asset_info(baseurl, token, workload_type, asset_name)
        resource_pool = get_resource_pool(baseurl, token, workload_type, vcenter_name, exsi_host)
        print(f"Resource pool:[{resource_pool}]")
        restore_vmname = f"{client_restore_vm_prefix}_{jobid}"
        print(f"Restore vm name for jobid:[{jobid}] is:[{restore_vmname}]")
        try:
            mount_id = create_instant_access_vm(baseurl, token, workload_type,
                                                backup_id, vcenter_name, exsi_host,
                                                resource_pool, restore_vmname)
            if mount_id:
                mount_id_list.append(mount_id)
                # BUGFIX: record the mount so the verification loop below
                # actually runs — the dict was never populated in the original,
                # so no mount was ever verified.  Also remember the matching
                # backup id per job instead of relying on the stale loop value.
                job_mount_dict[jobid] = (mount_id, backup_id)
            else:
                error_msg = f"{error_msg} Unable to create the the instant VM for jobid:[{jobid}]"
                is_error = True
        except Exception as exc:
            error_msg = f"{error_msg} Instant VM creation Exception for jobid:[{jobid}] is: {exc}"
            is_error = True
    for jobid, (mount_id, backup_id) in job_mount_dict.items():
        try:
            verify_instant_access_vmstate(baseurl, token, workload_type, backup_id, mount_id)
        except Exception as exc:
            error_msg = f"{error_msg} Instant VM verification Exception for jobid:[{jobid}] is:{exc}"
            is_error = True
    mount_id_list_str = ",".join(mount_id_list)
    print(f"Mount id list:[{mount_id_list_str}]")
    if is_error:
        raise Exception(error_msg)
    return mount_id_list_str
# Get vm recovery points
def get_recovery_points(baseurl, token, workload_type, asset_id):
    """Return the id of the first recovery point found for *asset_id*."""
    print(f"Get the recovery points for asset:[{asset_id}]")
    headers.update({'Authorization': token})
    url = (f"{baseurl}recovery-point-service/workloads/{workload_type}/"
           f"recovery-points?filter=assetId eq '{asset_id}'")
    status_code, body = common.rest_request('GET', url, headers)
    common.validate_response(status_code, 200, body)
    # First entry's id is the backup (recovery point) identifier.
    return body['data'][0]['id']
# Get resource pool of vcenter exsi
def get_resource_pool(baseurl, token, workload_type, vcenter_name, exsi_host):
    """Return the first resource-pool path for the given vCenter / ESXi host."""
    headers.update({'Authorization': token})
    # CONSISTENCY FIX: every other URL in this module is built as
    # f"{baseurl}<path>" (baseurl already carries the trailing slash); the
    # original f"{baseurl}/config..." produced a double slash.
    url = f"{baseurl}config/workloads/{workload_type}/vcenters/"\
        f"{vcenter_name}/esxiservers/{exsi_host}/resource-pools"
    status_code, response_text = common.rest_request('GET', url, headers)
    common.validate_response(status_code, 200, response_text)
    resource_pool = response_text['data']['attributes']['resourcePools'][0]['path']
    return resource_pool
# Create instant access VM
def create_instant_access_vm(baseurl, token, workload_type, backup_id, vcenter_name, exsi_host, resource_pool, client_restore_name):
    """Kick off an instant-access VM restore and return its mount id."""
    print(f"Instant restore is initiated:[{client_restore_name}]")
    headers.update({'Authorization': token})
    # NOTE(review): powerOn/removeEthCards are sent as the strings "True"/"False",
    # not booleans — presumably what this API version expects; confirm.
    request_body = {
        "data": {
            "type": "instantAccessVmV3",
            "attributes": {
                "backupId": backup_id,
                "copyNumber": 1,
                "vCenter": vcenter_name,
                "esxiHost": exsi_host,
                "resourcePoolOrVapp": resource_pool,
                "vmName": client_restore_name,
                "powerOn": "True",
                "removeEthCards": "False",
                # Keep the instant-access mount for 30 days.
                "retention": {
                    "value": 30,
                    "unit": "DAYS"
                },
            },
        }
    }
    endpoint = f"{baseurl}recovery/workloads/{workload_type}/instant-access-vms"
    status_code, body = common.rest_request('POST', endpoint, headers, data=request_body)
    common.validate_response(status_code, 201, body)
    return body['data']['id']
# Get instant access VM state
def get_instantaccess_vmstate(baseurl, token, workload_type, mount_id):
    """Return the status string of the instant-access VM *mount_id*."""
    headers.update({'Authorization': token})
    endpoint = f"{baseurl}recovery/workloads/{workload_type}/instant-access-vms/{mount_id}"
    status_code, body = common.rest_request('GET', endpoint, headers)
    common.validate_response(status_code, 200, body)
    return body['data']['attributes']['status']
# Verify instant access VM state
def verify_instant_access_vmstate(baseurl, token, workload_type, backup_id, mount_id, timeout=600):
    """Poll the instant-access VM until it reports 'ACTIVE' or *timeout*
    seconds elapse.

    Returns mount_id on success; raises Exception if the VM never becomes
    ACTIVE within the timeout window.
    """
    print("Verify the instant access VM state")
    inst_access_vmstatus = ''
    end_time = time.time() + timeout
    while time.time() < end_time:
        time.sleep(20)
        inst_access_vmstatus = get_instantaccess_vmstate(baseurl, token, workload_type, mount_id)
        if inst_access_vmstatus == 'ACTIVE':
            print("Restore Successful")
            break
        # BUGFIX: the original raised on the FIRST non-ACTIVE status, which
        # made the timeout loop dead code.  Keep polling while provisioning
        # is still in progress instead.
        print(f"Instant access VM of backup:[{backup_id}] not ACTIVE yet, status:[{inst_access_vmstatus}]")
    else:
        # Loop exhausted without reaching ACTIVE: give up.
        print(f"Restore is failed of backup:[{backup_id}] with status:[{inst_access_vmstatus}]")
        raise Exception(f"Restore is failed of backup:[{backup_id}] with status:[{inst_access_vmstatus}]")
    print(f"Verified instant access restore status:[{inst_access_vmstatus}]")
    return mount_id
# Remove instant access VM
def remove_instantaccess_vm(baseurl, token, mount_id):
    """Delete the instant-access VM *mount_id*; no-op when mount_id is empty."""
    if not mount_id:
        return
    headers.update({'Authorization': token})
    endpoint = f"{baseurl}recovery/workloads/vmware/instant-access-vms/{mount_id}"
    status_code, body = common.rest_request('DELETE', endpoint, headers)
    common.validate_response(status_code, 204, body)
    print(f"Successfully removed instant access vm:[{mount_id}]")
## The script can be run with Python 3.6 or higher version.
## The script requires 'requests' library to make the API calls.
import time

import common

# Shared request headers; 'Authorization' is added per call.
headers = {"Content-Type": "application/vnd.netbackup+json;version=4.0"}


# Perform bulk restore
def perform_bulk_restore(baseurl, token, bulk_backup_job_id, workload_type, vcenter_name, client_restore_vm_prefix):
    """Create and verify an instant-access VM for every successful child
    snapshot job of *bulk_backup_job_id*.

    Returns a comma-separated string of created mount ids.  Raises Exception
    with an aggregated message if any create or verify step failed.
    """
    headers.update({'Authorization': token})
    jobid_list = []
    mount_id_list = []
    # jobid -> (mount_id, backup_id); consumed by the verification pass below.
    job_mount_dict = {}
    error_msg = ''
    is_error = False
    url = f"{baseurl}admin/jobs/?filter=parentJobId eq {str(bulk_backup_job_id)} and "\
        f"jobId ne {str(bulk_backup_job_id)} and jobType eq 'SNAPSHOT' and "\
        f"state eq 'DONE' and (status eq 0 or status eq 1)"
    status_code, response_text = common.rest_request('GET', url, headers)
    common.validate_response(status_code, 200, response_text)
    for data in response_text["data"]:
        jobid_list.append(data["id"])
    print(f"Snapshot jobid list:[{(','.join(jobid_list))}]")
    for jobid in jobid_list:
        mount_id = ''
        url = f"{baseurl}admin/jobs/?filter=parentJobId eq {str(jobid)} and state eq 'DONE'"
        status_code, response_text = common.rest_request('GET', url, headers)
        common.validate_response(status_code, 200, response_text)
        backup_id = response_text['data'][0]['attributes']['backupId']
        asset_id = response_text['data'][0]['attributes']['assetID']
        asset_name = response_text['data'][0]['attributes']['assetDisplayableName']
        print(f"Backup id for job:[{jobid}] is:[{backup_id}]")
        print(f"asset id for job:[{jobid}] is:[{asset_id}]")
        print(f"asset display name for job:[{jobid}] is:[{asset_name}]")
        # Get asset info
        asset_id, _, exsi_host = common.get_asset_info(baseurl, token, workload_type, asset_name)
        resource_pool = get_resource_pool(baseurl, token, workload_type, vcenter_name, exsi_host)
        print(f"Resource pool:[{resource_pool}]")
        restore_vmname = f"{client_restore_vm_prefix}_{jobid}"
        print(f"Restore vm name for jobid:[{jobid}] is:[{restore_vmname}]")
        try:
            mount_id = create_instant_access_vm(baseurl, token, workload_type,
                                                backup_id, vcenter_name, exsi_host,
                                                resource_pool, restore_vmname)
            if mount_id:
                mount_id_list.append(mount_id)
                # BUGFIX: record the mount so the verification loop below
                # actually runs — the dict was never populated in the original.
                job_mount_dict[jobid] = (mount_id, backup_id)
            else:
                error_msg = f"{error_msg} Unable to create the the instant VM for jobid:[{jobid}]"
                is_error = True
        except Exception as exc:
            error_msg = f"{error_msg} Instant VM creation Exception for jobid:[{jobid}] is: {exc}"
            is_error = True
    for jobid, (mount_id, backup_id) in job_mount_dict.items():
        try:
            verify_instant_access_vmstate(baseurl, token, workload_type, backup_id, mount_id)
        except Exception as exc:
            error_msg = f"{error_msg} Instant VM verification Exception for jobid:[{jobid}] is:{exc}"
            is_error = True
    mount_id_list_str = ",".join(mount_id_list)
    print(f"Mount id list:[{mount_id_list_str}]")
    if is_error:
        raise Exception(error_msg)
    return mount_id_list_str


# Get vm recovery points
def get_recovery_points(baseurl, token, workload_type, asset_id):
    """Return the id of the first recovery point found for *asset_id*."""
    print(f"Get the recovery points for asset:[{asset_id}]")
    headers.update({'Authorization': token})
    url = f"{baseurl}recovery-point-service/workloads/{workload_type}/"\
        f"recovery-points?filter=assetId eq '{asset_id}'"
    status_code, response_text = common.rest_request('GET', url, headers)
    common.validate_response(status_code, 200, response_text)
    backup_id = response_text['data'][0]['id']
    return backup_id


# Get resource pool of vcenter exsi
def get_resource_pool(baseurl, token, workload_type, vcenter_name, exsi_host):
    """Return the first resource-pool path for the given vCenter / ESXi host."""
    headers.update({'Authorization': token})
    # CONSISTENCY FIX: baseurl already ends with '/'; the original added a
    # second slash here, unlike every other URL in this module.
    url = f"{baseurl}config/workloads/{workload_type}/vcenters/"\
        f"{vcenter_name}/esxiservers/{exsi_host}/resource-pools"
    status_code, response_text = common.rest_request('GET', url, headers)
    common.validate_response(status_code, 200, response_text)
    resource_pool = response_text['data']['attributes']['resourcePools'][0]['path']
    return resource_pool


# Create instant access VM
def create_instant_access_vm(baseurl, token, workload_type, backup_id, vcenter_name, exsi_host, resource_pool, client_restore_name):
    """Kick off an instant-access VM restore and return its mount id."""
    print(f"Instant restore is initiated:[{client_restore_name}]")
    headers.update({'Authorization': token})
    payload = {
        "data": {
            "type": "instantAccessVmV3",
            "attributes": {
                "backupId": backup_id,
                "copyNumber": 1,
                "vCenter": vcenter_name,
                "esxiHost": exsi_host,
                "resourcePoolOrVapp": resource_pool,
                "vmName": client_restore_name,
                "powerOn": "True",
                "removeEthCards": "False",
                # Keep the instant-access mount for 30 days.
                "retention": {
                    "value": 30,
                    "unit": "DAYS"
                },
            },
        }
    }
    url = f"{baseurl}recovery/workloads/{workload_type}/instant-access-vms"
    status_code, response_text = common.rest_request('POST', url, headers, data=payload)
    common.validate_response(status_code, 201, response_text)
    mount_id = response_text['data']['id']
    return mount_id


# Get instant access VM state
def get_instantaccess_vmstate(baseurl, token, workload_type, mount_id):
    """Return the status string of the instant-access VM *mount_id*."""
    headers.update({'Authorization': token})
    url = f"{baseurl}recovery/workloads/{workload_type}/instant-access-vms/{mount_id}"
    status_code, response_text = common.rest_request('GET', url, headers)
    common.validate_response(status_code, 200, response_text)
    status = response_text['data']['attributes']['status']
    return status


# Verify instant access VM state
def verify_instant_access_vmstate(baseurl, token, workload_type, backup_id, mount_id, timeout=600):
    """Poll the instant-access VM until it reports 'ACTIVE' or *timeout*
    seconds elapse; raise Exception on timeout, return mount_id on success.
    """
    print("Verify the instant access VM state")
    inst_access_vmstatus = ''
    end_time = time.time() + timeout
    while time.time() < end_time:
        time.sleep(20)
        inst_access_vmstatus = get_instantaccess_vmstate(baseurl, token, workload_type, mount_id)
        if inst_access_vmstatus == 'ACTIVE':
            print("Restore Successful")
            break
        # BUGFIX: the original raised on the first non-ACTIVE poll, making the
        # timeout loop dead code.  Keep polling until ACTIVE or timeout.
        print(f"Instant access VM of backup:[{backup_id}] not ACTIVE yet, status:[{inst_access_vmstatus}]")
    else:
        print(f"Restore is failed of backup:[{backup_id}] with status:[{inst_access_vmstatus}]")
        raise Exception(f"Restore is failed of backup:[{backup_id}] with status:[{inst_access_vmstatus}]")
    print(f"Verified instant access restore status:[{inst_access_vmstatus}]")
    return mount_id


# Remove instant access VM
def remove_instantaccess_vm(baseurl, token, mount_id):
    """Delete the instant-access VM *mount_id*; no-op when mount_id is empty."""
    if mount_id:
        headers.update({'Authorization': token})
        url = f"{baseurl}recovery/workloads/vmware/instant-access-vms/{mount_id}"
        status_code, response_text = common.rest_request('DELETE', url, headers)
        common.validate_response(status_code, 204, response_text)
        print(f"Successfully removed instant access vm:[{mount_id}]")
import numpy as np
import os
import torch
import torchvision.models as models
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import sys
import math
import torch.nn.init as init
import logging
from torch.nn.parameter import Parameter
from subnet import *
import torchac
def save_model(model, iter):
    """Persist *model*'s state dict to ./snapshot/iter<iter>.model."""
    checkpoint_path = f"./snapshot/iter{iter}.model"
    torch.save(model.state_dict(), checkpoint_path)
def load_model(model, f):
    """Load weights from checkpoint file *f* into *model*.

    Keys in the checkpoint that do not exist in the model are silently
    dropped (partial load).  Returns the iteration number parsed from a
    filename of the form ``...iter<N>.model``, or 0 when the name does not
    match that pattern.
    """
    path = str(f)
    with open(path, 'rb') as checkpoint:
        pretrained_dict = torch.load(checkpoint)
    model_dict = model.state_dict()
    # Keep only entries the target model actually has.
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    # BUGFIX: the original rebound ``f`` to the (closed) file object and then
    # parsed the iteration number out of str(file) — i.e. out of the file
    # object's repr(), which only works by accident because the repr happens
    # to contain the path.  Parse the path string directly instead.
    st = path.find('iter')
    if st != -1:
        ed = path.find('.model', st + 4)
        if ed != -1:
            return int(path[st + 4:ed])
    return 0
class VideoCompressor(nn.Module):
    """DVC-style learned video codec.

    Pipeline in forward(): optical-flow motion estimation -> motion-vector
    (MV) compression -> motion compensation -> residual compression with a
    hyperprior, plus learned entropy models used to estimate the bitrate.
    The only code change relative to the original is restoring the
    indentation that was destroyed by extraction; logic is untouched.
    """

    def __init__(self):
        super(VideoCompressor, self).__init__()
        # self.imageCompressor = ImageCompressor()
        self.opticFlow = ME_Spynet()
        self.mvEncoder = Analysis_mv_net()
        self.Q = None
        self.mvDecoder = Synthesis_mv_net()
        self.warpnet = Warp_net()
        self.resEncoder = Analysis_net()
        self.resDecoder = Synthesis_net()
        self.respriorEncoder = Analysis_prior_net()
        self.respriorDecoder = Synthesis_prior_net()
        self.bitEstimator_z = BitEstimator(out_channel_N)
        self.bitEstimator_mv = BitEstimator(out_channel_mv)
        # self.flow_warp = Resample2d()
        # self.bitEstimator_feature = BitEstimator(out_channel_M)
        self.warp_weight = 0
        # Symbol range assumed by the arithmetic-coding helpers below.
        self.mxrange = 150
        # When True (eval only), replace estimated bits with actual torchac bits.
        self.calrealbits = False

    def forwardFirstFrame(self, x):
        # NOTE(review): self.imageCompressor and self.bitEstimator are never
        # assigned in __init__ (both are commented out above), so calling this
        # method raises AttributeError — confirm intent before using it.
        output, bittrans = self.imageCompressor(x)
        cost = self.bitEstimator(bittrans)
        return output, cost

    def motioncompensation(self, ref, mv):
        """Warp *ref* by motion field *mv* and refine with the warp network."""
        warpframe = flow_warp(ref, mv)
        inputfeature = torch.cat((warpframe, ref), 1)
        prediction = self.warpnet(inputfeature) + warpframe
        return prediction, warpframe

    def forward(self, input_image, referframe, quant_noise_feature=None, quant_noise_z=None, quant_noise_mv=None):
        """Compress *input_image* relative to *referframe*.

        Returns (clipped_recon_image, mse_loss, warploss, interloss,
        bpp_feature, bpp_z, bpp_mv, bpp).
        """
        estmv = self.opticFlow(input_image, referframe)
        mvfeature = self.mvEncoder(estmv)
        # Quantization: additive uniform noise during training, rounding at eval.
        if self.training:
            quant_mv = mvfeature + quant_noise_mv
        else:
            quant_mv = torch.round(mvfeature)
        quant_mv_upsample = self.mvDecoder(quant_mv)
        prediction, warpframe = self.motioncompensation(referframe, quant_mv_upsample)
        input_residual = input_image - prediction
        feature = self.resEncoder(input_residual)
        batch_size = feature.size()[0]
        z = self.respriorEncoder(feature)
        if self.training:
            compressed_z = z + quant_noise_z
        else:
            compressed_z = torch.round(z)
        recon_sigma = self.respriorDecoder(compressed_z)
        feature_renorm = feature
        if self.training:
            compressed_feature_renorm = feature_renorm + quant_noise_feature
        else:
            compressed_feature_renorm = torch.round(feature_renorm)
        recon_res = self.resDecoder(compressed_feature_renorm)
        recon_image = prediction + recon_res
        clipped_recon_image = recon_image.clamp(0., 1.)
        # Distortion terms.
        mse_loss = torch.mean((recon_image - input_image).pow(2))
        warploss = torch.mean((warpframe - input_image).pow(2))
        interloss = torch.mean((prediction - input_image).pow(2))

        # --- rate estimation helpers (closures over self) ---

        def feature_probs_based_sigma(feature, sigma):
            """Bits for the residual features under a Laplace(0, sigma) model."""

            def getrealbitsg(x, gaussian):
                # Actual arithmetic-coded size via torchac (eval-time only).
                cdfs = []
                x = x + self.mxrange
                n, c, h, w = x.shape
                for i in range(-self.mxrange, self.mxrange):
                    cdfs.append(gaussian.cdf(i - 0.5).view(n, c, h, w, 1))
                cdfs = torch.cat(cdfs, 4).cpu().detach()
                byte_stream = torchac.encode_float_cdf(cdfs, x.cpu().detach().to(torch.int16), check_input_bounds=True)
                real_bits = torch.from_numpy(np.array([len(byte_stream) * 8])).float().cuda()
                sym_out = torchac.decode_float_cdf(cdfs, byte_stream)
                return sym_out - self.mxrange, real_bits

            mu = torch.zeros_like(sigma)
            sigma = sigma.clamp(1e-5, 1e10)
            gaussian = torch.distributions.laplace.Laplace(mu, sigma)
            probs = gaussian.cdf(feature + 0.5) - gaussian.cdf(feature - 0.5)
            total_bits = torch.sum(torch.clamp(-1.0 * torch.log(probs + 1e-5) / math.log(2.0), 0, 50))
            if self.calrealbits and not self.training:
                decodedx, real_bits = getrealbitsg(feature, gaussian)
                total_bits = real_bits
            return total_bits, probs

        def iclr18_estrate_bits_z(z):
            """Bits for the hyperprior z under the learned factorized model."""

            def getrealbits(x):
                cdfs = []
                x = x + self.mxrange
                n, c, h, w = x.shape
                for i in range(-self.mxrange, self.mxrange):
                    cdfs.append(self.bitEstimator_z(i - 0.5).view(1, c, 1, 1, 1).repeat(1, 1, h, w, 1))
                cdfs = torch.cat(cdfs, 4).cpu().detach()
                byte_stream = torchac.encode_float_cdf(cdfs, x.cpu().detach().to(torch.int16), check_input_bounds=True)
                real_bits = torch.sum(torch.from_numpy(np.array([len(byte_stream) * 8])).float().cuda())
                sym_out = torchac.decode_float_cdf(cdfs, byte_stream)
                return sym_out - self.mxrange, real_bits

            prob = self.bitEstimator_z(z + 0.5) - self.bitEstimator_z(z - 0.5)
            total_bits = torch.sum(torch.clamp(-1.0 * torch.log(prob + 1e-5) / math.log(2.0), 0, 50))
            if self.calrealbits and not self.training:
                decodedx, real_bits = getrealbits(z)
                total_bits = real_bits
            return total_bits, prob

        def iclr18_estrate_bits_mv(mv):
            """Bits for the quantized motion vectors under the learned model."""

            def getrealbits(x):
                cdfs = []
                x = x + self.mxrange
                n, c, h, w = x.shape
                for i in range(-self.mxrange, self.mxrange):
                    cdfs.append(self.bitEstimator_mv(i - 0.5).view(1, c, 1, 1, 1).repeat(1, 1, h, w, 1))
                cdfs = torch.cat(cdfs, 4).cpu().detach()
                byte_stream = torchac.encode_float_cdf(cdfs, x.cpu().detach().to(torch.int16), check_input_bounds=True)
                real_bits = torch.sum(torch.from_numpy(np.array([len(byte_stream) * 8])).float().cuda())
                sym_out = torchac.decode_float_cdf(cdfs, byte_stream)
                return sym_out - self.mxrange, real_bits

            prob = self.bitEstimator_mv(mv + 0.5) - self.bitEstimator_mv(mv - 0.5)
            total_bits = torch.sum(torch.clamp(-1.0 * torch.log(prob + 1e-5) / math.log(2.0), 0, 50))
            if self.calrealbits and not self.training:
                decodedx, real_bits = getrealbits(mv)
                total_bits = real_bits
            return total_bits, prob

        total_bits_feature, _ = feature_probs_based_sigma(compressed_feature_renorm, recon_sigma)
        total_bits_z, _ = iclr18_estrate_bits_z(compressed_z)
        total_bits_mv, _ = iclr18_estrate_bits_mv(quant_mv)
        # Bits per pixel, normalized over batch and spatial dims.
        im_shape = input_image.size()
        bpp_feature = total_bits_feature / (batch_size * im_shape[2] * im_shape[3])
        bpp_z = total_bits_z / (batch_size * im_shape[2] * im_shape[3])
        bpp_mv = total_bits_mv / (batch_size * im_shape[2] * im_shape[3])
        bpp = bpp_feature + bpp_z + bpp_mv
        return clipped_recon_image, mse_loss, warploss, interloss, bpp_feature, bpp_z, bpp_mv, bpp
# NOTE(review): restored from a whitespace-mangled dump; the leading
# "import numpy as np" line had been fused onto the previous record.
import numpy as np
import os
import torch
import torchvision.models as models
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import sys
import math
import torch.nn.init as init
import logging
from torch.nn.parameter import Parameter
from subnet import *
import torchac


def save_model(model, iter):
    """Persist *model*'s state dict to ./snapshot/iter<iter>.model."""
    torch.save(model.state_dict(), "./snapshot/iter{}.model".format(iter))


def load_model(model, f):
    """Load weights from checkpoint file *f* into *model* (partial load:
    checkpoint keys missing from the model are dropped).

    Returns the iteration number parsed from a filename of the form
    ``...iter<N>.model``, or 0 when the name does not match.
    """
    path = str(f)
    with open(path, 'rb') as checkpoint:
        pretrained_dict = torch.load(checkpoint)
    model_dict = model.state_dict()
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    # BUGFIX: the original rebound ``f`` to the closed file object and parsed
    # the iteration number out of its repr(); parse the path string instead.
    st = path.find('iter')
    if st != -1:
        ed = path.find('.model', st + 4)
        if ed != -1:
            return int(path[st + 4:ed])
    return 0


class VideoCompressor(nn.Module):
    """DVC-style learned video codec: motion estimation + MV compression +
    motion compensation + residual compression with a hyperprior, plus
    learned entropy models for rate estimation.
    """

    def __init__(self):
        super(VideoCompressor, self).__init__()
        # self.imageCompressor = ImageCompressor()
        self.opticFlow = ME_Spynet()
        self.mvEncoder = Analysis_mv_net()
        self.Q = None
        self.mvDecoder = Synthesis_mv_net()
        self.warpnet = Warp_net()
        self.resEncoder = Analysis_net()
        self.resDecoder = Synthesis_net()
        self.respriorEncoder = Analysis_prior_net()
        self.respriorDecoder = Synthesis_prior_net()
        self.bitEstimator_z = BitEstimator(out_channel_N)
        self.bitEstimator_mv = BitEstimator(out_channel_mv)
        # self.flow_warp = Resample2d()
        # self.bitEstimator_feature = BitEstimator(out_channel_M)
        self.warp_weight = 0
        # Symbol range assumed by the arithmetic-coding helpers below.
        self.mxrange = 150
        # When True (eval only), replace estimated bits with actual torchac bits.
        self.calrealbits = False

    def forwardFirstFrame(self, x):
        # NOTE(review): self.imageCompressor and self.bitEstimator are never
        # assigned in __init__ (commented out above); calling this raises
        # AttributeError — confirm intent before using it.
        output, bittrans = self.imageCompressor(x)
        cost = self.bitEstimator(bittrans)
        return output, cost

    def motioncompensation(self, ref, mv):
        """Warp *ref* by motion field *mv* and refine with the warp network."""
        warpframe = flow_warp(ref, mv)
        inputfeature = torch.cat((warpframe, ref), 1)
        prediction = self.warpnet(inputfeature) + warpframe
        return prediction, warpframe

    def forward(self, input_image, referframe, quant_noise_feature=None, quant_noise_z=None, quant_noise_mv=None):
        """Compress *input_image* relative to *referframe*.

        Returns (clipped_recon_image, mse_loss, warploss, interloss,
        bpp_feature, bpp_z, bpp_mv, bpp).
        """
        estmv = self.opticFlow(input_image, referframe)
        mvfeature = self.mvEncoder(estmv)
        # Quantization: additive uniform noise during training, rounding at eval.
        if self.training:
            quant_mv = mvfeature + quant_noise_mv
        else:
            quant_mv = torch.round(mvfeature)
        quant_mv_upsample = self.mvDecoder(quant_mv)
        prediction, warpframe = self.motioncompensation(referframe, quant_mv_upsample)
        input_residual = input_image - prediction
        feature = self.resEncoder(input_residual)
        batch_size = feature.size()[0]
        z = self.respriorEncoder(feature)
        if self.training:
            compressed_z = z + quant_noise_z
        else:
            compressed_z = torch.round(z)
        recon_sigma = self.respriorDecoder(compressed_z)
        feature_renorm = feature
        if self.training:
            compressed_feature_renorm = feature_renorm + quant_noise_feature
        else:
            compressed_feature_renorm = torch.round(feature_renorm)
        recon_res = self.resDecoder(compressed_feature_renorm)
        recon_image = prediction + recon_res
        clipped_recon_image = recon_image.clamp(0., 1.)
        # Distortion terms.
        mse_loss = torch.mean((recon_image - input_image).pow(2))
        warploss = torch.mean((warpframe - input_image).pow(2))
        interloss = torch.mean((prediction - input_image).pow(2))

        # --- rate estimation helpers (closures over self) ---

        def feature_probs_based_sigma(feature, sigma):
            """Bits for the residual features under a Laplace(0, sigma) model."""

            def getrealbitsg(x, gaussian):
                cdfs = []
                x = x + self.mxrange
                n, c, h, w = x.shape
                for i in range(-self.mxrange, self.mxrange):
                    cdfs.append(gaussian.cdf(i - 0.5).view(n, c, h, w, 1))
                cdfs = torch.cat(cdfs, 4).cpu().detach()
                byte_stream = torchac.encode_float_cdf(cdfs, x.cpu().detach().to(torch.int16), check_input_bounds=True)
                real_bits = torch.from_numpy(np.array([len(byte_stream) * 8])).float().cuda()
                sym_out = torchac.decode_float_cdf(cdfs, byte_stream)
                return sym_out - self.mxrange, real_bits

            mu = torch.zeros_like(sigma)
            sigma = sigma.clamp(1e-5, 1e10)
            gaussian = torch.distributions.laplace.Laplace(mu, sigma)
            probs = gaussian.cdf(feature + 0.5) - gaussian.cdf(feature - 0.5)
            total_bits = torch.sum(torch.clamp(-1.0 * torch.log(probs + 1e-5) / math.log(2.0), 0, 50))
            if self.calrealbits and not self.training:
                decodedx, real_bits = getrealbitsg(feature, gaussian)
                total_bits = real_bits
            return total_bits, probs

        def iclr18_estrate_bits_z(z):
            """Bits for the hyperprior z under the learned factorized model."""

            def getrealbits(x):
                cdfs = []
                x = x + self.mxrange
                n, c, h, w = x.shape
                for i in range(-self.mxrange, self.mxrange):
                    cdfs.append(self.bitEstimator_z(i - 0.5).view(1, c, 1, 1, 1).repeat(1, 1, h, w, 1))
                cdfs = torch.cat(cdfs, 4).cpu().detach()
                byte_stream = torchac.encode_float_cdf(cdfs, x.cpu().detach().to(torch.int16), check_input_bounds=True)
                real_bits = torch.sum(torch.from_numpy(np.array([len(byte_stream) * 8])).float().cuda())
                sym_out = torchac.decode_float_cdf(cdfs, byte_stream)
                return sym_out - self.mxrange, real_bits

            prob = self.bitEstimator_z(z + 0.5) - self.bitEstimator_z(z - 0.5)
            total_bits = torch.sum(torch.clamp(-1.0 * torch.log(prob + 1e-5) / math.log(2.0), 0, 50))
            if self.calrealbits and not self.training:
                decodedx, real_bits = getrealbits(z)
                total_bits = real_bits
            return total_bits, prob

        def iclr18_estrate_bits_mv(mv):
            """Bits for the quantized motion vectors under the learned model."""

            def getrealbits(x):
                cdfs = []
                x = x + self.mxrange
                n, c, h, w = x.shape
                for i in range(-self.mxrange, self.mxrange):
                    cdfs.append(self.bitEstimator_mv(i - 0.5).view(1, c, 1, 1, 1).repeat(1, 1, h, w, 1))
                cdfs = torch.cat(cdfs, 4).cpu().detach()
                byte_stream = torchac.encode_float_cdf(cdfs, x.cpu().detach().to(torch.int16), check_input_bounds=True)
                real_bits = torch.sum(torch.from_numpy(np.array([len(byte_stream) * 8])).float().cuda())
                sym_out = torchac.decode_float_cdf(cdfs, byte_stream)
                return sym_out - self.mxrange, real_bits

            prob = self.bitEstimator_mv(mv + 0.5) - self.bitEstimator_mv(mv - 0.5)
            total_bits = torch.sum(torch.clamp(-1.0 * torch.log(prob + 1e-5) / math.log(2.0), 0, 50))
            if self.calrealbits and not self.training:
                decodedx, real_bits = getrealbits(mv)
                total_bits = real_bits
            return total_bits, prob

        total_bits_feature, _ = feature_probs_based_sigma(compressed_feature_renorm, recon_sigma)
        total_bits_z, _ = iclr18_estrate_bits_z(compressed_z)
        total_bits_mv, _ = iclr18_estrate_bits_mv(quant_mv)
        # Bits per pixel, normalized over batch and spatial dims.
        im_shape = input_image.size()
        bpp_feature = total_bits_feature / (batch_size * im_shape[2] * im_shape[3])
        bpp_z = total_bits_z / (batch_size * im_shape[2] * im_shape[3])
        bpp_mv = total_bits_mv / (batch_size * im_shape[2] * im_shape[3])
        bpp = bpp_feature + bpp_z + bpp_mv
        return clipped_recon_image, mse_loss, warploss, interloss, bpp_feature, bpp_z, bpp_mv, bpp
from boto import exception
from django.core.exceptions import ValidationError
from flask import request
from rest_framework import status as http_status
import addons.myminio.settings as settings
from addons.base import generic_views
from addons.myminio import SHORT_NAME, FULL_NAME
from addons.myminio import utils
from addons.myminio.serializer import MyMinIOSerializer
from admin.rdm_addons.decorators import must_be_rdm_addons_allowed
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from osf.models import ExternalAccount
from website.project.decorators import (
must_have_addon, must_have_permission,
must_be_addon_authorizer,
)
# Generic add-on views instantiated for the My MinIO provider.
myminio_account_list = generic_views.account_list(SHORT_NAME, MyMinIOSerializer)

myminio_import_auth = generic_views.import_auth(SHORT_NAME, MyMinIOSerializer)

myminio_deauthorize_node = generic_views.deauthorize_node(SHORT_NAME)

myminio_get_config = generic_views.get_config(SHORT_NAME, MyMinIOSerializer)


def _set_folder(node_addon, folder, auth):
    """Persist the selected bucket/folder id on the node's add-on settings."""
    node_addon.set_folder(folder['id'], auth=auth)
    node_addon.save()


myminio_set_config = generic_views.set_config(
    SHORT_NAME, FULL_NAME, MyMinIOSerializer, _set_folder)
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
def myminio_folder_list(node_addon, **kwargs):
    """Return the folders (buckets) available under this node's add-on."""
    return node_addon.get_folders()
@must_be_logged_in
@must_be_rdm_addons_allowed(SHORT_NAME)
def myminio_add_user_account(auth, **kwargs):
    """Verify new external-account credentials and attach them to the user.

    Expects JSON ``{"access_key": ..., "secret_key": ...}``.  Returns ``{}``
    on success or ``({'message': ...}, 400)`` when validation fails; raises
    HTTPError(400) when either key is missing from the payload.
    """
    host = settings.HOST
    try:
        access_key = request.json['access_key']
        secret_key = request.json['secret_key']
    except KeyError:
        raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
    if not (access_key and secret_key):
        return {
            'message': 'All the fields above are required.'
        }, http_status.HTTP_400_BAD_REQUEST
    user_info = utils.get_user_info(host, access_key, secret_key)
    if not user_info:
        return {
            'message': ('Unable to access account.\n'
                        'Check to make sure that the above credentials are valid, '
                        'and that they have permission to list buckets.')
        }, http_status.HTTP_400_BAD_REQUEST
    if not utils.can_list(host, access_key, secret_key):
        return {
            'message': ('Unable to list buckets.\n'
                        'Listing buckets is required permission that can be changed via IAM')
        }, http_status.HTTP_400_BAD_REQUEST
    try:
        account = ExternalAccount(
            provider=SHORT_NAME,
            provider_name=FULL_NAME,
            oauth_key=access_key,
            oauth_secret=secret_key,
            provider_id=user_info.id,
            display_name=user_info.display_name,
        )
        account.save()
    except ValidationError:
        # ... or get the old one
        account = ExternalAccount.objects.get(
            provider=SHORT_NAME,
            provider_id=user_info.id
        )
        # Refresh stored credentials if they were rotated.
        if account.oauth_key != access_key or account.oauth_secret != secret_key:
            account.oauth_key = access_key
            account.oauth_secret = secret_key
            account.save()
    assert account is not None
    if not auth.user.external_accounts.filter(id=account.id).exists():
        auth.user.external_accounts.add(account)
    # Ensure My MinIO is enabled.
    # CONSISTENCY FIX: use SHORT_NAME instead of the hard-coded 'myminio'
    # literal, matching the rest of this module.
    auth.user.get_or_add_addon(SHORT_NAME, auth=auth)
    auth.user.save()
    return {}
@must_be_addon_authorizer(SHORT_NAME)
@must_have_addon(SHORT_NAME, 'node')
@must_have_permission('write')
def myminio_create_bucket(auth, node_addon, **kwargs):
    """Create a new bucket named by the JSON field ``bucket_name``.

    Returns ``{}`` on success or ``({'message', 'title'}, 400)`` on any
    validation or boto failure.
    """
    bucket_name = request.json.get('bucket_name', '')
    # bucket_location = request.json.get('bucket_location', '')
    if not utils.validate_bucket_name(bucket_name):
        return {
            'message': 'That bucket name is not valid.',
            'title': 'Invalid bucket name',
        }, http_status.HTTP_400_BAD_REQUEST
    try:
        # utils.create_bucket(node_addon, bucket_name, bucket_location)
        utils.create_bucket(node_addon, bucket_name)
    # BUGFIX: S3CreateError subclasses S3ResponseError in boto2, so the
    # original ordering made the S3CreateError handler unreachable — catch
    # the more specific exception first.
    except exception.S3CreateError as e:
        # boto server errors set .message in their constructor.
        return {
            'message': e.message,
            'title': "Problem creating bucket '{0}'".format(bucket_name),
        }, http_status.HTTP_400_BAD_REQUEST
    except exception.S3ResponseError as e:
        return {
            'message': e.message,
            'title': 'Problem connecting to My MinIO',
        }, http_status.HTTP_400_BAD_REQUEST
    except exception.BotoClientError as e:  # Base class catchall
        # BUGFIX: BotoClientError does not define .message (and Python 3
        # removed BaseException.message), so the original handler raised
        # AttributeError instead of returning the error response.
        return {
            'message': str(e),
            'title': 'Error connecting to My MinIO',
        }, http_status.HTTP_400_BAD_REQUEST
    return {}
from django.core.exceptions import ValidationError
from flask import request
from rest_framework import status as http_status
import addons.myminio.settings as settings
from addons.base import generic_views
from addons.myminio import SHORT_NAME, FULL_NAME
from addons.myminio import utils
from addons.myminio.serializer import MyMinIOSerializer
from admin.rdm_addons.decorators import must_be_rdm_addons_allowed
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from osf.models import ExternalAccount
from website.project.decorators import (
must_have_addon, must_have_permission,
must_be_addon_authorizer,
)
# Standard add-on views produced by the generic_views factory helpers:
# account listing, auth import, node deauthorization and config fetch,
# all bound to this add-on through SHORT_NAME.
myminio_account_list = generic_views.account_list(
    SHORT_NAME,
    MyMinIOSerializer
)
myminio_import_auth = generic_views.import_auth(
    SHORT_NAME,
    MyMinIOSerializer
)
myminio_deauthorize_node = generic_views.deauthorize_node(
    SHORT_NAME
)
myminio_get_config = generic_views.get_config(
    SHORT_NAME,
    MyMinIOSerializer
)
def _set_folder(node_addon, folder, auth):
folder_id = folder['id']
node_addon.set_folder(folder_id, auth=auth)
node_addon.save()
# View that stores the folder selection; persistence is delegated to the
# ``_set_folder`` callback passed as the last argument.
myminio_set_config = generic_views.set_config(
    SHORT_NAME,
    FULL_NAME,
    MyMinIOSerializer,
    _set_folder
)
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
def myminio_folder_list(node_addon, **kwargs):
    """Return the folders available to this node add-on.

    Delegates to ``node_addon.get_folders()``; access control is
    enforced entirely by the decorators.
    """
    return node_addon.get_folders()
@must_be_logged_in
@must_be_rdm_addons_allowed(SHORT_NAME)
def myminio_add_user_account(auth, **kwargs):
    """Verify new external account credentials and add them to the user.

    Reads ``access_key``/``secret_key`` from the JSON body, validates
    them against the configured host, then creates (or re-uses) the
    matching ``ExternalAccount`` and attaches it to ``auth.user``.
    """
    host = settings.HOST
    try:
        access_key = request.json['access_key']
        secret_key = request.json['secret_key']
    except KeyError:
        # Malformed request body: both keys are mandatory.
        raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
    if not (access_key and secret_key):
        return {
            'message': 'All the fields above are required.'
        }, http_status.HTTP_400_BAD_REQUEST
    # The credentials must both authenticate and permit listing buckets.
    user_info = utils.get_user_info(host, access_key, secret_key)
    if not user_info:
        return {
            'message': ('Unable to access account.\n'
                'Check to make sure that the above credentials are valid, '
                'and that they have permission to list buckets.')
        }, http_status.HTTP_400_BAD_REQUEST
    if not utils.can_list(host, access_key, secret_key):
        return {
            'message': ('Unable to list buckets.\n'
                'Listing buckets is required permission that can be changed via IAM')
        }, http_status.HTTP_400_BAD_REQUEST
    try:
        # Try to create a fresh ExternalAccount record ...
        account = ExternalAccount(
            provider=SHORT_NAME,
            provider_name=FULL_NAME,
            oauth_key=access_key,
            oauth_secret=secret_key,
            provider_id=user_info.id,
            display_name=user_info.display_name,
        )
        account.save()
    except ValidationError:
        # ... or get the old one
        account = ExternalAccount.objects.get(
            provider=SHORT_NAME,
            provider_id=user_info.id
        )
        # Refresh the stored credentials if the user supplied new ones.
        if account.oauth_key != access_key or account.oauth_secret != secret_key:
            account.oauth_key = access_key
            account.oauth_secret = secret_key
            account.save()
    assert account is not None
    if not auth.user.external_accounts.filter(id=account.id).exists():
        auth.user.external_accounts.add(account)
    # Ensure My MinIO is enabled.
    auth.user.get_or_add_addon('myminio', auth=auth)
    auth.user.save()
    return {}
@must_be_addon_authorizer(SHORT_NAME)
@must_have_addon('myminio', 'node')
@must_have_permission('write')
def myminio_create_bucket(auth, node_addon, **kwargs):
    """Create a new bucket on the node's My MinIO account.

    Expects ``bucket_name`` in the JSON request body.  Returns an empty
    dict on success, or an error payload plus HTTP 400 when validation
    or the boto call fails.
    """
    bucket_name = request.json.get('bucket_name', '')
    # bucket_location = request.json.get('bucket_location', '')
    if not utils.validate_bucket_name(bucket_name):
        return {
            'message': 'That bucket name is not valid.',
            'title': 'Invalid bucket name',
        }, http_status.HTTP_400_BAD_REQUEST
    try:
        # utils.create_bucket(node_addon, bucket_name, bucket_location)
        utils.create_bucket(node_addon, bucket_name)
    # The boto exception objects used here carry a ``message`` attribute
    # with the error text; surface it with a failure-specific title.
    except exception.S3ResponseError as e:
        return {
            'message': e.message,
            'title': 'Problem connecting to My MinIO',
        }, http_status.HTTP_400_BAD_REQUEST
    except exception.S3CreateError as e:
        return {
            'message': e.message,
            'title': "Problem creating bucket '{0}'".format(bucket_name),
        }, http_status.HTTP_400_BAD_REQUEST
    except exception.BotoClientError as e:  # Base class catchall
        return {
            'message': e.message,
            'title': 'Error connecting to My MinIO',
        }, http_status.HTTP_400_BAD_REQUEST
    return {} | 0.431345 | 0.049245
import pubmed_parser as pp
def test_parsing():
    """Check paragraph text extracted from PMC4266334.xml.

    Verifies the first/fourth body paragraphs and the first abstract
    paragraph returned by ``parse_pubmed_paragraph`` with sub- and
    superscript markers suppressed (empty delimiter pairs).
    """
    article_path = "PMC4266334.xml"
    abs_phars = pp.parse_pubmed_paragraph(article_path, all_paragraph=True,
                                          section='abs',
                                          subscpt=["", ""],
                                          supscpt=["", ""])
    body_phars = pp.parse_pubmed_paragraph(article_path, all_paragraph=True,
                                           section='body',
                                           subscpt=["", ""],
                                           supscpt=["", ""])
    assert body_phars[0]['text'] == 'In the search for metal-based chemotherapeutics with improved properties with ' \
        'respect to platinum-based drugs used in the clinic, ruthenium compounds have ' \
        'emerged as promising candidates. Ruthenium complexes have certain ' \
        'characteristics that make them attractive as potential chemotherapeutics for ' \
        'different diseases. Ruthenium compounds can easily access three different ' \
        'oxidation states (II, III, and possibly IV) in biological fluids. Ruthenium(III) ' \
        'compounds could potentially behave as pro-drugs as they can be reduced to ' \
        'ruthenium(II) derivatives in solid tumor masses where the low content in oxygen ' \
        'may act as a reducing environment. As platinum-based drugs, ruthenium compounds ' \
        'can exchange N and O-donor molecules with the added advantage of the possibility ' \
        'of forming octahedral complexes (of interest in reactions with DNA). Lastly, ' \
        'ruthenium derivatives probably use transferrin to accumulate into tumors due to ' \
        'the similarities with iron. '
    assert body_phars[3]['text'] == "We have reported that nontoxic iminophosphorane or iminophosphane (IM) compounds " \
        "(R3P=N-R′, IM) are useful precursors for the preparation of coordination (N," \
        "N−) or cyclometalated (C,N−) complexes of d8 metals (Au(III), Pd(II), " \
        "and Pt(II)) mono- or heterometallic (selected compounds a–g in Chart 2). "
    assert abs_phars[0]['text'] == 'A series of organometallic ruthenium(II) complexes containing iminophosphorane ' \
        'ligands have been synthesized and characterized. Cationic compounds with chloride ' \
        'as counterion are soluble in water (70–100 mg/mL). Most compounds (especially ' \
        'highly water-soluble 2) are more cytotoxic to a number of human cancer cell lines ' \
        'than cisplatin. Initial mechanistic studies indicate that the cell death type for ' \
        'these compounds is mainly through canonical or caspase-dependent apoptosis, ' \
        'nondependent on p53, and that the compounds do not interact with DNA or inhibit ' \
        'protease cathepsin B. In vivo experiments of 2 on MDA-MB-231 xenografts in ' \
        'NOD.CB17-Prkdc SCID/J mice showed an impressive tumor reduction (shrinkage) of ' \
        '56% after 28 days of treatment (14 doses of 5 mg/kg every other day) with low ' \
        'systemic toxicity. Pharmacokinetic studies showed a quick absorption of 2 in ' \
        'plasma with preferential accumulation in the breast tumor tissues when compared ' \
        'to kidney and liver, which may explain its high efficacy in vivo. '
def test_tagged_abstract_parsing():
    """Check the abstract parsed from a MEDLINE record (15915352.xml)."""
    all_parsed = pp.parse_medline_xml("15915352.xml", subscpt=["", ""], supscpt=["", ""], year_info_only=False)
    assert all_parsed[0]['abstract'] == "Carbamazepine (CBZ) undergoes biotransformation by CYP3A4 and CYP2C8, " \
        "and glucuronide conjugation. There has been no clear demonstration to reveal " \
        "the role of glucuronidation in the disposition of CBZ. We evaluated the " \
        "effect of probenecid, a UDP-glucuronosyltransferase inhibitor, " \
        "on the pharmacokinetics of CBZ in humans. In a randomized, open-label, " \
        "two-way crossover study, ten healthy male subjects were treated twice daily " \
        "for 10 days with 500 mg probenecid or with a matched placebo. On day 6, " \
        "a single dose of 200 mg CBZ was administered orally. Concentrations of CBZ " \
        "and CBZ 10,11-epoxide (CBZ-E) in plasma and urine were measured. Probenecid " \
        "decreased the area under the plasma concentration-time curve (AUC) of CBZ " \
        "from 1253.9 micromol h/l to 1020.7 micromol h/l (P < 0.001) while increasing " \
        "that of CBZ-E from 137.6 micromol h/l to 183.5 micromol h/l (P = 0.033). The " \
        "oral clearance of CBZ was increased by probenecid by 26% (90% confidence " \
        "interval, 17-34%; P < 0.001). Probenecid increased the AUC ratio of " \
        "CBZ-E/CBZ from 0.11 to 0.16 (P < 0.001). However, probenecid had minimal " \
        "effect on the recovery of the conjugated and free forms of CBZ and CBZ-E in " \
        "urine. Although probenecid showed a minimal effect on the glucuronidation of " \
        "CBZ and CBZ-E, it increased CBZ biotransformation to CBZ-E, most likely " \
        "reflecting the induction of CYP3A4 and CYP2C8 activities, in humans. These " \
        "results demonstrate that glucuronide conjugation plays a minor role in the " \
        "metabolism of CBZ and CBZ-E in humans, and that probenecid has an inducing " \
        "effect on the disposition of CBZ." | tests/test_paragraph_parsing.py | import pubmed_parser as pp
def test_parsing():
    """Check paragraph text extracted from PMC4266334.xml.

    Verifies the first/fourth body paragraphs and the first abstract
    paragraph returned by ``parse_pubmed_paragraph`` with sub- and
    superscript markers suppressed (empty delimiter pairs).
    """
    article_path = "PMC4266334.xml"
    abs_phars = pp.parse_pubmed_paragraph(article_path, all_paragraph=True,
                                          section='abs',
                                          subscpt=["", ""],
                                          supscpt=["", ""])
    body_phars = pp.parse_pubmed_paragraph(article_path, all_paragraph=True,
                                           section='body',
                                           subscpt=["", ""],
                                           supscpt=["", ""])
    assert body_phars[0]['text'] == 'In the search for metal-based chemotherapeutics with improved properties with ' \
        'respect to platinum-based drugs used in the clinic, ruthenium compounds have ' \
        'emerged as promising candidates. Ruthenium complexes have certain ' \
        'characteristics that make them attractive as potential chemotherapeutics for ' \
        'different diseases. Ruthenium compounds can easily access three different ' \
        'oxidation states (II, III, and possibly IV) in biological fluids. Ruthenium(III) ' \
        'compounds could potentially behave as pro-drugs as they can be reduced to ' \
        'ruthenium(II) derivatives in solid tumor masses where the low content in oxygen ' \
        'may act as a reducing environment. As platinum-based drugs, ruthenium compounds ' \
        'can exchange N and O-donor molecules with the added advantage of the possibility ' \
        'of forming octahedral complexes (of interest in reactions with DNA). Lastly, ' \
        'ruthenium derivatives probably use transferrin to accumulate into tumors due to ' \
        'the similarities with iron. '
    assert body_phars[3]['text'] == "We have reported that nontoxic iminophosphorane or iminophosphane (IM) compounds " \
        "(R3P=N-R′, IM) are useful precursors for the preparation of coordination (N," \
        "N−) or cyclometalated (C,N−) complexes of d8 metals (Au(III), Pd(II), " \
        "and Pt(II)) mono- or heterometallic (selected compounds a–g in Chart 2). "
    assert abs_phars[0]['text'] == 'A series of organometallic ruthenium(II) complexes containing iminophosphorane ' \
        'ligands have been synthesized and characterized. Cationic compounds with chloride ' \
        'as counterion are soluble in water (70–100 mg/mL). Most compounds (especially ' \
        'highly water-soluble 2) are more cytotoxic to a number of human cancer cell lines ' \
        'than cisplatin. Initial mechanistic studies indicate that the cell death type for ' \
        'these compounds is mainly through canonical or caspase-dependent apoptosis, ' \
        'nondependent on p53, and that the compounds do not interact with DNA or inhibit ' \
        'protease cathepsin B. In vivo experiments of 2 on MDA-MB-231 xenografts in ' \
        'NOD.CB17-Prkdc SCID/J mice showed an impressive tumor reduction (shrinkage) of ' \
        '56% after 28 days of treatment (14 doses of 5 mg/kg every other day) with low ' \
        'systemic toxicity. Pharmacokinetic studies showed a quick absorption of 2 in ' \
        'plasma with preferential accumulation in the breast tumor tissues when compared ' \
        'to kidney and liver, which may explain its high efficacy in vivo. '
def test_tagged_abstract_parsing():
    """Check the abstract parsed from a MEDLINE record (15915352.xml)."""
    all_parsed = pp.parse_medline_xml("15915352.xml", subscpt=["", ""], supscpt=["", ""], year_info_only=False)
    assert all_parsed[0]['abstract'] == "Carbamazepine (CBZ) undergoes biotransformation by CYP3A4 and CYP2C8, " \
        "and glucuronide conjugation. There has been no clear demonstration to reveal " \
        "the role of glucuronidation in the disposition of CBZ. We evaluated the " \
        "effect of probenecid, a UDP-glucuronosyltransferase inhibitor, " \
        "on the pharmacokinetics of CBZ in humans. In a randomized, open-label, " \
        "two-way crossover study, ten healthy male subjects were treated twice daily " \
        "for 10 days with 500 mg probenecid or with a matched placebo. On day 6, " \
        "a single dose of 200 mg CBZ was administered orally. Concentrations of CBZ " \
        "and CBZ 10,11-epoxide (CBZ-E) in plasma and urine were measured. Probenecid " \
        "decreased the area under the plasma concentration-time curve (AUC) of CBZ " \
        "from 1253.9 micromol h/l to 1020.7 micromol h/l (P < 0.001) while increasing " \
        "that of CBZ-E from 137.6 micromol h/l to 183.5 micromol h/l (P = 0.033). The " \
        "oral clearance of CBZ was increased by probenecid by 26% (90% confidence " \
        "interval, 17-34%; P < 0.001). Probenecid increased the AUC ratio of " \
        "CBZ-E/CBZ from 0.11 to 0.16 (P < 0.001). However, probenecid had minimal " \
        "effect on the recovery of the conjugated and free forms of CBZ and CBZ-E in " \
        "urine. Although probenecid showed a minimal effect on the glucuronidation of " \
        "CBZ and CBZ-E, it increased CBZ biotransformation to CBZ-E, most likely " \
        "reflecting the induction of CYP3A4 and CYP2C8 activities, in humans. These " \
        "results demonstrate that glucuronide conjugation plays a minor role in the " \
        "metabolism of CBZ and CBZ-E in humans, and that probenecid has an inducing " \
        "effect on the disposition of CBZ." | 0.619932 | 0.633779
#Library used to build the graphical interface
import tkinter as tk
#Function used to run a shell command
from subprocess import call
#Module used to create threads
import threading
#Module used to interact with the operating system
import os
#Module used to guess a file's media type
import mimetypes
#Module used to access the interpreter's variables (argv)
import sys
mimetypes.init()
# A single argument (just the script name) means the main menu should be
# launched.
if (len(sys.argv)) == 1:
    # Detect a connected USB drive by listing the subdirectories of
    # /media/pi, which is where USB devices get mounted.
    media = "/media/pi"
    # If no USB drive has ever been mounted, /media/pi may not exist,
    # hence the try block.
    try:
        subDirs = [dir.name for dir in os.scandir(media) if dir.is_dir()]
    except:
        subDirs = []
    usb = []
    # Applications that can be launched: ["App name", command-list].
    # Some examples of streaming services that could be enabled:
    #netflix = ["Netflix", ["./start.sh", "www.netflix.com"]]
    #amazonPrime = ["Amazon", ["./start.sh", "www.primevideo.com"]]
    mubi = ["Mubi", ["./start.sh", "www.mubi.com"]]
    spotify = ["Spotify", ["./start.sh", "http://open.spotify.com"]]
    # At least one subdirectory under /media/pi means at least one USB
    # drive is connected.  This code handles a single drive but can be
    # adapted for more.
    if len(subDirs) > 0:
        # Get the name of each mounted drive.
        for name in subDirs:
            subdir = os.path.join(media, name)
            # Collect the files stored on the drive.
            files = [f for f in os.scandir(subdir) if f.is_file()]
            mediaType = []
            fileTypes= []
            # Determine the MIME type of every file.
            for f in files:
                fileType = mimetypes.guess_type(os.path.join(subdir, f))[0]
                if fileType != None:
                    fileType = fileType.split("/")[0]
                    fileTypes.append(fileType)
                    mediaType.append(fileType) if fileType not in mediaType else mediaType
            # Create new menu options according to the media types found.
            files = [f.name for f in files]
            if len(mediaType) == 1 and mediaType[0] == 'audio':
                usb.append(["USB-" + name, ["./action.sh", subdir, "audio", " ".join(files)]])
            elif len(mediaType) == 1 and mediaType[0] == 'video':
                usb.append(["USB-" + name, ["./submenu.sh", subdir, " ".join(files), "video", "listFiles"]])
            elif len(mediaType) == 1 and mediaType[0] == 'image':
                usb.append(["USB-" + name, ["./action.sh", subdir, "image", " ".join(files)]])
            elif len(mediaType) > 1:
                usb.append(["USB-" + name, ["./submenuMixed.sh", subdir, " ".join(files), " ".join(fileTypes)]])
    # No USB drive connected: offer an option to rescan the path.
    else:
        usb.append(["Scan USB", ["./restart.sh"]])
    menuName = "Menú Principal"
    # List of streaming options plus the USB entries.
    action_list = [mubi, spotify] + usb
# Build a submenu depending on the files found on the USB drive.  This
# menu is shown when the drive mixes files of different media types.
elif (len(sys.argv)) == 4:
    # Unpack the arguments.
    subdir = sys.argv[1]
    files = sys.argv[2].split(" ")
    mediaType = sys.argv[3].split(" ")
    # Split the files by media type:
    videoFiles = []
    audioFiles = []
    imageFiles = []
    for i in range(len(files)):
        videoFiles.append(files[i]) if mediaType[i] == "video" else \
        audioFiles.append(files[i]) if mediaType[i] == "audio" else \
        imageFiles.append(files[i]) if mediaType[i] == "image" else None
    action_list = []
    # Build the interface options according to the media types present
    # on the drive.
    if videoFiles:
        action_list.append(["Reproducir Videos", \
            ["./submenu.sh", subdir, " ".join(videoFiles), "video", "listFiles"]])
    if audioFiles:
        print(audioFiles)
        action_list.append(["Reproducir toda la música", \
            ["./action.sh", subdir, "audio", " ".join(audioFiles)]])
        action_list.append(["Seleccionar canción", \
            ["./submenu.sh", subdir," ".join(audioFiles), "audio", "listFiles"]])
    if imageFiles:
        action_list.append(["Reproducir todas las imágenes", \
            ["./action.sh", subdir, "image", " ".join(imageFiles)]])
        action_list.append(["Seleccionar la imagen", \
            ["./submenu.sh", subdir, " ".join(imageFiles), "image", "listFiles"]])
    action_list.append(["Atrás", ["./action.sh", "kill"]])
    menuName = "Elige la accion"
# Five arguments: list the media files so they can be chosen one by one.
# This menu is shown when the drive only holds video files, or when the
# user asked to pick the exact file to play.
elif (len(sys.argv)) == 5:
    subdir = sys.argv[1]
    files = sys.argv[2].split(" ")
    mode = sys.argv[3]
    # List the media files for the requested type.
    if mode == "video":
        action_list = [[f, ["./action.sh", subdir, "video", f]] for f in files]
    if mode == "audio":
        action_list = [[f, ["./action.sh", subdir, "audio", f]] for f in files]
    if mode == "image":
        action_list = [[f, ["./action.sh", subdir, "image", f]] for f in files]
    action_list.append(["Atrás", ["./action.sh", "kill"]])
    menuName = "Elige el archivo"
# Indices into each action_list entry.
APP_NAME = 0
APP_CMD = 1
'''
Clase que hereda las características de threading.Thread para
ejecutar las funciones en nuevos hilos de ejecución.
Este está basado en la sección "Using Python for Automation and
Productivity" del libro "Raspberry Pi Cookbook for Python Programmers"
de <NAME>
'''
class runApplicatinThread(threading.Thread):
    """Run one menu action (an external command) on a worker thread.

    Running ``subprocess.call`` off the main thread keeps the Tk main
    loop responsive while the launched program runs.
    """

    def __init__(self, app_cmd):
        threading.Thread.__init__(self)
        self.cmd = app_cmd  # argument list handed to subprocess.call

    def run(self):
        try:
            call(self.cmd)
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; only real call() failures belong here.
            print("No se puede correr: %s" % self.cmd)
'''
Se crea una nueva clase que sirve como base para la creación de los
botones de la interfaz. Cada item en el menu se compone de:
-Nombre de la acción: APP_NAME
-Acción a realizar: APP_CMD
Ambos campos se encuentran almacenados en la lista action_list, creada
en alguno de los campos anteriores
'''
class appButtons:
    """One menu button bound to an entry of ``action_list``.

    Each entry is ``[name, command]`` (indices APP_NAME/APP_CMD);
    clicking the button runs the command on a background thread.
    """

    def __init__(self, gui, app_index):
        btn = tk.Button(gui, text=action_list[app_index][APP_NAME], width=30, command=self.startApp)
        btn.pack()
        self.app_cmd = action_list[app_index][APP_CMD]

    def startApp(self):
        """Run this option's command on a new thread."""
        # Fixed typo in the log label ("APP_CDM" -> "APP_CMD").
        print("APP_CMD: %s" % self.app_cmd)
        runApplicatinThread(self.app_cmd).start()
root = tk.Tk()
root.title(menuName)
# Create one button per available action.  ``appButtons`` is a
# module-level class; the original looked it up as an attribute of
# ``runApplicatinThread``, which raised AttributeError on the first
# iteration.
for index, app in enumerate(action_list):
    appButtons(root, index)
root.mainloop() | menu.py | #Biblioteca para crear la interfaz gráfica
import tkinter as tk
#Function used to run a shell command
from subprocess import call
#Module used to create threads
import threading
#Module used to interact with the operating system
import os
#Module used to guess a file's media type
import mimetypes
#Module used to access the interpreter's variables (argv)
import sys
mimetypes.init()
# A single argument (just the script name) means the main menu should be
# launched.
if (len(sys.argv)) == 1:
    # Detect a connected USB drive by listing the subdirectories of
    # /media/pi, which is where USB devices get mounted.
    media = "/media/pi"
    # If no USB drive has ever been mounted, /media/pi may not exist,
    # hence the try block.
    try:
        subDirs = [dir.name for dir in os.scandir(media) if dir.is_dir()]
    except:
        subDirs = []
    usb = []
    # Applications that can be launched: ["App name", command-list].
    # Some examples of streaming services that could be enabled:
    #netflix = ["Netflix", ["./start.sh", "www.netflix.com"]]
    #amazonPrime = ["Amazon", ["./start.sh", "www.primevideo.com"]]
    mubi = ["Mubi", ["./start.sh", "www.mubi.com"]]
    spotify = ["Spotify", ["./start.sh", "http://open.spotify.com"]]
    # At least one subdirectory under /media/pi means at least one USB
    # drive is connected.  This code handles a single drive but can be
    # adapted for more.
    if len(subDirs) > 0:
        # Get the name of each mounted drive.
        for name in subDirs:
            subdir = os.path.join(media, name)
            # Collect the files stored on the drive.
            files = [f for f in os.scandir(subdir) if f.is_file()]
            mediaType = []
            fileTypes= []
            # Determine the MIME type of every file.
            for f in files:
                fileType = mimetypes.guess_type(os.path.join(subdir, f))[0]
                if fileType != None:
                    fileType = fileType.split("/")[0]
                    fileTypes.append(fileType)
                    mediaType.append(fileType) if fileType not in mediaType else mediaType
            # Create new menu options according to the media types found.
            files = [f.name for f in files]
            if len(mediaType) == 1 and mediaType[0] == 'audio':
                usb.append(["USB-" + name, ["./action.sh", subdir, "audio", " ".join(files)]])
            elif len(mediaType) == 1 and mediaType[0] == 'video':
                usb.append(["USB-" + name, ["./submenu.sh", subdir, " ".join(files), "video", "listFiles"]])
            elif len(mediaType) == 1 and mediaType[0] == 'image':
                usb.append(["USB-" + name, ["./action.sh", subdir, "image", " ".join(files)]])
            elif len(mediaType) > 1:
                usb.append(["USB-" + name, ["./submenuMixed.sh", subdir, " ".join(files), " ".join(fileTypes)]])
    # No USB drive connected: offer an option to rescan the path.
    else:
        usb.append(["Scan USB", ["./restart.sh"]])
    menuName = "Menú Principal"
    # List of streaming options plus the USB entries.
    action_list = [mubi, spotify] + usb
# Build a submenu depending on the files found on the USB drive.  This
# menu is shown when the drive mixes files of different media types.
elif (len(sys.argv)) == 4:
    # Unpack the arguments.
    subdir = sys.argv[1]
    files = sys.argv[2].split(" ")
    mediaType = sys.argv[3].split(" ")
    # Split the files by media type:
    videoFiles = []
    audioFiles = []
    imageFiles = []
    for i in range(len(files)):
        videoFiles.append(files[i]) if mediaType[i] == "video" else \
        audioFiles.append(files[i]) if mediaType[i] == "audio" else \
        imageFiles.append(files[i]) if mediaType[i] == "image" else None
    action_list = []
    # Build the interface options according to the media types present
    # on the drive.
    if videoFiles:
        action_list.append(["Reproducir Videos", \
            ["./submenu.sh", subdir, " ".join(videoFiles), "video", "listFiles"]])
    if audioFiles:
        print(audioFiles)
        action_list.append(["Reproducir toda la música", \
            ["./action.sh", subdir, "audio", " ".join(audioFiles)]])
        action_list.append(["Seleccionar canción", \
            ["./submenu.sh", subdir," ".join(audioFiles), "audio", "listFiles"]])
    if imageFiles:
        action_list.append(["Reproducir todas las imágenes", \
            ["./action.sh", subdir, "image", " ".join(imageFiles)]])
        action_list.append(["Seleccionar la imagen", \
            ["./submenu.sh", subdir, " ".join(imageFiles), "image", "listFiles"]])
    action_list.append(["Atrás", ["./action.sh", "kill"]])
    menuName = "Elige la accion"
# Five arguments: list the media files so they can be chosen one by one.
# This menu is shown when the drive only holds video files, or when the
# user asked to pick the exact file to play.
elif (len(sys.argv)) == 5:
    subdir = sys.argv[1]
    files = sys.argv[2].split(" ")
    mode = sys.argv[3]
    # List the media files for the requested type.
    if mode == "video":
        action_list = [[f, ["./action.sh", subdir, "video", f]] for f in files]
    if mode == "audio":
        action_list = [[f, ["./action.sh", subdir, "audio", f]] for f in files]
    if mode == "image":
        action_list = [[f, ["./action.sh", subdir, "image", f]] for f in files]
    action_list.append(["Atrás", ["./action.sh", "kill"]])
    menuName = "Elige el archivo"
# Indices into each action_list entry.
APP_NAME = 0
APP_CMD = 1
'''
Clase que hereda las características de threading.Thread para
ejecutar las funciones en nuevos hilos de ejecución.
Este está basado en la sección "Using Python for Automation and
Productivity" del libro "Raspberry Pi Cookbook for Python Programmers"
de <NAME>
'''
class runApplicatinThread(threading.Thread):
    """Run one menu action (an external command) on a worker thread.

    Running ``subprocess.call`` off the main thread keeps the Tk main
    loop responsive while the launched program runs.
    """

    def __init__(self, app_cmd):
        threading.Thread.__init__(self)
        self.cmd = app_cmd  # argument list handed to subprocess.call

    def run(self):
        try:
            call(self.cmd)
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; only real call() failures belong here.
            print("No se puede correr: %s" % self.cmd)
'''
Se crea una nueva clase que sirve como base para la creación de los
botones de la interfaz. Cada item en el menu se compone de:
-Nombre de la acción: APP_NAME
-Acción a realizar: APP_CMD
Ambos campos se encuentran almacenados en la lista action_list, creada
en alguno de los campos anteriores
'''
class appButtons:
    """One menu button bound to an entry of ``action_list``.

    Each entry is ``[name, command]`` (indices APP_NAME/APP_CMD);
    clicking the button runs the command on a background thread.
    """

    def __init__(self, gui, app_index):
        btn = tk.Button(gui, text=action_list[app_index][APP_NAME], width=30, command=self.startApp)
        btn.pack()
        self.app_cmd = action_list[app_index][APP_CMD]

    def startApp(self):
        """Run this option's command on a new thread."""
        # Fixed typo in the log label ("APP_CDM" -> "APP_CMD").
        print("APP_CMD: %s" % self.app_cmd)
        runApplicatinThread(self.app_cmd).start()
root = tk.Tk()
root.title(menuName)
# Create one button per available action.  ``appButtons`` is a
# module-level class; the original looked it up as an attribute of
# ``runApplicatinThread``, which raised AttributeError on the first
# iteration.
for index, app in enumerate(action_list):
    appButtons(root, index)
root.mainloop() | 0.087024 | 0.244431 |
__author__ = "<NAME> <<EMAIL>>"
import datetime
import os
import xml.etree.ElementTree as ElementTree
from dateutil import parser
from icalendar import Calendar, Event
import requests
class Convert():
    def __init__(self, filename):
        # Path to the timetable XML file (get_subjects also accepts the
        # XML document itself as a string here).
        self.filename = filename
def get_subjects(self):
result = []
try:
tree = ElementTree.parse(self.filename)
root = tree.getroot()
except:
tree = ElementTree.fromstring(self.filename)
root = tree
for subject in root.iter('subject'):
name = subject.find("name").get("value")
single_subject = {}
single_subject["name"] = name
single_subject["professor"] = subject.find("professor").get("value")
single_subject["info"] = list(map(
lambda x: {
"day": x.get("day"),
"place" : x.get("place"),
"startAt": '{:02d}:{:02d}'.format(*divmod(int(x.get("starttime")) * 5, 60)),
"endAt": '{:02d}:{:02d}'.format(*divmod(int(x.get("endtime")) * 5, 60))
}, subject.find("time").findall("data")
)
)
result.append(single_subject)
return result
    def get_calendar(self, timetable, start_date, end_date):
        """Write ``calendar.ics`` with weekly events for the timetable.

        One event is created per class meeting, starting at the first
        occurrence of its weekday on/after ``start_date`` and repeating
        weekly until ``end_date``.
        """
        cal = Calendar()
        for item in timetable:
            for time in item["info"]:
                event = Event()
                event.add('summary', item["name"])
                event.add('dtstart', parser.parse("%s %s" % (self.get_nearest_date(start_date, time["day"]), time["startAt"])))
                event.add('dtend', parser.parse("%s %s" % (self.get_nearest_date(start_date, time["day"]), time["endAt"])))
                event.add('rrule', {'freq': 'WEEKLY', 'until': parser.parse(end_date)})
                cal.add_component(event)
        # Write the serialized calendar into the working directory.
        f = open(os.path.join('', 'calendar.ics'), 'wb')
        f.write(cal.to_ical())
        f.close()
        # "작업 완료!" means "Job done!" (user-facing string, kept as-is).
        print("작업 완료!🙌")
    def get_nearest_date(self, start_date, weekday):
        """Return the first date on/after ``start_date`` that falls on
        ``weekday`` (datetime convention: 0 = Monday).
        """
        start_date = parser.parse(start_date)
        weekday = int(weekday)
        if start_date.weekday() >= weekday:
            # Already on or past the target weekday this week: jump a
            # week ahead only when strictly past, then step back to it.
            if start_date.weekday() > weekday: start_date += datetime.timedelta(days=7)
            start_date -= datetime.timedelta(start_date.weekday() - weekday)
        else:
            # Target weekday is still ahead within the current week.
            start_date += datetime.timedelta(weekday - start_date.weekday())
        return start_date | convert.py | __author__ = "<NAME> <<EMAIL>>"
import datetime
import os
import xml.etree.ElementTree as ElementTree
from dateutil import parser
from icalendar import Calendar, Event
import requests
class Convert():
    def __init__(self, filename):
        # Path to the timetable XML file (get_subjects also accepts the
        # XML document itself as a string here).
        self.filename = filename
def get_subjects(self):
result = []
try:
tree = ElementTree.parse(self.filename)
root = tree.getroot()
except:
tree = ElementTree.fromstring(self.filename)
root = tree
for subject in root.iter('subject'):
name = subject.find("name").get("value")
single_subject = {}
single_subject["name"] = name
single_subject["professor"] = subject.find("professor").get("value")
single_subject["info"] = list(map(
lambda x: {
"day": x.get("day"),
"place" : x.get("place"),
"startAt": '{:02d}:{:02d}'.format(*divmod(int(x.get("starttime")) * 5, 60)),
"endAt": '{:02d}:{:02d}'.format(*divmod(int(x.get("endtime")) * 5, 60))
}, subject.find("time").findall("data")
)
)
result.append(single_subject)
return result
    def get_calendar(self, timetable, start_date, end_date):
        """Write ``calendar.ics`` with weekly events for the timetable.

        One event is created per class meeting, starting at the first
        occurrence of its weekday on/after ``start_date`` and repeating
        weekly until ``end_date``.
        """
        cal = Calendar()
        for item in timetable:
            for time in item["info"]:
                event = Event()
                event.add('summary', item["name"])
                event.add('dtstart', parser.parse("%s %s" % (self.get_nearest_date(start_date, time["day"]), time["startAt"])))
                event.add('dtend', parser.parse("%s %s" % (self.get_nearest_date(start_date, time["day"]), time["endAt"])))
                event.add('rrule', {'freq': 'WEEKLY', 'until': parser.parse(end_date)})
                cal.add_component(event)
        # Write the serialized calendar into the working directory.
        f = open(os.path.join('', 'calendar.ics'), 'wb')
        f.write(cal.to_ical())
        f.close()
        # "작업 완료!" means "Job done!" (user-facing string, kept as-is).
        print("작업 완료!🙌")
    def get_nearest_date(self, start_date, weekday):
        """Return the first date on/after ``start_date`` that falls on
        ``weekday`` (datetime convention: 0 = Monday).
        """
        start_date = parser.parse(start_date)
        weekday = int(weekday)
        if start_date.weekday() >= weekday:
            # Already on or past the target weekday this week: jump a
            # week ahead only when strictly past, then step back to it.
            if start_date.weekday() > weekday: start_date += datetime.timedelta(days=7)
            start_date -= datetime.timedelta(start_date.weekday() - weekday)
        else:
            # Target weekday is still ahead within the current week.
            start_date += datetime.timedelta(weekday - start_date.weekday())
        return start_date | 0.258139 | 0.089177
from util.plans import Leg
class DNASeqLeg(Leg):
    """Common base for the DNA-sequencing legs of a plan step.

    Provides helpers for pointing every primary sample handle at a
    single input sample and for merging extra sample I/O entries.
    """

    # Operation I/O handles that all carry the primary sample.
    primary_handles = [
        "Yeast Library",
        "Plasmid Library",
        "Zymoprepped sample",
        "Exonucleased sample",
        "Template",
        "Fragment",
        "Gel",
        "qPCR sample in",
        "qPCR sample out",
        "DNA library in",
        "DNA library out"
    ]

    def __init__(self, plan_step, cursor):
        super().__init__(plan_step, cursor)

    def set_yeast(self, input_sample_uri):
        """Resolve ``input_sample_uri`` via the plan and assign it."""
        self.set_yeast_from_sample(self.plan.input_sample(input_sample_uri))

    def set_yeast_from_sample(self, input_sample):
        """Point every primary handle at ``input_sample``."""
        for handle in self.primary_handles:
            self.sample_io[handle] = input_sample

    def set_sample_io(self, io_obj):
        """Rebind ``sample_io`` to a merge in which ``io_obj`` wins."""
        merged = dict(self.sample_io)
        merged.update(io_obj)
        self.sample_io = merged
class ExtractDNALeg(DNASeqLeg):
    """Leg that extracts plasmid DNA from a yeast sample and digests it."""
    # Ordered operation types this leg schedules.
    leg_order = [
        {"name": "Treat With Zymolyase", "category": "Next Gen Prep"},
        {"name": "Yeast Plasmid Extraction", "category": "Next Gen Prep"},
        {"name": "Digest Genomic DNA", "category": "Next Gen Prep"}
    ]
    def __init__(self, plan_step, cursor):
        super().__init__(plan_step, cursor)
class QPCRLeg(DNASeqLeg):
    """Leg that produces a sequencing fragment via preparative qPCR.

    The first step's operation type depends on the ``plates`` flag
    passed to the constructor.
    """

    # Ordered operation types this leg schedules; the first name is
    # filled in per instance by __init__.
    leg_order = [
        {"name": None, "category": "Preparative qPCR"},
        {"name": "Run Pre-poured Gel", "category": "Next Gen Prep"},
        {"name": "Extract Gel Slice (NGS)", "category": "Next Gen Prep"},
        {"name": "Purify Gel Slice (NGS)", "category": "Next Gen Prep"}
    ]

    def __init__(self, plan_step, cursor, plates=False):
        qpcr_operation_type = "Make qPCR Fragment"
        if plates:
            qpcr_operation_type += " WITH PLATES"
        # Rewrite the first step on a per-instance copy of leg_order.
        # The original wrote through to the shared class-level dict, so
        # constructing one plate-based leg silently renamed the step for
        # every other QPCRLeg instance as well.
        self.leg_order = [dict(step) for step in type(self).leg_order]
        self.leg_order[0]["name"] = qpcr_operation_type
        super().__init__(plan_step, cursor)
class DiluteLibraryLeg(DNASeqLeg):
    """Leg that measures library concentration and dilutes it to 4nM."""
    # Ordered operation types this leg schedules.
    leg_order = [
        {"name": "Qubit concentration", "category": "Next Gen Prep"},
        {"name": "Dilute to 4nM", "category": "Next Gen Prep"}
    ]
    def __init__(self, plan_step, cursor):
        super().__init__(plan_step, cursor) | menagerie/util/dna_seq_legs.py | from util.plans import Leg
class DNASeqLeg(Leg):
    """Common base for the DNA-sequencing legs of a plan step.

    Provides helpers for pointing every primary sample handle at a
    single input sample and for merging extra sample I/O entries.
    """

    # Operation I/O handles that all carry the primary sample.
    primary_handles = [
        "Yeast Library",
        "Plasmid Library",
        "Zymoprepped sample",
        "Exonucleased sample",
        "Template",
        "Fragment",
        "Gel",
        "qPCR sample in",
        "qPCR sample out",
        "DNA library in",
        "DNA library out"
    ]

    def __init__(self, plan_step, cursor):
        super().__init__(plan_step, cursor)

    def set_yeast(self, input_sample_uri):
        """Resolve ``input_sample_uri`` via the plan and assign it."""
        self.set_yeast_from_sample(self.plan.input_sample(input_sample_uri))

    def set_yeast_from_sample(self, input_sample):
        """Point every primary handle at ``input_sample``."""
        for handle in self.primary_handles:
            self.sample_io[handle] = input_sample

    def set_sample_io(self, io_obj):
        """Rebind ``sample_io`` to a merge in which ``io_obj`` wins."""
        merged = dict(self.sample_io)
        merged.update(io_obj)
        self.sample_io = merged
class ExtractDNALeg(DNASeqLeg):
leg_order = [
{"name": "Treat With Zymolyase", "category": "Next Gen Prep"},
{"name": "Yeast Plasmid Extraction", "category": "Next Gen Prep"},
{"name": "Digest Genomic DNA", "category": "Next Gen Prep"}
]
def __init__(self, plan_step, cursor):
super().__init__(plan_step, cursor)
class QPCRLeg(DNASeqLeg):
leg_order = [
{"name": None, "category": "Preparative qPCR"},
{"name": "Run Pre-poured Gel", "category": "Next Gen Prep"},
{"name": "Extract Gel Slice (NGS)", "category": "Next Gen Prep"},
{"name": "Purify Gel Slice (NGS)", "category": "Next Gen Prep"}
]
def __init__(self, plan_step, cursor, plates=False):
qpcr_operation_type = "Make qPCR Fragment"
if plates: qpcr_operation_type += " WITH PLATES"
self.leg_order[0]["name"] = qpcr_operation_type
super().__init__(plan_step, cursor)
class DiluteLibraryLeg(DNASeqLeg):
leg_order = [
{"name": "Qubit concentration", "category": "Next Gen Prep"},
{"name": "Dilute to 4nM", "category": "Next Gen Prep"}
]
def __init__(self, plan_step, cursor):
super().__init__(plan_step, cursor) | 0.622689 | 0.374104 |
import pandas as pd
from ....Trade.Strategy.Cta.DyST_TraceFocus import *
from ....Trade.Strategy.DyStockCtaBase import *
from ....Trade.DyStockStrategyBase import *
class DyStockDataFocusAnalysisUtility(object):
"""
热点分析工具类
这个类有点特别,会借助DyST_FocusTrace类
"""
class DummyCtaEngine:
def __init__(self, eventEngine):
self.errorInfo = DyErrorInfo(eventEngine)
self.errorDataEngine = DyStockDataEngine(eventEngine, self.errorInfo, registerEvent=False)
self.dataEngine = self.errorDataEngine
self.dummyInfo = DyDummyInfo()
self.dummyDataEngine = DyStockDataEngine(eventEngine, self.dummyInfo, registerEvent=False)
def loadPreparedData(self, *args, **kwargs):
return None
def tDaysOffsetInDb(self, base, n=0):
return self.dataEngine.daysEngine.tDaysOffsetInDb(base, n)
def loadOnClose(self, *args, **kwargs):
return None
def putStockMarketMonitorUiEvent(self, *args, **kwargs):
pass
def __getattr__(self, name):
return None
def _convert2Tick(day, code, name, df):
"""
@df: 含有'preClose'列
"""
tick = DyStockCtaTickData()
try:
s = df.ix[day]
pos = df.index.get_loc(day)
if pos == 0:
return None
except Exception:
return None
tick.code = code
tick.name = name
tick.date = day
tick.time = '15:00:00'
tick.datetime = datetime.strptime(day + ' 15:00:00', '%Y-%m-%d %H:%M:%S')
tick.preClose = df.ix[pos - 1, 'close']
tick.price = s['close']
tick.open = s['open']
tick.high = s['high']
tick.low = s['low']
tick.volume = s['volume']
tick.amount = s['amt']
return tick
def _convert2Ticks(day, dfs, codeTable):
ticks = {}
for code, df in dfs.items():
tick = DyStockDataFocusAnalysisUtility._convert2Tick(day, code, codeTable[code], df)
if tick is None:
continue
ticks[code] = tick
return ticks
def _createFocusStrengthDf(dayIndex, focusInfoPool):
data = {}
for focus, focusInfo in focusInfoPool.items():
data[focus] = [focusInfo.strength]
df = pd.DataFrame(data, index=[dayIndex])
return df
def _initTraceFocusObj(traceFocusObj, date, info, codes, conceptsDict, dummyDaysEngine):
"""
Initialize prepared data
"""
# init
traceFocusObj._curInit(date)
# we only update UI for first time
if traceFocusObj._preparedData:
info = DyDummyInfo()
# only classify codes not in 'oldStocks' dict
codes = set(codes) - set(traceFocusObj._preparedData.get('oldStocks', []))
preparedData = DyST_TraceFocus.classifyCodes(date, codes, info, dummyDaysEngine, conceptsDict)
# update prepared data of DyST_TraceFocus object
traceFocusObj._preparedData.setdefault('oldStocks', {}).update(preparedData['oldStocks'])
traceFocusObj._preparedData['newStocks'] = preparedData['newStocks']
def _changeTraceFocusObj(traceFocusObj):
"""
replace dragons in focus info pool by [[code, name]]
"""
for _, focusInfo in traceFocusObj._focusInfoPool.items():
focusInfo.dragons = [[code, traceFocusObj._focusCodePool[code].name] for code in focusInfo.dragons]
def _incrementAnalysis(dummyTraceFocusObj,
day,
info,
codes,
dfs,
codeTable,
conceptsDict,
dummyDaysEngine):
"""
增量分析每日热点,这样只需要增量归类归类股票
"""
# initialize incremently
DyStockDataFocusAnalysisUtility._initTraceFocusObj(dummyTraceFocusObj,
day,
info,
codes,
conceptsDict,
dummyDaysEngine)
# push ticks
ticks = DyStockDataFocusAnalysisUtility._convert2Ticks(day, dfs, codeTable)
if ticks:
dummyTraceFocusObj.onTicks(ticks)
DyStockDataFocusAnalysisUtility._changeTraceFocusObj(dummyTraceFocusObj)
return dummyTraceFocusObj._focusInfoPool
def analysis(dfs, indexDfIndex, codeTable, eventEngine, info):
"""
@dfs: {code: df}, 不含指数
@indexDfIndex: 对应的指数DF的index
@return: foucs strength DF, dict of focus info pool
"""
dummyCtaEngine = DyStockDataFocusAnalysisUtility.DummyCtaEngine(eventEngine)
dummyTraceFocusObj = DyST_TraceFocus(dummyCtaEngine, dummyCtaEngine.errorInfo, DyStockStrategyState(DyStockStrategyState.backTesting)) # create a dummy instance of DyST_TraceFoucs
# classify first time
assert indexDfIndex.size > 1
codes = list(dfs)
conceptsDict = DyST_TraceFocus.getConceptsFromFile()
DyStockDataFocusAnalysisUtility._initTraceFocusObj(dummyTraceFocusObj,
indexDfIndex[0].strftime("%Y-%m-%d"),
info,
codes,
conceptsDict,
dummyCtaEngine.dummyDataEngine.daysEngine)
# focus analysis
info.print('开始热点分析...', DyLogData.ind)
progress = DyProgress(info)
progress.init(indexDfIndex.size)
focusInfoPoolDict = {} # {day: focus info pool}
focusStrengthDfList = [] # [focus DF of one day]
for dayIndex in indexDfIndex:
day = dayIndex.strftime("%Y-%m-%d")
# analysis incremently
focusInfoPool = DyStockDataFocusAnalysisUtility._incrementAnalysis(dummyTraceFocusObj,
day,
info,
codes,
dfs,
codeTable,
conceptsDict,
dummyCtaEngine.dummyDataEngine.daysEngine)
focusInfoPoolDict[day] = focusInfoPool
focusStrengthDfList.append(DyStockDataFocusAnalysisUtility._createFocusStrengthDf(dayIndex, focusInfoPool))
progress.update()
# concatenate into DF and 按热点出现次数排序(列排序)
focusStrengthDf = pd.concat(focusStrengthDfList)
columns = list(focusStrengthDf.columns)
columns = sorted(columns, key=lambda x: focusStrengthDf[x].notnull().sum(), reverse=True)
focusStrengthDf = focusStrengthDf.reindex(columns=columns)
info.print('热点分析完成', DyLogData.ind)
return focusStrengthDf, focusInfoPoolDict
def _analysisProcess(outQueue, days, dayIndexes, info, dummyTraceFocusObj, dfs, codeTable, conceptsDict, dummyDaysEngine):
"""
以子进程方式分析每日热点
"""
codes = list(dfs)
for day, dayIndex in zip(days, dayIndexes):
# analysis incremently
focusInfoPool = DyStockDataFocusAnalysisUtility._incrementAnalysis(dummyTraceFocusObj,
day,
info,
codes,
dfs,
codeTable,
conceptsDict,
dummyDaysEngine)
outQueue.put([day, dayIndex, focusInfoPool]) | Stock/Data/Utility/Other/DyStockDataFocusAnalysisUtility.py | import pandas as pd
from ....Trade.Strategy.Cta.DyST_TraceFocus import *
from ....Trade.Strategy.DyStockCtaBase import *
from ....Trade.DyStockStrategyBase import *
class DyStockDataFocusAnalysisUtility(object):
"""
热点分析工具类
这个类有点特别,会借助DyST_FocusTrace类
"""
class DummyCtaEngine:
def __init__(self, eventEngine):
self.errorInfo = DyErrorInfo(eventEngine)
self.errorDataEngine = DyStockDataEngine(eventEngine, self.errorInfo, registerEvent=False)
self.dataEngine = self.errorDataEngine
self.dummyInfo = DyDummyInfo()
self.dummyDataEngine = DyStockDataEngine(eventEngine, self.dummyInfo, registerEvent=False)
def loadPreparedData(self, *args, **kwargs):
return None
def tDaysOffsetInDb(self, base, n=0):
return self.dataEngine.daysEngine.tDaysOffsetInDb(base, n)
def loadOnClose(self, *args, **kwargs):
return None
def putStockMarketMonitorUiEvent(self, *args, **kwargs):
pass
def __getattr__(self, name):
return None
def _convert2Tick(day, code, name, df):
"""
@df: 含有'preClose'列
"""
tick = DyStockCtaTickData()
try:
s = df.ix[day]
pos = df.index.get_loc(day)
if pos == 0:
return None
except Exception:
return None
tick.code = code
tick.name = name
tick.date = day
tick.time = '15:00:00'
tick.datetime = datetime.strptime(day + ' 15:00:00', '%Y-%m-%d %H:%M:%S')
tick.preClose = df.ix[pos - 1, 'close']
tick.price = s['close']
tick.open = s['open']
tick.high = s['high']
tick.low = s['low']
tick.volume = s['volume']
tick.amount = s['amt']
return tick
def _convert2Ticks(day, dfs, codeTable):
ticks = {}
for code, df in dfs.items():
tick = DyStockDataFocusAnalysisUtility._convert2Tick(day, code, codeTable[code], df)
if tick is None:
continue
ticks[code] = tick
return ticks
def _createFocusStrengthDf(dayIndex, focusInfoPool):
data = {}
for focus, focusInfo in focusInfoPool.items():
data[focus] = [focusInfo.strength]
df = pd.DataFrame(data, index=[dayIndex])
return df
def _initTraceFocusObj(traceFocusObj, date, info, codes, conceptsDict, dummyDaysEngine):
"""
Initialize prepared data
"""
# init
traceFocusObj._curInit(date)
# we only update UI for first time
if traceFocusObj._preparedData:
info = DyDummyInfo()
# only classify codes not in 'oldStocks' dict
codes = set(codes) - set(traceFocusObj._preparedData.get('oldStocks', []))
preparedData = DyST_TraceFocus.classifyCodes(date, codes, info, dummyDaysEngine, conceptsDict)
# update prepared data of DyST_TraceFocus object
traceFocusObj._preparedData.setdefault('oldStocks', {}).update(preparedData['oldStocks'])
traceFocusObj._preparedData['newStocks'] = preparedData['newStocks']
def _changeTraceFocusObj(traceFocusObj):
"""
replace dragons in focus info pool by [[code, name]]
"""
for _, focusInfo in traceFocusObj._focusInfoPool.items():
focusInfo.dragons = [[code, traceFocusObj._focusCodePool[code].name] for code in focusInfo.dragons]
def _incrementAnalysis(dummyTraceFocusObj,
day,
info,
codes,
dfs,
codeTable,
conceptsDict,
dummyDaysEngine):
"""
增量分析每日热点,这样只需要增量归类归类股票
"""
# initialize incremently
DyStockDataFocusAnalysisUtility._initTraceFocusObj(dummyTraceFocusObj,
day,
info,
codes,
conceptsDict,
dummyDaysEngine)
# push ticks
ticks = DyStockDataFocusAnalysisUtility._convert2Ticks(day, dfs, codeTable)
if ticks:
dummyTraceFocusObj.onTicks(ticks)
DyStockDataFocusAnalysisUtility._changeTraceFocusObj(dummyTraceFocusObj)
return dummyTraceFocusObj._focusInfoPool
def analysis(dfs, indexDfIndex, codeTable, eventEngine, info):
"""
@dfs: {code: df}, 不含指数
@indexDfIndex: 对应的指数DF的index
@return: foucs strength DF, dict of focus info pool
"""
dummyCtaEngine = DyStockDataFocusAnalysisUtility.DummyCtaEngine(eventEngine)
dummyTraceFocusObj = DyST_TraceFocus(dummyCtaEngine, dummyCtaEngine.errorInfo, DyStockStrategyState(DyStockStrategyState.backTesting)) # create a dummy instance of DyST_TraceFoucs
# classify first time
assert indexDfIndex.size > 1
codes = list(dfs)
conceptsDict = DyST_TraceFocus.getConceptsFromFile()
DyStockDataFocusAnalysisUtility._initTraceFocusObj(dummyTraceFocusObj,
indexDfIndex[0].strftime("%Y-%m-%d"),
info,
codes,
conceptsDict,
dummyCtaEngine.dummyDataEngine.daysEngine)
# focus analysis
info.print('开始热点分析...', DyLogData.ind)
progress = DyProgress(info)
progress.init(indexDfIndex.size)
focusInfoPoolDict = {} # {day: focus info pool}
focusStrengthDfList = [] # [focus DF of one day]
for dayIndex in indexDfIndex:
day = dayIndex.strftime("%Y-%m-%d")
# analysis incremently
focusInfoPool = DyStockDataFocusAnalysisUtility._incrementAnalysis(dummyTraceFocusObj,
day,
info,
codes,
dfs,
codeTable,
conceptsDict,
dummyCtaEngine.dummyDataEngine.daysEngine)
focusInfoPoolDict[day] = focusInfoPool
focusStrengthDfList.append(DyStockDataFocusAnalysisUtility._createFocusStrengthDf(dayIndex, focusInfoPool))
progress.update()
# concatenate into DF and 按热点出现次数排序(列排序)
focusStrengthDf = pd.concat(focusStrengthDfList)
columns = list(focusStrengthDf.columns)
columns = sorted(columns, key=lambda x: focusStrengthDf[x].notnull().sum(), reverse=True)
focusStrengthDf = focusStrengthDf.reindex(columns=columns)
info.print('热点分析完成', DyLogData.ind)
return focusStrengthDf, focusInfoPoolDict
def _analysisProcess(outQueue, days, dayIndexes, info, dummyTraceFocusObj, dfs, codeTable, conceptsDict, dummyDaysEngine):
"""
以子进程方式分析每日热点
"""
codes = list(dfs)
for day, dayIndex in zip(days, dayIndexes):
# analysis incremently
focusInfoPool = DyStockDataFocusAnalysisUtility._incrementAnalysis(dummyTraceFocusObj,
day,
info,
codes,
dfs,
codeTable,
conceptsDict,
dummyDaysEngine)
outQueue.put([day, dayIndex, focusInfoPool]) | 0.354321 | 0.209268 |
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
from SUAVE.Core import Units , Data
from .Lithium_Ion import Lithium_Ion
from SUAVE.Methods.Power.Battery.Cell_Cycle_Models.LiNiMnCoO2_cell_cycle_model import compute_NMC_cell_state_variables
from SUAVE.Methods.Power.Battery.compute_net_generated_battery_heat import compute_net_generated_battery_heat
import numpy as np
import os
from scipy.integrate import cumtrapz
from scipy.interpolate import RegularGridInterpolator
## @ingroup Components-Energy-Storages-Batteries-Constant_Mass
class Lithium_Ion_LiNiMnCoO2_18650(Lithium_Ion):
    """ Specifies discharge/specific energy characteristics specific
        18650 lithium-nickel-manganese-cobalt-oxide battery cells

        Assumptions:
        Convective Thermal Conductivity Coefficient corresponds to forced
        air cooling in 35 m/s air

        Source:
        Automotive Industrial Systems Company of Panasonic Group, Technical Information of
        NCR18650G, URL https://www.imrbatteries.com/content/panasonic_ncr18650g.pdf

        convective heat transfer coefficient, h
        Jeon, Dong Hyup, and Seung Man Baek. "Thermal modeling of cylindrical
        lithium ion battery during discharge cycle." Energy Conversion and Management
        52.8-9 (2011): 2973-2981.

        thermal conductivity, k
        Yang, Shuting, et al. "A Review of Lithium-Ion Battery Thermal Management
        System Strategies and the Evaluate Criteria." Int. J. Electrochem. Sci 14
        (2019): 6077-6107.

        specific heat capacity, Cp
        (axial and radial)
        <NAME>, et al. "A Review of Lithium-Ion Battery Thermal Management
        System Strategies and the Evaluate Criteria." Int. J. Electrochem. Sci 14
        (2019): 6077-6107.

        # Electrode Area
        Muenzel, Valentin, et al. "A comparative testing study of commercial
        18650-format lithium-ion battery cells." Journal of The Electrochemical
        Society 162.8 (2015): A1592.

        Inputs:
        None

        Outputs:
        None

        Properties Used:
        N/A
    """
    def __defaults__(self):
        # Geometric and electrical properties of a single NCR18650G cell
        self.tag                              = 'Lithium_Ion_LiNiMnCoO2_Cell'
        self.cell.diameter                    = 0.0185                                                   # [m]
        self.cell.height                      = 0.0653                                                   # [m]
        self.cell.mass                        = 0.048 * Units.kg                                         # [kg]
        # lateral surface of the cylinder plus the two end caps (2 * pi r^2 = 0.5 * pi d^2)
        self.cell.surface_area                = (np.pi*self.cell.height*self.cell.diameter) + (0.5*np.pi*self.cell.diameter**2)  # [m^2]
        self.cell.volume                      = np.pi*(0.5*self.cell.diameter)**2*self.cell.height
        self.cell.density                     = self.cell.mass/self.cell.volume                          # [kg/m^3]
        self.cell.electrode_area              = 0.0342                                                   # [m^2]
        self.cell.max_voltage                 = 4.2                                                      # [V]
        self.cell.nominal_capacity            = 3.55                                                     # [Amp-Hrs]
        self.cell.nominal_voltage             = 3.6                                                      # [V]
        self.cell.charging_voltage            = self.cell.nominal_voltage                                # [V]
        self.watt_hour_rating                 = self.cell.nominal_capacity * self.cell.nominal_voltage   # [Watt-hours]
        self.specific_energy                  = self.watt_hour_rating*Units.Wh/self.cell.mass            # [J/kg]
        self.specific_power                   = self.specific_energy/self.cell.nominal_capacity          # [W/kg]
        self.resistance                       = 0.025                                                    # [Ohms]
        self.specific_heat_capacity           = 1108                                                     # [J/kgK]
        self.cell.specific_heat_capacity      = 1108                                                     # [J/kgK]
        self.cell.radial_thermal_conductivity = 0.4                                                      # [J/kgK]
        self.cell.axial_thermal_conductivity  = 32.2                                                     # [J/kgK] # estimated
        # Build the discharge response surface from the experimental raw data shipped with SUAVE
        battery_raw_data                      = load_battery_results()
        self.discharge_performance_map        = create_discharge_performance_map(battery_raw_data)
        return

    def energy_calc(self,numerics,battery_discharge_flag = True ):
        '''This is an electric cycle model for 18650 lithium-nickel-manganese-cobalt-oxide
           battery cells. The model uses experimental data performed
           by the Automotive Industrial Systems Company of Panasonic Group

           Sources:
           Internal Resistance Model:
           <NAME>., <NAME>., <NAME>., and <NAME>., "Combined State of Charge and State of
           Health estimation over lithium-ion battery cellcycle lifespan for electric
           vehicles,"Journal of Power Sources, Vol. 273, 2015, pp. 793-803.
           doi:10.1016/j.jpowsour.2014.09.146,URLhttp://dx.doi.org/10.1016/j.jpowsour.2014.09.146.

           Battery Heat Generation Model and Entropy Model:
           Jeon, <NAME>, and <NAME>. "Thermal modeling of cylindrical lithium ion
           battery during discharge cycle." Energy Conversion and Management 52.8-9 (2011):
           2973-2981.

           Assumptions:
           1) All battery modules exhibit the same themal behaviour.

           Inputs:
             battery.
                   I_bat             (max_energy)                          [Joules]
                   cell_mass         (battery cell mass)                   [kilograms]
                   Cp                (battery cell specific heat capacity) [J/(K kg)]
                   t                 (battery age in days)                 [days]
                   T_ambient         (ambient temperature)                 [Kelvin]
                   T_current         (pack temperature)                    [Kelvin]
                   T_cell            (battery cell temperature)            [Kelvin]
                   E_max             (max energy)                          [Joules]
                   E_current         (current energy)                      [Joules]
                   Q_prior           (charge throughput)                   [Amp-hrs]
                   R_growth_factor   (internal resistance growth factor)   [unitless]
             inputs.
                   I_bat             (current)                             [amps]
                   P_bat             (power)                               [Watts]

           Outputs:
             battery.
                  current_energy                                           [Joules]
                  cell_temperature                                         [Kelvin]
                  resistive_losses                                         [Watts]
                  load_power                                               [Watts]
                  current                                                  [Amps]
                  battery_voltage_open_circuit                             [Volts]
                  cell_charge_throughput                                   [Amp-hrs]
                  internal_resistance                                      [Ohms]
                  battery_state_of_charge                                  [unitless]
                  depth_of_discharge                                       [unitless]
                  battery_voltage_under_load                               [Volts]
        '''
        # Unpack variables
        battery            = self
        I_bat              = battery.inputs.current
        P_bat              = battery.inputs.power_in
        electrode_area     = battery.cell.electrode_area
        As_cell            = battery.cell.surface_area
        T_current          = battery.pack_temperature
        T_cell             = battery.cell_temperature
        E_max              = battery.max_energy
        E_current          = battery.current_energy
        Q_prior            = battery.cell_charge_throughput
        battery_data       = battery.discharge_performance_map
        I                  = numerics.time.integrate
        D                  = numerics.time.differentiate

        # ---------------------------------------------------------------------------------
        # Compute battery electrical properties
        # ---------------------------------------------------------------------------------
        # Calculate the current going into one cell
        n_series       = battery.pack_config.series
        n_parallel     = battery.pack_config.parallel
        n_total        = battery.pack_config.total
        Nn             = battery.module_config.normal_count
        Np             = battery.module_config.parallel_count
        n_total_module = Nn*Np

        # Current sign convention: positive cell current on discharge
        if battery_discharge_flag:
            I_cell = I_bat/n_parallel
        else:
            I_cell = -I_bat/n_parallel

        # State of charge of the battery
        initial_discharge_state = np.dot(I,P_bat) + E_current[0]
        SOC_old                 = np.divide(initial_discharge_state,E_max)

        # Make sure things do not break: clip SOC to [0,1] and cell temperature
        # to the experimental data range of the response surface (272.65-322.65 K)
        SOC_old[SOC_old < 0.] = 0.
        SOC_old[SOC_old > 1.] = 1.
        T_cell[T_cell<272.65] = 272.65
        T_cell[T_cell>322.65] = 322.65
        battery.cell_temperature = T_cell
        battery.pack_temperature = T_cell

        # ---------------------------------------------------------------------------------
        # Compute battery cell temperature
        # ---------------------------------------------------------------------------------
        # Determine temperature increase
        sigma   = 139 # Electrical conductivity
        n       = 1
        F       = 96485 # C/mol Faraday constant
        # Entropy change as a 6th-order polynomial fit in SOC (Jeon & Baek 2011)
        delta_S = -496.66*(SOC_old)**6 + 1729.4*(SOC_old)**5 + -2278 *(SOC_old)**4 + 1382.2 *(SOC_old)**3 + \
                  -380.47*(SOC_old)**2 + 46.508*(SOC_old) + -10.692

        i_cell         = I_cell/electrode_area # current intensity
        q_dot_entropy  = -(T_cell)*delta_S*i_cell/(n*F)
        q_dot_joule    = (i_cell**2)/sigma
        Q_heat_gen     = (q_dot_joule + q_dot_entropy)*As_cell
        q_joule_frac   = q_dot_joule/(q_dot_joule + q_dot_entropy)
        q_entropy_frac = q_dot_entropy/(q_dot_joule + q_dot_entropy)

        # Compute cell temperature
        T_current = compute_net_generated_battery_heat(n_total,battery,Q_heat_gen,numerics)

        # Power going into the battery accounting for resistance losses
        P_loss = n_total*Q_heat_gen
        P      = P_bat - np.abs(P_loss)

        # Compute State Variables
        V_ul = compute_NMC_cell_state_variables(battery_data,SOC_old,T_cell,I_cell)

        # Li-ion battery internal resistance (polynomial fit in SOC)
        R_0 = 0.01483*(SOC_old**2) - 0.02518*SOC_old + 0.1036

        # Open-circuit voltage recovered from the under-load voltage
        V_oc = V_ul + (I_cell * R_0)

        # ---------------------------------------------------------------------------------
        # Compute updates state of battery
        # ---------------------------------------------------------------------------------
        # Possible Energy going into the battery:
        energy_unmodified = np.dot(I,P)

        # Available capacity
        capacity_available = E_max - battery.current_energy[0]

        # How much energy the battery could be overcharged by
        delta           = energy_unmodified -capacity_available
        delta[delta<0.] = 0.

        # Power that shouldn't go in
        ddelta = np.dot(D,delta)

        # Power actually going into the battery
        P[P>0.] = P[P>0.] - ddelta[P>0.]
        E_bat = np.dot(I,P)
        E_bat = np.reshape(E_bat,np.shape(E_current)) #make sure it's consistent

        # Guard against NaNs from the numerics before accumulating energy.
        # BUGFIX: the original code used np.max (which propagates NaN) and tested
        # np.isnan(E_bat.any()) - isnan of a boolean - so the all-NaN branch was
        # unreachable. Check the all-NaN case first and repair partial NaNs with
        # nanmax, which ignores the NaN entries.
        if np.isnan(E_bat).any():
            if np.isnan(E_bat).all():
                # all NaNs; no valid energy information - assume no energy change
                E_bat = np.zeros_like(E_bat)
            else:
                # replace NaN entries with the largest valid value
                E_bat = np.ones_like(E_bat)*np.nanmax(E_bat)

        # Add this to the current state
        E_current = E_bat + E_current[0]

        # Determine new State of Charge
        SOC_new = np.divide(E_current, E_max)
        SOC_new[SOC_new<0] = 0.
        SOC_new[SOC_new>1] = 1.
        DOD_new = 1 - SOC_new

        # Determine new charge throughput (the amount of charge gone through the battery)
        Q_total = np.atleast_2d(np.hstack(( Q_prior[0] , Q_prior[0] + cumtrapz(I_cell[:,0], x = numerics.time.control_points[:,0])/Units.hr ))).T

        # If SOC is negative, voltage under load goes to zero
        V_ul[SOC_new < 0.] = 0.

        # Pack outputs
        battery.current_energy                     = E_current
        battery.cell_temperature                   = T_current
        battery.pack_temperature                   = T_current
        battery.cell_joule_heat_fraction           = q_joule_frac
        battery.cell_entropy_heat_fraction         = q_entropy_frac
        battery.resistive_losses                   = P_loss
        battery.load_power                         = V_ul*n_series*I_bat
        battery.current                            = I_bat
        battery.voltage_open_circuit               = V_oc*n_series
        battery.cell_voltage_open_circuit          = V_oc
        battery.cell_current                       = I_cell
        battery.cell_charge_throughput             = Q_total
        battery.heat_energy_generated              = Q_heat_gen*n_total_module
        battery.internal_resistance                = R_0*n_series
        battery.state_of_charge                    = SOC_new
        battery.depth_of_discharge                 = DOD_new
        battery.voltage_under_load                 = V_ul*n_series
        battery.cell_voltage_under_load            = V_ul

        return battery

    def append_battery_unknowns(self,segment):
        """ Appends unknowns specific to NMC cells which are unpacked from the mission solver and send to the network.

            Assumptions:
            None

            Source:
            N/A

            Inputs:
            segment.state.unknowns.battery_cell_temperature   [Kelvin]
            segment.state.unknowns.battery_state_of_charge    [unitless]
            segment.state.unknowns.battery_current            [Amperes]

            Outputs:
            segment.state.conditions.propulsion.battery_cell_temperature  [Kelvin]
            segment.state.conditions.propulsion.battery_state_of_charge   [unitless]
            segment.state.conditions.propulsion.battery_current           [Amperes]

            Properties Used:
            N/A
        """
        propulsion = segment.state.conditions.propulsion
        # the first control point is fixed by the initial condition, so only
        # rows 1: are driven by the solver unknowns
        propulsion.battery_cell_temperature[1:,:] = segment.state.unknowns.battery_cell_temperature[1:,:]
        propulsion.battery_state_of_charge[1:,0]  = segment.state.unknowns.battery_state_of_charge[:,0]
        propulsion.battery_current                = segment.state.unknowns.battery_current
        return

    def append_battery_residuals(self,segment,network):
        """ Packs the residuals specific to NMC cells to be sent to the mission solver.

            Assumptions:
            None

            Source:
            N/A

            Inputs:
            segment.state.conditions.propulsion:
                battery_state_of_charge      [unitless]
                battery_cell_temperature     [Kelvin]
                battery_current              [Amperes]
            segment.state.unknowns.
                battery_state_of_charge      [unitless]
                battery_cell_temperature     [Kelvin]
                battery_current              [Amperes]

            Outputs:
            None

            Properties Used:
            None
        """
        SOC_actual   = segment.state.conditions.propulsion.battery_state_of_charge
        SOC_predict  = segment.state.unknowns.battery_state_of_charge

        Temp_actual  = segment.state.conditions.propulsion.battery_cell_temperature
        Temp_predict = segment.state.unknowns.battery_cell_temperature

        i_actual     = segment.state.conditions.propulsion.battery_current
        i_predict    = segment.state.unknowns.battery_current

        # Return the residuals
        segment.state.residuals.network.SOC         = SOC_predict  - SOC_actual[1:,:]
        segment.state.residuals.network.temperature = Temp_predict - Temp_actual
        segment.state.residuals.network.current     = i_predict    - i_actual
        return

    def append_battery_unknowns_and_residuals_to_segment(self,segment,initial_voltage,
                                              initial_battery_cell_temperature , initial_battery_state_of_charge,
                                              initial_battery_cell_current):
        """ Sets up the information that the mission needs to run a mission segment using this network

            Assumptions:
            None

            Source:
            N/A

            Inputs:
            initial_voltage                   [volts]
            initial_battery_cell_temperature  [Kelvin]
            initial_battery_state_of_charge   [unitless]
            initial_battery_cell_current      [Amperes]

            Outputs
            None

            Properties Used:
            N/A
        """
        # setup the state
        ones_row    = segment.state.unknowns.ones_row
        ones_row_m1 = segment.state.unknowns.ones_row_m1
        parallel    = self.pack_config.parallel

        # SOC unknowns skip the first control point (fixed initial condition);
        # the pack current is the cell current scaled by the parallel count
        segment.state.unknowns.battery_state_of_charge  = initial_battery_state_of_charge  * ones_row_m1(1)
        segment.state.unknowns.battery_cell_temperature = initial_battery_cell_temperature * ones_row(1)
        segment.state.unknowns.battery_current          = initial_battery_cell_current*parallel * ones_row(1)
        return

    def compute_voltage(self,state):
        """ Computes the voltage of a single NMC cell or a battery pack of NMC cells

            Assumptions:
            None

            Source:
            N/A

            Inputs:
                self    - battery data structure             [unitless]
                state   - segment unknowns to define voltage [unitless]

            Outputs
                V_ul    - under-load voltage                 [volts]

            Properties Used:
            N/A
        """
        # Unpack battery properties
        battery           = self
        battery_data      = battery.discharge_performance_map
        n_series          = battery.pack_config.series
        n_parallel        = battery.pack_config.parallel

        # Unpack segment state properties
        SOC        = state.conditions.propulsion.battery_state_of_charge
        T_cell     = state.conditions.propulsion.battery_cell_temperature
        I_cell     = state.conditions.propulsion.battery_current/n_parallel

        # Link Temperature and update
        battery.cell_temperature = T_cell

        # Compute State Variables
        V_ul_cell = compute_NMC_cell_state_variables(battery_data,SOC,T_cell,I_cell)

        # Voltage under load
        V_ul = n_series*V_ul_cell

        return V_ul

    def update_battery_state_of_health(self,segment,increment_battery_cycle_day = False):
        """ This is an aging model for 18650 lithium-nickel-manganese-cobalt-oxide batteries.

           Source:
           Schmalstieg, Johannes, et al. "A holistic aging model for Li (NiMnCo) O2
           based 18650 lithium-ion batteries." Journal of Power Sources 257 (2014): 325-334.

           Assumptions:
           None

           Inputs:
             segment.conditions.propulsion.
                battery_cycle_day                                                [unitless]
                battery_cell_temperature                                         [Kelvin]
                battery_voltage_open_circuit                                     [Volts]
                battery_charge_throughput                                        [Amp-hrs]
                battery_state_of_charge                                          [unitless]

           Outputs:
              segment.conditions.propulsion.
                battery_capacity_fade_factor     (internal resistance growth factor) [unitless]
                battery_resistance_growth_factor (capactance (energy) growth factor) [unitless]

           Properties Used:
           N/A
        """
        n_series   = self.pack_config.series
        SOC        = segment.conditions.propulsion.battery_state_of_charge
        V_ul       = segment.conditions.propulsion.battery_voltage_under_load/n_series
        t          = segment.conditions.propulsion.battery_cycle_day
        Q_prior    = segment.conditions.propulsion.battery_cell_charge_throughput[-1,0]
        Temp       = np.mean(segment.conditions.propulsion.battery_cell_temperature)

        # aging model
        delta_DOD = abs(SOC[0][0] - SOC[-1][0])
        rms_V_ul  = np.sqrt(np.mean(V_ul**2))
        # calendar (alpha) and cycle (beta) aging coefficients from Schmalstieg et al.
        alpha_cap = (7.542*np.mean(V_ul) - 23.75) * 1E6 * np.exp(-6976/(Temp))
        alpha_res = (5.270*np.mean(V_ul) - 16.32) * 1E5 * np.exp(-5986/(Temp))
        beta_cap  = 7.348E-3 * (rms_V_ul - 3.667)**2 +  7.60E-4 + 4.081E-3*delta_DOD
        beta_res  = 2.153E-4 * (rms_V_ul - 3.725)**2 - 1.521E-5 + 2.798E-4*delta_DOD

        E_fade_factor   = 1 - alpha_cap*(t**0.75) - beta_cap*np.sqrt(Q_prior)
        R_growth_factor = 1 + alpha_res*(t**0.75) + beta_res*Q_prior

        # fade is monotonic: capacity only decreases, resistance only grows
        segment.conditions.propulsion.battery_capacity_fade_factor     = np.minimum(E_fade_factor,segment.conditions.propulsion.battery_capacity_fade_factor)
        segment.conditions.propulsion.battery_resistance_growth_factor = np.maximum(R_growth_factor,segment.conditions.propulsion.battery_resistance_growth_factor)

        if increment_battery_cycle_day:
            segment.conditions.propulsion.battery_cycle_day += 1 # update battery age by one day

        return
def create_discharge_performance_map(battery_raw_data):
    """ Creates discharge and charge response surface for
        LiNiMnCoO2 battery cells

        Source:
        N/A

        Assumptions:
        N/A

        Inputs:
        battery_raw_data

        Outputs:
        battery_data

        Properties Used:
        N/A
    """
    # Resample the raw experimental curves onto a regular grid, then wrap
    # the gridded data in interpolants that serve as the performance map
    gridded_data = process_raw_data(battery_raw_data)
    return create_response_surface(gridded_data)
def create_response_surface(processed_data):
    """Build regular-grid interpolants for cell voltage and temperature
    over the (current, temperature, SOC) axes of the processed data.
    """
    # Grid axes matching the layout of the processed experimental data:
    # 5 current levels (0-8 A), 6 temperatures (0-50 C in Kelvin), 15 SOC points
    current_axis     = np.linspace(0, 8, 5)
    temperature_axis = np.linspace(0, 50, 6) + 272.65
    soc_axis         = np.linspace(0, 1, 15)
    grid_axes        = (current_axis, temperature_axis, soc_axis)

    battery_map             = Data()
    battery_map.Voltage     = RegularGridInterpolator(grid_axes, processed_data.Voltage)
    battery_map.Temperature = RegularGridInterpolator(grid_axes, processed_data.Temperature)
    return battery_map
def process_raw_data(raw_data):
    """ Takes raw data and formats voltage as a function of SOC, current and temperature

        Source
        N/A

        Assumptions:
        N/A

        Inputs:
        raw_data

        Outputs:
        processed_data

        Properties Used:
        N/A
    """
    processed_data = Data()
    # current , operating temperature , SOC vs voltage
    processed_data.Voltage     = _resample_discharge_curves(raw_data.Voltage)
    # current , operating temperature , SOC vs temperature
    processed_data.Temperature = _resample_discharge_curves(raw_data.Temperature)
    return processed_data

def _resample_discharge_curves(curve_sets):
    """Resample each raw (capacity, value) discharge curve onto 15 evenly
    spaced capacity points and normalize capacity to [0, 1] (SOC-like axis).

    curve_sets is indexed as [current][temperature] -> (n, 2) array of
    (capacity, measured value) samples; the output array is
    (5 currents, 6 temperatures, 15 points, 2) with column 0 the
    normalized capacity and column 1 the interpolated value.
    """
    out = np.zeros((5, 6, 15, 2))
    for i, curves_at_current in enumerate(curve_sets):
        for j, curve in enumerate(curves_at_current):
            max_x = max(curve[:, 0])
            x = np.linspace(0, max_x, 15)
            y = np.interp(x, curve[:, 0], curve[:, 1])
            out[i, j, :, 0] = x / max_x   # normalized capacity axis
            out[i, j, :, 1] = y           # resampled measurement
    return out
def load_battery_results():
    '''Load experimental raw data of NMC cells

    Source:
    Automotive Industrial Systems Company of Panasonic Group, Technical Information of
    NCR18650G, URL https://www.imrbatteries.com/content/panasonic_ncr18650g.pdf

    Assumptions:
    N/A

    Inputs:
    N/A

    Outputs:
    battery_data

    Properties Used:
    N/A
    '''
    # The raw-data file ships alongside this module
    module_directory = os.path.dirname(os.path.abspath(__file__))
    raw_data_file    = os.path.join(module_directory, 'NMC_Raw_Data.res')
    return SUAVE.Input_Output.SUAVE.load(raw_data_file)
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
from SUAVE.Core import Units , Data
from .Lithium_Ion import Lithium_Ion
from SUAVE.Methods.Power.Battery.Cell_Cycle_Models.LiNiMnCoO2_cell_cycle_model import compute_NMC_cell_state_variables
from SUAVE.Methods.Power.Battery.compute_net_generated_battery_heat import compute_net_generated_battery_heat
import numpy as np
import os
from scipy.integrate import cumtrapz
from scipy.interpolate import RegularGridInterpolator
## @ingroup Components-Energy-Storages-Batteries-Constant_Mass
class Lithium_Ion_LiNiMnCoO2_18650(Lithium_Ion):
    """ Specifies discharge/specific energy characteristics of specific
    18650 lithium-nickel-manganese-cobalt-oxide battery cells.

    Assumptions:
        Convective Thermal Conductivity Coefficient corresponds to forced
        air cooling in 35 m/s air

    Source:
        Automotive Industrial Systems Company of Panasonic Group, Technical Information of
        NCR18650G, URL https://www.imrbatteries.com/content/panasonic_ncr18650g.pdf

        convective heat transfer coefficient, h
        Jeon, Dong Hyup, and Seung Man Baek. "Thermal modeling of cylindrical
        lithium ion battery during discharge cycle." Energy Conversion and Management
        52.8-9 (2011): 2973-2981.

        thermal conductivity, k
        Yang, Shuting, et al. "A Review of Lithium-Ion Battery Thermal Management
        System Strategies and the Evaluate Criteria." Int. J. Electrochem. Sci 14
        (2019): 6077-6107.

        specific heat capacity, Cp (axial and radial)
        Yang, Shuting, et al. "A Review of Lithium-Ion Battery Thermal Management
        System Strategies and the Evaluate Criteria." Int. J. Electrochem. Sci 14
        (2019): 6077-6107.

        # Electrode Area
        Muenzel, Valentin, et al. "A comparative testing study of commercial
        18650-format lithium-ion battery cells." Journal of The Electrochemical
        Society 162.8 (2015): A1592.

    Inputs:
        None

    Outputs:
        None

    Properties Used:
        N/A
    """

    def __defaults__(self):
        # Cell geometry and mass
        self.tag                              = 'Lithium_Ion_LiNiMnCoO2_Cell'
        self.cell.diameter                    = 0.0185                                                   # [m]
        self.cell.height                      = 0.0653                                                   # [m]
        self.cell.mass                        = 0.048 * Units.kg                                         # [kg]
        # Lateral (cylinder) area plus one circular end cap
        self.cell.surface_area                = (np.pi*self.cell.height*self.cell.diameter) + (0.5*np.pi*self.cell.diameter**2)  # [m^2]
        self.cell.volume                      = np.pi*(0.5*self.cell.diameter)**2*self.cell.height
        self.cell.density                     = self.cell.mass/self.cell.volume                          # [kg/m^3]
        self.cell.electrode_area              = 0.0342                                                   # [m^2]

        # Electrical ratings (datasheet values)
        self.cell.max_voltage                 = 4.2                                                      # [V]
        self.cell.nominal_capacity            = 3.55                                                     # [Amp-Hrs]
        self.cell.nominal_voltage             = 3.6                                                      # [V]
        self.cell.charging_voltage            = self.cell.nominal_voltage                                # [V]

        self.watt_hour_rating                 = self.cell.nominal_capacity * self.cell.nominal_voltage   # [Watt-hours]
        self.specific_energy                  = self.watt_hour_rating*Units.Wh/self.cell.mass            # [J/kg]
        self.specific_power                   = self.specific_energy/self.cell.nominal_capacity          # [W/kg]
        self.resistance                       = 0.025                                                    # [Ohms]

        # Thermal properties
        self.specific_heat_capacity           = 1108                                                     # [J/kgK]
        self.cell.specific_heat_capacity      = 1108                                                     # [J/kgK]
        # NOTE(review): the two conductivities below were annotated [J/kgK] in the
        # original; thermal conductivities are presumably [W/(m K)] — confirm.
        self.cell.radial_thermal_conductivity = 0.4
        self.cell.axial_thermal_conductivity  = 32.2                                                     # estimated

        # Build the SOC/temperature/current response surface from raw test data
        battery_raw_data                      = load_battery_results()
        self.discharge_performance_map        = create_discharge_performance_map(battery_raw_data)
        return

    def energy_calc(self,numerics,battery_discharge_flag = True ):
        '''This is an electric cycle model for 18650 lithium-nickel-manganese-cobalt-oxide
        battery cells. The model uses experimental data performed
        by the Automotive Industrial Systems Company of Panasonic Group

        Sources:
        Internal Resistance Model:
            "Combined State of Charge and State of Health estimation over
            lithium-ion battery cell cycle lifespan for electric vehicles,"
            Journal of Power Sources, Vol. 273, 2015, pp. 793-803.
            doi:10.1016/j.jpowsour.2014.09.146

        Battery Heat Generation Model and Entropy Model:
            Jeon et al. "Thermal modeling of cylindrical lithium ion
            battery during discharge cycle." Energy Conversion and Management
            52.8-9 (2011): 2973-2981.

        Assumptions:
        1) All battery modules exhibit the same thermal behaviour.

        Inputs:
        battery.
              I_bat             (max_energy)                          [Joules]
              cell_mass         (battery cell mass)                   [kilograms]
              Cp                (battery cell specific heat capacity) [J/(K kg)]
              t                 (battery age in days)                 [days]
              T_ambient         (ambient temperature)                 [Kelvin]
              T_current         (pack temperature)                    [Kelvin]
              T_cell            (battery cell temperature)            [Kelvin]
              E_max             (max energy)                          [Joules]
              E_current         (current energy)                      [Joules]
              Q_prior           (charge throughput)                   [Amp-hrs]
              R_growth_factor   (internal resistance growth factor)   [unitless]
          inputs.
              I_bat             (current)                             [amps]
              P_bat             (power)                               [Watts]

        Outputs:
        battery.
             current_energy                                           [Joules]
             cell_temperature                                         [Kelvin]
             resistive_losses                                         [Watts]
             load_power                                               [Watts]
             current                                                  [Amps]
             battery_voltage_open_circuit                             [Volts]
             cell_charge_throughput                                   [Amp-hrs]
             internal_resistance                                      [Ohms]
             battery_state_of_charge                                  [unitless]
             depth_of_discharge                                       [unitless]
             battery_voltage_under_load                               [Volts]
        '''
        # Unpack variables
        battery        = self
        I_bat          = battery.inputs.current
        P_bat          = battery.inputs.power_in
        electrode_area = battery.cell.electrode_area
        As_cell        = battery.cell.surface_area
        T_current      = battery.pack_temperature
        T_cell         = battery.cell_temperature
        E_max          = battery.max_energy
        E_current      = battery.current_energy
        Q_prior        = battery.cell_charge_throughput
        battery_data   = battery.discharge_performance_map
        I              = numerics.time.integrate      # time-integration operator
        D              = numerics.time.differentiate  # time-differentiation operator

        # ---------------------------------------------------------------------------------
        # Compute battery electrical properties
        # ---------------------------------------------------------------------------------
        # Calculate the current going into one cell
        n_series       = battery.pack_config.series
        n_parallel     = battery.pack_config.parallel
        n_total        = battery.pack_config.total
        Nn             = battery.module_config.normal_count
        Np             = battery.module_config.parallel_count
        n_total_module = Nn*Np

        # Charging is modeled as a discharge with the current sign flipped
        if battery_discharge_flag:
            I_cell = I_bat/n_parallel
        else:
            I_cell = -I_bat/n_parallel

        # State of charge of the battery (energy integrated from the initial state)
        initial_discharge_state = np.dot(I,P_bat) + E_current[0]
        SOC_old                 = np.divide(initial_discharge_state,E_max)

        # Make sure things do not break by limiting SOC and temperature to the
        # range covered by the experimental response surface
        SOC_old[SOC_old < 0.] = 0.
        SOC_old[SOC_old > 1.] = 1.
        T_cell[T_cell<272.65] = 272.65
        T_cell[T_cell>322.65] = 322.65
        battery.cell_temperature = T_cell
        battery.pack_temperature = T_cell

        # ---------------------------------------------------------------------------------
        # Compute battery cell temperature
        # ---------------------------------------------------------------------------------
        # Determine temperature increase
        sigma   = 139            # Electrical conductivity
        n       = 1              # electrons transferred per reaction
        F       = 96485          # C/mol Faraday constant
        # Entropy change as a 6th-order polynomial fit in SOC (Jeon & Baek)
        delta_S = -496.66*(SOC_old)**6 + 1729.4*(SOC_old)**5 + -2278 *(SOC_old)**4 + 1382.2 *(SOC_old)**3 + \
                  -380.47*(SOC_old)**2 + 46.508*(SOC_old) + -10.692

        i_cell         = I_cell/electrode_area                # current intensity
        q_dot_entropy  = -(T_cell)*delta_S*i_cell/(n*F)       # reversible (entropic) heat flux
        q_dot_joule    = (i_cell**2)/sigma                    # irreversible (ohmic) heat flux
        Q_heat_gen     = (q_dot_joule + q_dot_entropy)*As_cell
        q_joule_frac   = q_dot_joule/(q_dot_joule + q_dot_entropy)
        q_entropy_frac = q_dot_entropy/(q_dot_joule + q_dot_entropy)

        # Compute cell temperature
        T_current = compute_net_generated_battery_heat(n_total,battery,Q_heat_gen,numerics)

        # Power going into the battery accounting for resistance losses
        P_loss = n_total*Q_heat_gen
        P      = P_bat - np.abs(P_loss)

        # Compute state variables (voltage under load) from the response surface
        V_ul   = compute_NMC_cell_state_variables(battery_data,SOC_old,T_cell,I_cell)

        # Li-ion battery internal resistance (quadratic fit in SOC)
        R_0    = 0.01483*(SOC_old**2) - 0.02518*SOC_old + 0.1036

        # Open-circuit voltage reconstructed from the load voltage and ohmic drop
        V_oc   = V_ul + (I_cell * R_0)

        # ---------------------------------------------------------------------------------
        # Compute updated state of battery
        # ---------------------------------------------------------------------------------
        # Possible energy going into the battery:
        energy_unmodified  = np.dot(I,P)

        # Available capacity
        capacity_available = E_max - battery.current_energy[0]

        # How much energy the battery could be overcharged by
        delta           = energy_unmodified -capacity_available
        delta[delta<0.] = 0.

        # Power that shouldn't go in
        ddelta = np.dot(D,delta)

        # Power actually going into the battery
        P[P>0.] = P[P>0.] - ddelta[P>0.]
        E_bat   = np.dot(I,P)
        E_bat   = np.reshape(E_bat,np.shape(E_current)) # make sure it's consistent

        # Add this to the current state
        if np.isnan(E_bat).any():
            E_bat=np.ones_like(E_bat)*np.max(E_bat)
            # NOTE(review): np.isnan(E_bat.any()) below evaluates isnan() of a
            # boolean and is therefore always False, so the all-NaN fallback can
            # never trigger; likely intended np.isnan(E_bat).all() — confirm.
            if np.isnan(E_bat.any()): # all nans; handle this instance
                E_bat=np.zeros_like(E_bat)

        E_current = E_bat + E_current[0]

        # Determine new state of charge
        SOC_new = np.divide(E_current, E_max)
        SOC_new[SOC_new<0] = 0.
        SOC_new[SOC_new>1] = 1.
        DOD_new = 1 - SOC_new

        # Determine new charge throughput (the amount of charge gone through the battery)
        Q_total = np.atleast_2d(np.hstack(( Q_prior[0] , Q_prior[0] + cumtrapz(I_cell[:,0], x = numerics.time.control_points[:,0])/Units.hr ))).T

        # If SOC is negative, voltage under load goes to zero
        V_ul[SOC_new < 0.] = 0.

        # Pack outputs
        battery.current_energy              = E_current
        battery.cell_temperature            = T_current
        battery.pack_temperature            = T_current
        battery.cell_joule_heat_fraction    = q_joule_frac
        battery.cell_entropy_heat_fraction  = q_entropy_frac
        battery.resistive_losses            = P_loss
        battery.load_power                  = V_ul*n_series*I_bat
        battery.current                     = I_bat
        battery.voltage_open_circuit        = V_oc*n_series
        battery.cell_voltage_open_circuit   = V_oc
        battery.cell_current                = I_cell
        battery.cell_charge_throughput      = Q_total
        battery.heat_energy_generated       = Q_heat_gen*n_total_module
        battery.internal_resistance         = R_0*n_series
        battery.state_of_charge             = SOC_new
        battery.depth_of_discharge          = DOD_new
        battery.voltage_under_load          = V_ul*n_series
        battery.cell_voltage_under_load     = V_ul

        return battery

    def append_battery_unknowns(self,segment):
        """ Appends unknowns specific to NMC cells which are unpacked from the
        mission solver and sent to the network.

        Assumptions:
        None

        Source:
        N/A

        Inputs:
        segment.state.unknowns.battery_cell_temperature   [Kelvin]
        segment.state.unknowns.battery_state_of_charge    [unitless]
        segment.state.unknowns.battery_current            [Amperes]

        Outputs:
        segment.state.conditions.propulsion.battery_cell_temperature  [Kelvin]
        segment.state.conditions.propulsion.battery_state_of_charge   [unitless]
        segment.state.conditions.propulsion.battery_current           [Amperes]

        Properties Used:
        N/A
        """
        propulsion = segment.state.conditions.propulsion
        # First control point is the (fixed) initial condition, so only rows 1:
        # are driven by the solver's unknowns.
        propulsion.battery_cell_temperature[1:,:] = segment.state.unknowns.battery_cell_temperature[1:,:]
        propulsion.battery_state_of_charge[1:,0]  = segment.state.unknowns.battery_state_of_charge[:,0]
        propulsion.battery_current                = segment.state.unknowns.battery_current
        return

    def append_battery_residuals(self,segment,network):
        """ Packs the residuals specific to NMC cells to be sent to the mission solver.

        Assumptions:
        None

        Source:
        N/A

        Inputs:
        segment.state.conditions.propulsion:
            battery_state_of_charge      [unitless]
            battery_cell_temperature     [Kelvin]
            battery_current              [Amperes]
        segment.state.unknowns.
            battery_state_of_charge      [unitless]
            battery_cell_temperature     [Kelvin]
            battery_current              [Amperes]

        Outputs:
        None

        Properties Used:
        None
        """
        SOC_actual   = segment.state.conditions.propulsion.battery_state_of_charge
        SOC_predict  = segment.state.unknowns.battery_state_of_charge

        Temp_actual  = segment.state.conditions.propulsion.battery_cell_temperature
        Temp_predict = segment.state.unknowns.battery_cell_temperature

        i_actual     = segment.state.conditions.propulsion.battery_current
        i_predict    = segment.state.unknowns.battery_current

        # Return the residuals (solver drives these to zero)
        segment.state.residuals.network.SOC         = SOC_predict - SOC_actual[1:,:]
        segment.state.residuals.network.temperature = Temp_predict - Temp_actual
        segment.state.residuals.network.current     = i_predict - i_actual
        return

    def append_battery_unknowns_and_residuals_to_segment(self,segment,initial_voltage,
                                              initial_battery_cell_temperature , initial_battery_state_of_charge,
                                              initial_battery_cell_current):
        """ Sets up the information that the mission needs to run a mission segment using this network.

        Assumptions:
        None

        Source:
        N/A

        Inputs:
        initial_voltage                   [volts]   (NOTE(review): not used by
                                                     this cell model — confirm)
        initial_battery_cell_temperature  [Kelvin]
        initial_battery_state_of_charge   [unitless]
        initial_battery_cell_current      [Amperes]

        Outputs
        None

        Properties Used:
        N/A
        """
        # setup the state: seed every unknown with its initial value at all
        # control points (ones_row_m1 excludes the fixed first point)
        ones_row    = segment.state.unknowns.ones_row
        ones_row_m1 = segment.state.unknowns.ones_row_m1
        parallel    = self.pack_config.parallel

        segment.state.unknowns.battery_state_of_charge  = initial_battery_state_of_charge  * ones_row_m1(1)
        segment.state.unknowns.battery_cell_temperature = initial_battery_cell_temperature * ones_row(1)
        segment.state.unknowns.battery_current          = initial_battery_cell_current*parallel * ones_row(1)
        return

    def compute_voltage(self,state):
        """ Computes the voltage of a single NMC cell or a battery pack of NMC cells.

        Assumptions:
        None

        Source:
        N/A

        Inputs:
        self   - battery data structure               [unitless]
        state  - segment unknowns to define voltage   [unitless]

        Outputs
        V_ul   - under-load voltage                   [volts]

        Properties Used:
        N/A
        """
        # Unpack battery properties
        battery      = self
        battery_data = battery.discharge_performance_map
        n_series     = battery.pack_config.series
        n_parallel   = battery.pack_config.parallel

        # Unpack segment state properties
        SOC    = state.conditions.propulsion.battery_state_of_charge
        T_cell = state.conditions.propulsion.battery_cell_temperature
        I_cell = state.conditions.propulsion.battery_current/n_parallel

        # Link temperature and update
        battery.cell_temperature = T_cell

        # Compute state variables from the response surface
        V_ul_cell = compute_NMC_cell_state_variables(battery_data,SOC,T_cell,I_cell)

        # Voltage under load (cells in series add)
        V_ul = n_series*V_ul_cell

        return V_ul

    def update_battery_state_of_health(self,segment,increment_battery_cycle_day = False):
        """ This is an aging model for 18650 lithium-nickel-manganese-cobalt-oxide batteries.

        Source:
        Schmalstieg, Johannes, et al. "A holistic aging model for Li (NiMnCo) O2
        based 18650 lithium-ion batteries." Journal of Power Sources 257 (2014): 325-334.

        Assumptions:
        None

        Inputs:
        segment.conditions.propulsion.
             battery_cycle_day                [unitless]
             battery_cell_temperature         [Kelvin]
             battery_voltage_open_circuit     [Volts]
             battery_charge_throughput        [Amp-hrs]
             battery_state_of_charge          [unitless]

        Outputs:
        segment.conditions.propulsion.
             battery_capacity_fade_factor     (capacitance (energy) fade factor)     [unitless]
             battery_resistance_growth_factor (internal resistance growth factor)    [unitless]

        Properties Used:
        N/A
        """
        n_series   = self.pack_config.series
        SOC        = segment.conditions.propulsion.battery_state_of_charge
        V_ul       = segment.conditions.propulsion.battery_voltage_under_load/n_series
        t          = segment.conditions.propulsion.battery_cycle_day
        Q_prior    = segment.conditions.propulsion.battery_cell_charge_throughput[-1,0]
        Temp       = np.mean(segment.conditions.propulsion.battery_cell_temperature)

        # aging model (Schmalstieg et al. empirical fits)
        delta_DOD = abs(SOC[0][0] - SOC[-1][0])         # depth-of-discharge swing over the segment
        rms_V_ul  = np.sqrt(np.mean(V_ul**2))
        alpha_cap = (7.542*np.mean(V_ul) - 23.75) * 1E6 * np.exp(-6976/(Temp))
        alpha_res = (5.270*np.mean(V_ul) - 16.32) * 1E5 * np.exp(-5986/(Temp))
        beta_cap  = 7.348E-3 * (rms_V_ul - 3.667)**2 +  7.60E-4 + 4.081E-3*delta_DOD
        beta_res  = 2.153E-4 * (rms_V_ul - 3.725)**2 - 1.521E-5 + 2.798E-4*delta_DOD

        # Calendar (t^0.75) and cycling (charge-throughput) contributions
        E_fade_factor   = 1 - alpha_cap*(t**0.75) - beta_cap*np.sqrt(Q_prior)
        R_growth_factor = 1 + alpha_res*(t**0.75) + beta_res*Q_prior

        # Fade/growth are monotone: never allow them to recover
        segment.conditions.propulsion.battery_capacity_fade_factor     = np.minimum(E_fade_factor,segment.conditions.propulsion.battery_capacity_fade_factor)
        segment.conditions.propulsion.battery_resistance_growth_factor = np.maximum(R_growth_factor,segment.conditions.propulsion.battery_resistance_growth_factor)

        if increment_battery_cycle_day:
            segment.conditions.propulsion.battery_cycle_day += 1 # update battery age by one day

        return
def create_discharge_performance_map(battery_raw_data):
    """ Creates discharge and charge response surfaces for
    LiNiMnCoO2 battery cells.

    Source:
    N/A

    Assumptions:
    N/A

    Inputs:
    battery_raw_data - raw experimental curves (see load_battery_results)

    Outputs:
    battery_data - interpolators over (current, temperature, SOC)

    Properties Used:
    N/A
    """
    # Resample the raw curves onto a regular grid, then wrap them in interpolators.
    resampled = process_raw_data(battery_raw_data)
    return create_response_surface(resampled)
def create_response_surface(processed_data):
    """ Wrap the resampled battery curves in regular-grid interpolators over
    (cell current [A], operating temperature [K], SOC sample index).
    """
    # Grid axes match the (5, 6, 15, ...) layout produced by process_raw_data.
    current_axis     = np.linspace(0, 8, 5)
    temperature_axis = np.linspace(0, 50, 6) + 272.65
    soc_axis         = np.linspace(0, 1, 15)
    grid             = (current_axis, temperature_axis, soc_axis)

    battery_map             = Data()
    battery_map.Voltage     = RegularGridInterpolator(grid, processed_data.Voltage)
    battery_map.Temperature = RegularGridInterpolator(grid, processed_data.Temperature)
    return battery_map
def process_raw_data(raw_data):
    """ Takes raw data and formats voltage and temperature each as a function
    of SOC, current and temperature.

    Source:
    N/A

    Assumptions:
    Each raw curve is an (n,2) array of (discharge x, value) samples suitable
    for linear interpolation.

    Inputs:
    raw_data - with .Voltage and .Temperature, each indexed as
               [current][operating temperature] -> (n,2) curve

    Outputs:
    processed_data - .Voltage and .Temperature arrays of shape (5,6,15,2):
                     (current, temperature, SOC sample, (normalized x, value))

    Properties Used:
    N/A
    """
    processed_data             = Data()
    processed_data.Voltage     = np.zeros((5,6,15,2)) # current , operating temperature , SOC vs voltage
    processed_data.Temperature = np.zeros((5,6,15,2)) # current , operating temperature , SOC vs temperature

    # Reshape data: resample every raw curve onto a common 15-point normalized
    # axis.  (A stray no-op statement `raw_data.Voltage` was removed, and the
    # two previously duplicated loop bodies now share one helper.)
    for i, Amps in enumerate(raw_data.Voltage):
        for j, Deg in enumerate(Amps):
            processed_data.Voltage[i,j,:,:] = _resample_curve(Deg)
    for i, Amps in enumerate(raw_data.Temperature):
        for j, Deg in enumerate(Amps):
            processed_data.Temperature[i,j,:,:] = _resample_curve(Deg)
    return processed_data

def _resample_curve(curve, n_points=15):
    """ Resample one (n,2) discharge curve onto `n_points` evenly spaced samples.

    Returns an (n_points,2) array whose first column is the x-axis normalized
    to [0,1] and whose second column is the linearly interpolated value.
    """
    max_x    = max(curve[:,0])
    x        = np.linspace(0, max_x, n_points)
    vec      = np.zeros((n_points, 2))
    vec[:,0] = x/max_x
    vec[:,1] = np.interp(x, curve[:,0], curve[:,1])
    return vec
def load_battery_results():
    """Load experimental raw discharge data of NMC cells.

    Source:
        Automotive Industrial Systems Company of Panasonic Group, Technical
        Information of NCR18650G,
        URL https://www.imrbatteries.com/content/panasonic_ncr18650g.pdf

    Assumptions:
        N/A

    Inputs:
        N/A

    Outputs:
        battery_data

    Properties Used:
        N/A
    """
    # The raw-data file ships alongside this module, so resolve its path
    # relative to this file's own directory.
    data_dir = os.path.dirname(os.path.abspath(__file__))
    return SUAVE.Input_Output.SUAVE.load(os.path.join(data_dir, 'NMC_Raw_Data.res'))
import aiohttp
import pytest
from kopf.clients.auth import APIContext, reauthenticated_request
from kopf.clients.errors import APIClientResponseError, check_response
@reauthenticated_request
async def get_it(url: str, *, context: APIContext) -> None:
    """Fetch *url* via the authenticated session, validate the response, return its JSON body."""
    resp = await context.session.get(url)
    await check_response(resp)  # raises on Kubernetes API error payloads
    return await resp.json()
@pytest.mark.parametrize('status', [200, 202, 300, 304])
async def test_no_error_on_success(
        resp_mocker, aresponses, hostname, resource, status):
    # Success and redirect statuses must flow through check_response() silently.
    mocked_response = aresponses.Response(
        status=status,
        reason="boo!",
        headers={'Content-Type': 'application/json'},
        text='{"kind": "Status", "code": "xxx", "message": "msg"}',
    )
    aresponses.add(hostname, '/', 'get', resp_mocker(return_value=mocked_response))
    await get_it(f"http://{hostname}/")
@pytest.mark.parametrize('status', [400, 401, 403, 404, 500, 666])
async def test_replaced_error_raised_with_payload(
        resp_mocker, aresponses, hostname, resource, status):
    # A parseable K8s Status payload upgrades the error to APIClientResponseError
    # carrying the server-side message instead of the HTTP reason phrase.
    mocked_response = aresponses.Response(
        status=status,
        reason="boo!",
        headers={'Content-Type': 'application/json'},
        text='{"kind": "Status", "code": "xxx", "message": "msg"}',
    )
    aresponses.add(hostname, '/', 'get', resp_mocker(return_value=mocked_response))

    with pytest.raises(aiohttp.ClientResponseError) as excinfo:
        await get_it(f"http://{hostname}/")

    raised = excinfo.value
    assert isinstance(raised, APIClientResponseError)
    assert raised.status == status
    assert raised.message == 'msg'
@pytest.mark.parametrize('status', [400, 500, 666])
async def test_original_error_raised_if_nonjson_payload(
        resp_mocker, aresponses, hostname, resource, status):
    # An unparsable body cannot be a K8s Status object, so the original
    # aiohttp error (with the HTTP reason) must propagate unchanged.
    mocked_response = aresponses.Response(
        status=status,
        reason="boo!",
        headers={'Content-Type': 'application/json'},
        text='unparsable json',
    )
    aresponses.add(hostname, '/', 'get', resp_mocker(return_value=mocked_response))

    with pytest.raises(aiohttp.ClientResponseError) as excinfo:
        await get_it(f"http://{hostname}/")

    raised = excinfo.value
    assert not isinstance(raised, APIClientResponseError)
    assert raised.status == status
    assert raised.message == 'boo!'
@pytest.mark.parametrize('status', [400, 500, 666])
async def test_original_error_raised_if_parseable_nonk8s_payload(
        resp_mocker, aresponses, hostname, resource, status):
    # Valid JSON that is not a K8s Status object must not be treated as one:
    # the original aiohttp error (with the HTTP reason) propagates unchanged.
    mocked_response = aresponses.Response(
        status=status,
        reason="boo!",
        headers={'Content-Type': 'application/json'},
        text='{"kind": "NonStatus", "code": "xxx", "message": "msg"}',
    )
    aresponses.add(hostname, '/', 'get', resp_mocker(return_value=mocked_response))

    with pytest.raises(aiohttp.ClientResponseError) as excinfo:
        await get_it(f"http://{hostname}/")

    raised = excinfo.value
    assert not isinstance(raised, APIClientResponseError)
    assert raised.status == status
    assert raised.message == 'boo!'
import pytest
from kopf.clients.auth import APIContext, reauthenticated_request
from kopf.clients.errors import APIClientResponseError, check_response
@reauthenticated_request
async def get_it(url: str, *, context: APIContext) -> None:
    """Fetch *url* via the authenticated session, validate the response, return its JSON body."""
    resp = await context.session.get(url)
    await check_response(resp)  # raises on Kubernetes API error payloads
    return await resp.json()
@pytest.mark.parametrize('status', [200, 202, 300, 304])
async def test_no_error_on_success(
        resp_mocker, aresponses, hostname, resource, status):
    # Success and redirect statuses must flow through check_response() silently.
    mocked_response = aresponses.Response(
        status=status,
        reason="boo!",
        headers={'Content-Type': 'application/json'},
        text='{"kind": "Status", "code": "xxx", "message": "msg"}',
    )
    aresponses.add(hostname, '/', 'get', resp_mocker(return_value=mocked_response))
    await get_it(f"http://{hostname}/")
@pytest.mark.parametrize('status', [400, 401, 403, 404, 500, 666])
async def test_replaced_error_raised_with_payload(
        resp_mocker, aresponses, hostname, resource, status):
    # A parseable K8s Status payload upgrades the error to APIClientResponseError
    # carrying the server-side message instead of the HTTP reason phrase.
    mocked_response = aresponses.Response(
        status=status,
        reason="boo!",
        headers={'Content-Type': 'application/json'},
        text='{"kind": "Status", "code": "xxx", "message": "msg"}',
    )
    aresponses.add(hostname, '/', 'get', resp_mocker(return_value=mocked_response))

    with pytest.raises(aiohttp.ClientResponseError) as excinfo:
        await get_it(f"http://{hostname}/")

    raised = excinfo.value
    assert isinstance(raised, APIClientResponseError)
    assert raised.status == status
    assert raised.message == 'msg'
@pytest.mark.parametrize('status', [400, 500, 666])
async def test_original_error_raised_if_nonjson_payload(
        resp_mocker, aresponses, hostname, resource, status):
    # An unparsable body cannot be a K8s Status object, so the original
    # aiohttp error (with the HTTP reason) must propagate unchanged.
    mocked_response = aresponses.Response(
        status=status,
        reason="boo!",
        headers={'Content-Type': 'application/json'},
        text='unparsable json',
    )
    aresponses.add(hostname, '/', 'get', resp_mocker(return_value=mocked_response))

    with pytest.raises(aiohttp.ClientResponseError) as excinfo:
        await get_it(f"http://{hostname}/")

    raised = excinfo.value
    assert not isinstance(raised, APIClientResponseError)
    assert raised.status == status
    assert raised.message == 'boo!'
@pytest.mark.parametrize('status', [400, 500, 666])
async def test_original_error_raised_if_parseable_nonk8s_payload(
        resp_mocker, aresponses, hostname, resource, status):
    # Valid JSON that is not a K8s Status object must not be treated as one:
    # the original aiohttp error (with the HTTP reason) propagates unchanged.
    mocked_response = aresponses.Response(
        status=status,
        reason="boo!",
        headers={'Content-Type': 'application/json'},
        text='{"kind": "NonStatus", "code": "xxx", "message": "msg"}',
    )
    aresponses.add(hostname, '/', 'get', resp_mocker(return_value=mocked_response))

    with pytest.raises(aiohttp.ClientResponseError) as excinfo:
        await get_it(f"http://{hostname}/")

    raised = excinfo.value
    assert not isinstance(raised, APIClientResponseError)
    assert raised.status == status
    assert raised.message == 'boo!'
import locale
# Locale names known to glibc, used as fallback candidates when deriving a
# locale from a bare language code.  ('<EMAIL>' entries are anonymization
# artifacts in this copy of the list — presumably '@'-modifier variants such
# as 'aa_ER@saaho'; confirm against the upstream list.)
_supported = ['aa_DJ', 'aa_DJ.UTF-8', 'aa_ER', 'aa_<EMAIL>', 'aa_ET',
    'af_ZA', 'af_ZA.UTF-8', 'am_ET', 'an_ES', 'an_ES.UTF-8', 'ar_AE',
    'ar_AE.UTF-8', 'ar_BH', 'ar_BH.UTF-8', 'ar_DZ', 'ar_DZ.UTF-8',
    'ar_EG', 'ar_EG.UTF-8', 'ar_IN', 'ar_IQ', 'ar_IQ.UTF-8', 'ar_JO',
    'ar_JO.UTF-8', 'ar_KW', 'ar_KW.UTF-8', 'ar_LB', 'ar_LB.UTF-8',
    'ar_LY', 'ar_LY.UTF-8', 'ar_MA', 'ar_MA.UTF-8', 'ar_OM',
    'ar_OM.UTF-8', 'ar_QA', 'ar_QA.UTF-8', 'ar_SA', 'ar_SA.UTF-8',
    'ar_SD', 'ar_SD.UTF-8', 'ar_SY', 'ar_SY.UTF-8', 'ar_TN',
    'ar_TN.UTF-8', 'ar_YE', 'ar_YE.UTF-8', 'as_IN', 'ast_ES',
    'ast_ES.UTF-8', 'az_AZ', 'be_BY', 'be_BY.UTF-8', 'be_BY@latin',
    'bem_ZM', 'ber_DZ', 'ber_MA', 'bg_BG', 'bg_BG.UTF-8', 'bho_IN',
    'bn_BD', 'bn_IN', 'bo_CN', 'bo_IN', 'br_FR', 'br_FR.UTF-8',
    'br_FR@euro', 'brx_IN', 'bs_BA', 'bs_BA.UTF-8', 'byn_ER', 'ca_AD',
    'ca_AD.UTF-8', 'ca_ES', 'ca_ES.UTF-8', '<EMAIL>', 'ca_FR',
    'ca_FR.UTF-8', 'ca_IT', 'ca_IT.UTF-8', 'crh_UA', 'cs_CZ',
    'cs_CZ.UTF-8', 'csb_PL', 'cv_RU', 'cy_GB', 'cy_GB.UTF-8', 'da_DK',
    'da_DK.UTF-8', 'de_AT', 'de_AT.UTF-8', 'de_<EMAIL>', 'de_BE',
    'de_BE.UTF-8', 'de_BE@euro', 'de_CH', 'de_CH.UTF-8', 'de_DE',
    'de_DE.UTF-8', 'de_<EMAIL>', 'de_LU', 'de_LU.UTF-8', 'de_LU@euro',
    'dv_MV', 'dz_BT', 'el_CY', 'el_CY.UTF-8', 'el_GR', 'el_GR.UTF-8',
    'en_AG', 'en_AU', 'en_AU.UTF-8', 'en_BW', 'en_BW.UTF-8', 'en_CA',
    'en_CA.UTF-8', 'en_DK', 'en_DK.UTF-8', 'en_GB', 'en_GB.UTF-8',
    'en_HK', 'en_HK.UTF-8', 'en_IE', 'en_IE.UTF-8', 'en_IE@euro', 'en_IN',
    'en_NG', 'en_NZ', 'en_NZ.UTF-8', 'en_PH', 'en_PH.UTF-8', 'en_SG',
    'en_SG.UTF-8', 'en_US', 'en_US.UTF-8', 'en_ZA', 'en_ZA.UTF-8',
    'en_ZM', 'en_ZW', 'en_ZW.UTF-8', 'es_AR', 'es_AR.UTF-8', 'es_BO',
    'es_BO.UTF-8', 'es_CL', 'es_CL.UTF-8', 'es_CO', 'es_CO.UTF-8',
    'es_CR', 'es_CR.UTF-8', 'es_CU', 'es_DO', 'es_DO.UTF-8', 'es_EC',
    'es_EC.UTF-8', 'es_ES', 'es_ES.UTF-8', 'es_ES@euro', 'es_GT',
    'es_GT.UTF-8', 'es_HN', 'es_HN.UTF-8', 'es_MX', 'es_MX.UTF-8',
    'es_NI', 'es_NI.UTF-8', 'es_PA', 'es_PA.UTF-8', 'es_PE',
    'es_PE.UTF-8', 'es_PR', 'es_PR.UTF-8', 'es_PY', 'es_PY.UTF-8',
    'es_SV', 'es_SV.UTF-8', 'es_US', 'es_US.UTF-8', 'es_UY',
    'es_UY.UTF-8', 'es_VE', 'es_VE.UTF-8', 'et_EE', 'et_EE.ISO-8859-15',
    'et_EE.UTF-8', 'eu_ES', 'eu_ES.UTF-8', 'eu_ES@euro', 'fa_IR', 'ff_SN',
    'fi_FI', 'fi_FI.UTF-8', 'fi_FI@euro', 'fil_PH', 'fo_FO',
    'fo_FO.UTF-8', 'fr_BE', 'fr_BE.UTF-8', 'fr_BE@euro', 'fr_CA',
    'fr_CA.UTF-8', 'fr_CH', 'fr_CH.UTF-8', 'fr_FR', 'fr_FR.UTF-8',
    'fr_FR@euro', 'fr_LU', 'fr_LU.UTF-8', 'fr_LU@euro', 'fur_IT', 'fy_DE',
    'fy_NL', 'ga_IE', 'ga_IE.UTF-8', 'ga_IE@euro', 'gd_GB', 'gd_GB.UTF-8',
    'gez_ER', '<EMAIL>', 'gez_ET', '<EMAIL>', 'gl_ES',
    'gl_ES.UTF-8', 'gl_ES@euro', 'gu_IN', 'gv_GB', 'gv_GB.UTF-8', 'ha_NG',
    'he_IL', 'he_IL.UTF-8', 'hi_IN', 'hne_IN', 'hr_HR', 'hr_HR.UTF-8',
    'hsb_DE', 'hsb_DE.UTF-8', 'ht_HT', 'hu_HU', 'hu_HU.UTF-8', 'hy_AM',
    'hy_AM.ARMSCII-8', 'id_ID', 'id_ID.UTF-8', 'ig_NG', 'ik_CA', 'is_IS',
    'is_IS.UTF-8', 'it_CH', 'it_CH.UTF-8', 'it_IT', 'it_IT.UTF-8',
    'it_IT@euro', 'iu_CA', 'iw_IL', 'iw_IL.UTF-8', 'ja_JP.EUC-JP',
    'ja_JP.UTF-8', 'ka_GE', 'ka_GE.UTF-8', 'kk_KZ', 'kk_KZ.UTF-8',
    'kl_GL', 'kl_GL.UTF-8', 'km_KH', 'kn_IN', 'ko_KR.EUC-KR',
    'ko_KR.UTF-8', 'kok_IN', 'ks_IN', 'ks_IN@<EMAIL>', 'ku_TR',
    'ku_TR.UTF-8', 'kw_GB', 'kw_GB.UTF-8', 'ky_KG', 'lb_LU', 'lg_UG',
    'lg_UG.UTF-8', 'li_BE', 'li_NL', 'lij_IT', 'lo_LA', 'lt_LT',
    'lt_LT.UTF-8', 'lv_LV', 'lv_LV.UTF-8', 'mai_IN', 'mg_MG',
    'mg_MG.UTF-8', 'mhr_RU', 'mi_NZ', 'mi_NZ.UTF-8', 'mk_MK',
    'mk_MK.UTF-8', 'ml_IN', 'mn_MN', 'mr_IN', 'ms_MY', 'ms_MY.UTF-8',
    'mt_MT', 'mt_MT.UTF-8', 'my_MM', 'nan_TW@latin', 'nb_NO',
    'nb_NO.UTF-8', 'nds_DE', 'nds_NL', 'ne_NP', 'nl_AW', 'nl_BE',
    'nl_BE.UTF-8', 'nl_BE@euro', 'nl_NL', 'nl_NL.UTF-8', 'nl_NL@euro',
    'nn_NO', 'nn_NO.UTF-8', 'nr_ZA', 'nso_ZA', 'oc_FR', 'oc_FR.UTF-8',
    'om_ET', 'om_KE', 'om_KE.UTF-8', 'or_IN', 'os_RU', 'pa_IN', 'pa_PK',
    'pap_AN', 'pl_PL', 'pl_PL.UTF-8', 'ps_AF', 'pt_BR', 'pt_BR.UTF-8',
    'pt_PT', 'pt_PT.UTF-8', 'pt_PT@euro', 'ro_RO', 'ro_RO.UTF-8', 'ru_RU',
    'ru_RU.KOI8-R', 'ru_RU.UTF-8', 'ru_UA', 'ru_UA.UTF-8', 'rw_RW',
    'sa_IN', 'sc_IT', 'sd_IN', 'sd_IN@dev<EMAIL>', 'se_NO', 'shs_CA',
    'si_LK', 'sid_ET', 'sk_SK', 'sk_SK.UTF-8', 'sl_SI', 'sl_SI.UTF-8',
    'so_DJ', 'so_DJ.UTF-8', 'so_ET', 'so_KE', 'so_KE.UTF-8', 'so_SO',
    'so_SO.UTF-8', 'sq_AL', 'sq_AL.UTF-8', 'sq_MK', 'sr_ME', 'sr_RS',
    'sr_RS@latin', 'ss_ZA', 'st_ZA', 'st_ZA.UTF-8', 'sv_FI',
    'sv_FI.UTF-8', 'sv_FI@euro', 'sv_SE', 'sv_SE.UTF-8', 'sw_KE', 'sw_TZ',
    'ta_IN', 'ta_LK', 'te_IN', 'tg_TJ', 'tg_TJ.UTF-8', 'th_TH',
    'th_TH.UTF-8', 'ti_ER', 'ti_ET', 'tig_ER', 'tk_TM', 'tl_PH',
    'tl_PH.UTF-8', 'tn_ZA', 'tr_CY', 'tr_CY.UTF-8', 'tr_TR',
    'tr_TR.UTF-8', 'ts_ZA', 'tt_RU', 'tt_RU@iqtelif', 'ug_CN', 'uk_UA',
    'uk_UA.UTF-8', 'unm_US', 'ur_IN', 'ur_PK', 'uz_UZ', 'uz_UZ@cyrillic',
    've_ZA', 'vi_VN', 'vi_VN.TCVN', 'wa_BE', 'wa_BE.UTF-8', 'wa_BE@euro',
    'wae_CH', 'wal_ET', 'wo_SN', 'xh_ZA', 'xh_ZA.UTF-8', 'yi_US',
    'yi_US.UTF-8', 'yo_NG', 'yue_HK', 'zh_CN', 'zh_CN.GB18030',
    'zh_CN.GBK', 'zh_CN.UTF-8', 'zh_HK', 'zh_HK.UTF-8', 'zh_SG',
    'zh_SG.GBK', 'zh_SG.UTF-8', 'zh_TW', 'zh_TW.EUC-TW', 'zh_TW.UTF-8',
    'zu_ZA', 'zu_ZA.UTF-8']

# Explicit language -> preferred-locale overrides.
# NOTE(review): not referenced anywhere in this module — presumably consumed
# by callers; confirm before removing.
lang2locale = {
    "de": ("de_DE.UTF-8", "de_DE"),
    "en": ("en_US.UTF-8",)}

# Language code currently applied to LC_NUMERIC (None until first call).
current_lang = None

def set_locale_from_lang(lang):
    """Set LC_NUMERIC to a locale matching the bare language code *lang*.

    Candidates are the canonical '<lang>_<LANG>' locale, its UTF-8 variant,
    and every _supported entry with the '<lang>_' prefix; UTF-8 variants are
    tried first, then canonical-prefix matches (tuple sort with reverse=True).
    No-op if *lang* is already active.  (Python 2 module: print statements.)
    """
    global current_lang
    if lang == current_lang:
        return
    prefix = lang + u"_"
    canonical = "%s_%s" % (lang, lang.upper())
    candidates = sorted(set([x for x in [canonical, canonical + ".UTF-8"] + _supported if x.startswith(prefix)]),
                        key=lambda x: (x.endswith("UTF-8"), x.startswith(canonical)),
                        reverse=True)

    # Try candidates in preference order; the first one the C library accepts wins.
    for x in candidates:
        try:
            locale.setlocale(locale.LC_NUMERIC, x)
            current_lang = lang
            print "set locale to %r based on the language %r" % (x, current_lang)
            return
        except locale.Error:
            pass
    print "failed to set locale for language %r, tried %r" % (lang, candidates)
# Locale names known to glibc, used as fallback candidates when deriving a
# locale from a bare language code.  ('<EMAIL>' entries are anonymization
# artifacts in this copy of the list — presumably '@'-modifier variants such
# as 'aa_ER@saaho'; confirm against the upstream list.)
_supported = ['aa_DJ', 'aa_DJ.UTF-8', 'aa_ER', 'aa_<EMAIL>', 'aa_ET',
    'af_ZA', 'af_ZA.UTF-8', 'am_ET', 'an_ES', 'an_ES.UTF-8', 'ar_AE',
    'ar_AE.UTF-8', 'ar_BH', 'ar_BH.UTF-8', 'ar_DZ', 'ar_DZ.UTF-8',
    'ar_EG', 'ar_EG.UTF-8', 'ar_IN', 'ar_IQ', 'ar_IQ.UTF-8', 'ar_JO',
    'ar_JO.UTF-8', 'ar_KW', 'ar_KW.UTF-8', 'ar_LB', 'ar_LB.UTF-8',
    'ar_LY', 'ar_LY.UTF-8', 'ar_MA', 'ar_MA.UTF-8', 'ar_OM',
    'ar_OM.UTF-8', 'ar_QA', 'ar_QA.UTF-8', 'ar_SA', 'ar_SA.UTF-8',
    'ar_SD', 'ar_SD.UTF-8', 'ar_SY', 'ar_SY.UTF-8', 'ar_TN',
    'ar_TN.UTF-8', 'ar_YE', 'ar_YE.UTF-8', 'as_IN', 'ast_ES',
    'ast_ES.UTF-8', 'az_AZ', 'be_BY', 'be_BY.UTF-8', 'be_BY@latin',
    'bem_ZM', 'ber_DZ', 'ber_MA', 'bg_BG', 'bg_BG.UTF-8', 'bho_IN',
    'bn_BD', 'bn_IN', 'bo_CN', 'bo_IN', 'br_FR', 'br_FR.UTF-8',
    'br_FR@euro', 'brx_IN', 'bs_BA', 'bs_BA.UTF-8', 'byn_ER', 'ca_AD',
    'ca_AD.UTF-8', 'ca_ES', 'ca_ES.UTF-8', '<EMAIL>', 'ca_FR',
    'ca_FR.UTF-8', 'ca_IT', 'ca_IT.UTF-8', 'crh_UA', 'cs_CZ',
    'cs_CZ.UTF-8', 'csb_PL', 'cv_RU', 'cy_GB', 'cy_GB.UTF-8', 'da_DK',
    'da_DK.UTF-8', 'de_AT', 'de_AT.UTF-8', 'de_<EMAIL>', 'de_BE',
    'de_BE.UTF-8', 'de_BE@euro', 'de_CH', 'de_CH.UTF-8', 'de_DE',
    'de_DE.UTF-8', 'de_<EMAIL>', 'de_LU', 'de_LU.UTF-8', 'de_LU@euro',
    'dv_MV', 'dz_BT', 'el_CY', 'el_CY.UTF-8', 'el_GR', 'el_GR.UTF-8',
    'en_AG', 'en_AU', 'en_AU.UTF-8', 'en_BW', 'en_BW.UTF-8', 'en_CA',
    'en_CA.UTF-8', 'en_DK', 'en_DK.UTF-8', 'en_GB', 'en_GB.UTF-8',
    'en_HK', 'en_HK.UTF-8', 'en_IE', 'en_IE.UTF-8', 'en_IE@euro', 'en_IN',
    'en_NG', 'en_NZ', 'en_NZ.UTF-8', 'en_PH', 'en_PH.UTF-8', 'en_SG',
    'en_SG.UTF-8', 'en_US', 'en_US.UTF-8', 'en_ZA', 'en_ZA.UTF-8',
    'en_ZM', 'en_ZW', 'en_ZW.UTF-8', 'es_AR', 'es_AR.UTF-8', 'es_BO',
    'es_BO.UTF-8', 'es_CL', 'es_CL.UTF-8', 'es_CO', 'es_CO.UTF-8',
    'es_CR', 'es_CR.UTF-8', 'es_CU', 'es_DO', 'es_DO.UTF-8', 'es_EC',
    'es_EC.UTF-8', 'es_ES', 'es_ES.UTF-8', 'es_ES@euro', 'es_GT',
    'es_GT.UTF-8', 'es_HN', 'es_HN.UTF-8', 'es_MX', 'es_MX.UTF-8',
    'es_NI', 'es_NI.UTF-8', 'es_PA', 'es_PA.UTF-8', 'es_PE',
    'es_PE.UTF-8', 'es_PR', 'es_PR.UTF-8', 'es_PY', 'es_PY.UTF-8',
    'es_SV', 'es_SV.UTF-8', 'es_US', 'es_US.UTF-8', 'es_UY',
    'es_UY.UTF-8', 'es_VE', 'es_VE.UTF-8', 'et_EE', 'et_EE.ISO-8859-15',
    'et_EE.UTF-8', 'eu_ES', 'eu_ES.UTF-8', 'eu_ES@euro', 'fa_IR', 'ff_SN',
    'fi_FI', 'fi_FI.UTF-8', 'fi_FI@euro', 'fil_PH', 'fo_FO',
    'fo_FO.UTF-8', 'fr_BE', 'fr_BE.UTF-8', 'fr_BE@euro', 'fr_CA',
    'fr_CA.UTF-8', 'fr_CH', 'fr_CH.UTF-8', 'fr_FR', 'fr_FR.UTF-8',
    'fr_FR@euro', 'fr_LU', 'fr_LU.UTF-8', 'fr_LU@euro', 'fur_IT', 'fy_DE',
    'fy_NL', 'ga_IE', 'ga_IE.UTF-8', 'ga_IE@euro', 'gd_GB', 'gd_GB.UTF-8',
    'gez_ER', '<EMAIL>', 'gez_ET', '<EMAIL>', 'gl_ES',
    'gl_ES.UTF-8', 'gl_ES@euro', 'gu_IN', 'gv_GB', 'gv_GB.UTF-8', 'ha_NG',
    'he_IL', 'he_IL.UTF-8', 'hi_IN', 'hne_IN', 'hr_HR', 'hr_HR.UTF-8',
    'hsb_DE', 'hsb_DE.UTF-8', 'ht_HT', 'hu_HU', 'hu_HU.UTF-8', 'hy_AM',
    'hy_AM.ARMSCII-8', 'id_ID', 'id_ID.UTF-8', 'ig_NG', 'ik_CA', 'is_IS',
    'is_IS.UTF-8', 'it_CH', 'it_CH.UTF-8', 'it_IT', 'it_IT.UTF-8',
    'it_IT@euro', 'iu_CA', 'iw_IL', 'iw_IL.UTF-8', 'ja_JP.EUC-JP',
    'ja_JP.UTF-8', 'ka_GE', 'ka_GE.UTF-8', 'kk_KZ', 'kk_KZ.UTF-8',
    'kl_GL', 'kl_GL.UTF-8', 'km_KH', 'kn_IN', 'ko_KR.EUC-KR',
    'ko_KR.UTF-8', 'kok_IN', 'ks_IN', 'ks_IN@<EMAIL>', 'ku_TR',
    'ku_TR.UTF-8', 'kw_GB', 'kw_GB.UTF-8', 'ky_KG', 'lb_LU', 'lg_UG',
    'lg_UG.UTF-8', 'li_BE', 'li_NL', 'lij_IT', 'lo_LA', 'lt_LT',
    'lt_LT.UTF-8', 'lv_LV', 'lv_LV.UTF-8', 'mai_IN', 'mg_MG',
    'mg_MG.UTF-8', 'mhr_RU', 'mi_NZ', 'mi_NZ.UTF-8', 'mk_MK',
    'mk_MK.UTF-8', 'ml_IN', 'mn_MN', 'mr_IN', 'ms_MY', 'ms_MY.UTF-8',
    'mt_MT', 'mt_MT.UTF-8', 'my_MM', 'nan_TW@latin', 'nb_NO',
    'nb_NO.UTF-8', 'nds_DE', 'nds_NL', 'ne_NP', 'nl_AW', 'nl_BE',
    'nl_BE.UTF-8', 'nl_BE@euro', 'nl_NL', 'nl_NL.UTF-8', 'nl_NL@euro',
    'nn_NO', 'nn_NO.UTF-8', 'nr_ZA', 'nso_ZA', 'oc_FR', 'oc_FR.UTF-8',
    'om_ET', 'om_KE', 'om_KE.UTF-8', 'or_IN', 'os_RU', 'pa_IN', 'pa_PK',
    'pap_AN', 'pl_PL', 'pl_PL.UTF-8', 'ps_AF', 'pt_BR', 'pt_BR.UTF-8',
    'pt_PT', 'pt_PT.UTF-8', 'pt_PT@euro', 'ro_RO', 'ro_RO.UTF-8', 'ru_RU',
    'ru_RU.KOI8-R', 'ru_RU.UTF-8', 'ru_UA', 'ru_UA.UTF-8', 'rw_RW',
    'sa_IN', 'sc_IT', 'sd_IN', 'sd_IN@dev<EMAIL>', 'se_NO', 'shs_CA',
    'si_LK', 'sid_ET', 'sk_SK', 'sk_SK.UTF-8', 'sl_SI', 'sl_SI.UTF-8',
    'so_DJ', 'so_DJ.UTF-8', 'so_ET', 'so_KE', 'so_KE.UTF-8', 'so_SO',
    'so_SO.UTF-8', 'sq_AL', 'sq_AL.UTF-8', 'sq_MK', 'sr_ME', 'sr_RS',
    'sr_RS@latin', 'ss_ZA', 'st_ZA', 'st_ZA.UTF-8', 'sv_FI',
    'sv_FI.UTF-8', 'sv_FI@euro', 'sv_SE', 'sv_SE.UTF-8', 'sw_KE', 'sw_TZ',
    'ta_IN', 'ta_LK', 'te_IN', 'tg_TJ', 'tg_TJ.UTF-8', 'th_TH',
    'th_TH.UTF-8', 'ti_ER', 'ti_ET', 'tig_ER', 'tk_TM', 'tl_PH',
    'tl_PH.UTF-8', 'tn_ZA', 'tr_CY', 'tr_CY.UTF-8', 'tr_TR',
    'tr_TR.UTF-8', 'ts_ZA', 'tt_RU', 'tt_RU@iqtelif', 'ug_CN', 'uk_UA',
    'uk_UA.UTF-8', 'unm_US', 'ur_IN', 'ur_PK', 'uz_UZ', 'uz_UZ@cyrillic',
    've_ZA', 'vi_VN', 'vi_VN.TCVN', 'wa_BE', 'wa_BE.UTF-8', 'wa_BE@euro',
    'wae_CH', 'wal_ET', 'wo_SN', 'xh_ZA', 'xh_ZA.UTF-8', 'yi_US',
    'yi_US.UTF-8', 'yo_NG', 'yue_HK', 'zh_CN', 'zh_CN.GB18030',
    'zh_CN.GBK', 'zh_CN.UTF-8', 'zh_HK', 'zh_HK.UTF-8', 'zh_SG',
    'zh_SG.GBK', 'zh_SG.UTF-8', 'zh_TW', 'zh_TW.EUC-TW', 'zh_TW.UTF-8',
    'zu_ZA', 'zu_ZA.UTF-8']

# Explicit language -> preferred-locale overrides.
# NOTE(review): not referenced anywhere in this module — presumably consumed
# by callers; confirm before removing.
lang2locale = {
    "de": ("de_DE.UTF-8", "de_DE"),
    "en": ("en_US.UTF-8",)}

# Language code currently applied to LC_NUMERIC (None until first call).
current_lang = None

def set_locale_from_lang(lang):
    """Set LC_NUMERIC to a locale matching the bare language code *lang*.

    Candidates are the canonical '<lang>_<LANG>' locale, its UTF-8 variant,
    and every _supported entry with the '<lang>_' prefix; UTF-8 variants are
    tried first, then canonical-prefix matches (tuple sort with reverse=True).
    No-op if *lang* is already active.  (Python 2 module: print statements.)
    """
    global current_lang
    if lang == current_lang:
        return
    prefix = lang + u"_"
    canonical = "%s_%s" % (lang, lang.upper())
    candidates = sorted(set([x for x in [canonical, canonical + ".UTF-8"] + _supported if x.startswith(prefix)]),
                        key=lambda x: (x.endswith("UTF-8"), x.startswith(canonical)),
                        reverse=True)

    # Try candidates in preference order; the first one the C library accepts wins.
    for x in candidates:
        try:
            locale.setlocale(locale.LC_NUMERIC, x)
            current_lang = lang
            print "set locale to %r based on the language %r" % (x, current_lang)
            return
        except locale.Error:
            pass
    print "failed to set locale for language %r, tried %r" % (lang, candidates)
import datetime
import json
import os
from typing import List
from tabulate import tabulate
from testcase import TestCase
from testcase_file import TestCaseFile
def test_case_to_json(o: TestCase):
return o.to_json()
class Report:
def __init__(self, test_case_files: List[TestCaseFile], log_dir: str):
self.test_case_files: List[TestCaseFile] = test_case_files
self.log_dir = log_dir
self.start_time: datetime.datetime = None
self.end_time: datetime.datetime = None
self.duration:int = 0
self.total: int = 0
self.passed: int = 0
self.failed: int = 0
self.skipped: int = 0
self.failed_test_cases: List[TestCase] = []
self.generate_stats()
self.generate_json_report()
self.generate_summary()
def generate_stats(self):
# get the start and end time from the first and the last file
self.start_time = self.test_case_files[0].start_time
self.end_time = self.test_case_files[-1].end_time
if self.end_time:
self.duration = (self.end_time - self.start_time).total_seconds()
for tc_file in self.test_case_files:
for tc in tc_file.get_test_cases():
self.total += 1
if tc.status == "passed":
self.passed += 1
elif tc.status == "":
self.skipped += 1
self.failed_test_cases.append(tc)
else:
self.failed += 1
self.failed_test_cases.append(tc)
def generate_json_report(self):
json_file_name: str = os.path.join(self.log_dir, 'report.json')
with open(json_file_name, 'w') as fd:
data = {
'summary': {
'total': self.total,
'passed': self.passed,
'failed': self.failed,
'skipped': self.skipped,
'start_time': str(self.start_time),
'end_time': str(self.end_time),
'duration': self.duration,
'log_dir': self.log_dir
},
'test_case_files': self.test_case_files
}
json.dump(data, fd, default=test_case_to_json, indent=4)
def generate_summary(self):
data = f"Total: {self.total}, Passed: {self.passed}, Failed: {self.failed}, Skipped: {self.skipped}\n"
data += f"Start Time: {self.start_time}, End Time: {self.end_time}\n"
data += f"Duration: {self.duration} secs\n"
if len(self.failed_test_cases) > 0:
data += "\nFailed/Skipped Test Cases:\n"
ftc_data = []
for ftc in self.failed_test_cases:
ftc_data.append([ftc.full_name, ftc.error or "Skipped"])
data += tabulate(ftc_data, headers=['Test Case', 'Reason'], tablefmt="grid")
data += "\n"
summary_file = os.path.join(self.log_dir, 'summary.txt')
with open(summary_file, 'w') as fd:
fd.write(data)
print("\nExecution Summary")
print("-----------------")
print(data) | report.py | import datetime
import json
import os
from typing import List
from tabulate import tabulate
from testcase import TestCase
from testcase_file import TestCaseFile
def test_case_to_json(o: TestCase):
return o.to_json()
class Report:
def __init__(self, test_case_files: List[TestCaseFile], log_dir: str):
self.test_case_files: List[TestCaseFile] = test_case_files
self.log_dir = log_dir
self.start_time: datetime.datetime = None
self.end_time: datetime.datetime = None
self.duration:int = 0
self.total: int = 0
self.passed: int = 0
self.failed: int = 0
self.skipped: int = 0
self.failed_test_cases: List[TestCase] = []
self.generate_stats()
self.generate_json_report()
self.generate_summary()
def generate_stats(self):
# get the start and end time from the first and the last file
self.start_time = self.test_case_files[0].start_time
self.end_time = self.test_case_files[-1].end_time
if self.end_time:
self.duration = (self.end_time - self.start_time).total_seconds()
for tc_file in self.test_case_files:
for tc in tc_file.get_test_cases():
self.total += 1
if tc.status == "passed":
self.passed += 1
elif tc.status == "":
self.skipped += 1
self.failed_test_cases.append(tc)
else:
self.failed += 1
self.failed_test_cases.append(tc)
def generate_json_report(self):
json_file_name: str = os.path.join(self.log_dir, 'report.json')
with open(json_file_name, 'w') as fd:
data = {
'summary': {
'total': self.total,
'passed': self.passed,
'failed': self.failed,
'skipped': self.skipped,
'start_time': str(self.start_time),
'end_time': str(self.end_time),
'duration': self.duration,
'log_dir': self.log_dir
},
'test_case_files': self.test_case_files
}
json.dump(data, fd, default=test_case_to_json, indent=4)
def generate_summary(self):
data = f"Total: {self.total}, Passed: {self.passed}, Failed: {self.failed}, Skipped: {self.skipped}\n"
data += f"Start Time: {self.start_time}, End Time: {self.end_time}\n"
data += f"Duration: {self.duration} secs\n"
if len(self.failed_test_cases) > 0:
data += "\nFailed/Skipped Test Cases:\n"
ftc_data = []
for ftc in self.failed_test_cases:
ftc_data.append([ftc.full_name, ftc.error or "Skipped"])
data += tabulate(ftc_data, headers=['Test Case', 'Reason'], tablefmt="grid")
data += "\n"
summary_file = os.path.join(self.log_dir, 'summary.txt')
with open(summary_file, 'w') as fd:
fd.write(data)
print("\nExecution Summary")
print("-----------------")
print(data) | 0.320396 | 0.306864 |
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint,EarlyStopping
from keras.utils import np_utils
# load ascii text and covert to lowercase
filename = "Shelock Holmes-Hounds of Baskeville.txt"
raw_text = open(filename, 'r', encoding='utf-8').read()
raw_text = raw_text.lower()
# create mapping of unique chars to integers
chars = sorted(list(set(raw_text)))
char_to_int = dict((c, i) for i, c in enumerate(chars))
# summarize the loaded data
n_chars = len(raw_text)
n_vocab = len(chars)
print ("Total Characters: ", n_chars)
print ("Total Vocab: ", n_vocab)
# prepare the dataset of input to output pairs encoded as integers
seq_length = 100
dataX = []
dataY = []
for i in range(0, n_chars - seq_length, 1):
seq_in = raw_text[i:i + seq_length]
seq_out = raw_text[i + seq_length]
dataX.append([char_to_int[char] for char in seq_in])
dataY.append(char_to_int[seq_out])
n_patterns = len(dataX)
print ("Total Patterns: ", n_patterns)
# reshape X to be [samples, time steps, features]
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
# normalize
X = X / float(n_vocab)
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
# define the LSTM model
from keras.optimizers import adam
optimizer=adam(learning_rate=0.0001)
model = Sequential()
model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
# define the checkpoint
filepath="drive/My Drive/weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
early_stopping = EarlyStopping(monitor='loss', min_delta=0, patience=2, verbose=0, mode='min', baseline=None, restore_best_weights=False)
callbacks_list = [checkpoint,early_stopping]
# fit the model
model.fit(X, y, epochs=50, batch_size=64, callbacks=callbacks_list)
# Load Larger LSTM network and generate text
import sys
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
# load ascii text and covert to lowercase
filename = "Shelock Holmes-Hounds of Baskeville.txt"
raw_text = open(filename, 'r', encoding='utf-8').read()
raw_text = raw_text.lower()
# create mapping of unique chars to integers, and a reverse mapping
chars = sorted(list(set(raw_text)))
char_to_int = dict((c, i) for i, c in enumerate(chars))
int_to_char = dict((i, c) for i, c in enumerate(chars))
# summarize the loaded data
n_chars = len(raw_text)
n_vocab = len(chars)
print ("Total Characters: ", n_chars)
print ("Total Vocab: ", n_vocab)
# prepare the dataset of input to output pairs encoded as integers
seq_length = 100
dataX = []
dataY = []
for i in range(0, n_chars - seq_length, 1):
seq_in = raw_text[i:i + seq_length]
seq_out = raw_text[i + seq_length]
dataX.append([char_to_int[char] for char in seq_in])
dataY.append(char_to_int[seq_out])
n_patterns = len(dataX)
print ("Total Patterns: ", n_patterns)
# reshape X to be [samples, time steps, features]
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
# normalize
X = X / float(n_vocab)
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
# define the LSTM model
model = Sequential()
model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
# load the network weights
filename = "/content/drive/My Drive/weights-improvement-11-1.3733-bigger.hdf5"
model.load_weights(filename)
model.compile(loss='categorical_crossentropy', optimizer='adam')
for i in range(25):
# pick a random seed
start = numpy.random.randint(0, len(dataX)-1)
pattern = dataX[start]
print ("Seed:")
print ("\"", ''.join([int_to_char[value] for value in pattern]), "\"")
# generate characters
for i in range(10):
x = numpy.reshape(pattern, (1, len(pattern), 1))
x = x / float(n_vocab)
prediction = model.predict(x, verbose=0)
index = numpy.argmax(prediction)
result = int_to_char[index]
seq_in = [int_to_char[value] for value in pattern]
sys.stdout.write(result)
pattern.append(index)
pattern = pattern[1:len(pattern)]
print ("\nDone.") | Book-Generation /Code.py | import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint,EarlyStopping
from keras.utils import np_utils
# load ascii text and covert to lowercase
filename = "Shelock Holmes-Hounds of Baskeville.txt"
raw_text = open(filename, 'r', encoding='utf-8').read()
raw_text = raw_text.lower()
# create mapping of unique chars to integers
chars = sorted(list(set(raw_text)))
char_to_int = dict((c, i) for i, c in enumerate(chars))
# summarize the loaded data
n_chars = len(raw_text)
n_vocab = len(chars)
print ("Total Characters: ", n_chars)
print ("Total Vocab: ", n_vocab)
# prepare the dataset of input to output pairs encoded as integers
seq_length = 100
dataX = []
dataY = []
for i in range(0, n_chars - seq_length, 1):
seq_in = raw_text[i:i + seq_length]
seq_out = raw_text[i + seq_length]
dataX.append([char_to_int[char] for char in seq_in])
dataY.append(char_to_int[seq_out])
n_patterns = len(dataX)
print ("Total Patterns: ", n_patterns)
# reshape X to be [samples, time steps, features]
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
# normalize
X = X / float(n_vocab)
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
# define the LSTM model
from keras.optimizers import adam
optimizer=adam(learning_rate=0.0001)
model = Sequential()
model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
# define the checkpoint
filepath="drive/My Drive/weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
early_stopping = EarlyStopping(monitor='loss', min_delta=0, patience=2, verbose=0, mode='min', baseline=None, restore_best_weights=False)
callbacks_list = [checkpoint,early_stopping]
# fit the model
model.fit(X, y, epochs=50, batch_size=64, callbacks=callbacks_list)
# Load Larger LSTM network and generate text
import sys
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
# load ascii text and covert to lowercase
filename = "Shelock Holmes-Hounds of Baskeville.txt"
raw_text = open(filename, 'r', encoding='utf-8').read()
raw_text = raw_text.lower()
# create mapping of unique chars to integers, and a reverse mapping
chars = sorted(list(set(raw_text)))
char_to_int = dict((c, i) for i, c in enumerate(chars))
int_to_char = dict((i, c) for i, c in enumerate(chars))
# summarize the loaded data
n_chars = len(raw_text)
n_vocab = len(chars)
print ("Total Characters: ", n_chars)
print ("Total Vocab: ", n_vocab)
# prepare the dataset of input to output pairs encoded as integers
seq_length = 100
dataX = []
dataY = []
for i in range(0, n_chars - seq_length, 1):
seq_in = raw_text[i:i + seq_length]
seq_out = raw_text[i + seq_length]
dataX.append([char_to_int[char] for char in seq_in])
dataY.append(char_to_int[seq_out])
n_patterns = len(dataX)
print ("Total Patterns: ", n_patterns)
# reshape X to be [samples, time steps, features]
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
# normalize
X = X / float(n_vocab)
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
# define the LSTM model
model = Sequential()
model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
# load the network weights
filename = "/content/drive/My Drive/weights-improvement-11-1.3733-bigger.hdf5"
model.load_weights(filename)
model.compile(loss='categorical_crossentropy', optimizer='adam')
for i in range(25):
# pick a random seed
start = numpy.random.randint(0, len(dataX)-1)
pattern = dataX[start]
print ("Seed:")
print ("\"", ''.join([int_to_char[value] for value in pattern]), "\"")
# generate characters
for i in range(10):
x = numpy.reshape(pattern, (1, len(pattern), 1))
x = x / float(n_vocab)
prediction = model.predict(x, verbose=0)
index = numpy.argmax(prediction)
result = int_to_char[index]
seq_in = [int_to_char[value] for value in pattern]
sys.stdout.write(result)
pattern.append(index)
pattern = pattern[1:len(pattern)]
print ("\nDone.") | 0.793306 | 0.346818 |
import os
import time
import requests
import sys
import subprocess
try:
import ipapi
except ImportError:
os.system("pip install ipapi")
opt = "\nHack/> "
ip = "\nEnter host: "
def cls():
os.system("clear")
class color:
org = '\033[33m'
End = '\033[0m'
def main():
cls()
print("--------[ Hack-Ipapi ]--------\n")
print("Version: 1.2.0\n")
print("{1}.Port Scan")
print("{2}.PingTest")
print("{3}.Server Location")
print("{4}.whois")
print("{5}.Geoip")
print("{99}.Exit")
choose = input(opt)
if choose == '1':
portscan()
elif choose == '2':
pingtest()
elif choose == '3':
location()
elif choose == '4':
whois()
elif choose == '5':
geoip()
elif choose == '99':
ext()
else:
main()
def portscan():
cls()
host = input(ip)
attack_1 = requests.get(f"https://api.hackertarget.com/nmap/?q={host}").text
print(attack_1)
try1()
def try1():
try_to_portscan = input("\nDo you want to try again? [y/n] ")
if try_to_portscan == 'y':
portscan()
elif try_to_portscan == 'n':
main()
else:
try1()
def pingtest():
cls()
host = input(ip)
packet = input("\nEnter packet: ")
attack_2 = subprocess.getoutput(f"ping -w {packet} {host}")
print(color.org + attack_2 + color.End)
try2()
def try2():
try_to_pingtest = input("\nDo you want to try again? [y/n] ")
if try_to_pingtest == 'y':
pingtest()
elif try_to_pingtest == 'n':
main()
else:
try2()
def location():
cls()
host = input(ip)
search = ipapi.location(ip=host,key=None)
print("------------------------\n")
print("Ip: " + search["ip"])
print("org: " + search["org"])
print("------------------------\n")
try3()
def try3():
try_to_location = input("\nDo you want to try again? [y/n] ")
if try_to_location == 'y':
location()
elif try_to_location == 'n':
main()
else:
try3()
def whois():
cls()
host = input(ip)
attack_4 = requests.get(f"https://api.hackertarget.com/whois/?q={host}").text
print(attack_4)
try4()
def try4():
try_to_whois = input("\nDo you want try again? [y/n] ")
if try_to_whois == 'y':
whois()
elif try_to_whois == 'n':
main()
else:
try4()
def geoip():
cls()
host = input(ip)
attack_5 = requests.get(f"https://api.hackertarget.com/geoip/?q={host}").text
print(attack_5)
try5()
def try5():
try_to_geoip = input("\nDo you want to try again? [y/n] ")
if try_to_geoip == 'y':
geoip()
elif try_to_geoip == 'n':
main()
else:
try5()
def ext():
cls()
print("\nExiting...")
sys.exit()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print("\nCtrl + C")
print("\nExiting...")
sys.exit() | hack.py | import os
import time
import requests
import sys
import subprocess
try:
import ipapi
except ImportError:
os.system("pip install ipapi")
opt = "\nHack/> "
ip = "\nEnter host: "
def cls():
os.system("clear")
class color:
org = '\033[33m'
End = '\033[0m'
def main():
cls()
print("--------[ Hack-Ipapi ]--------\n")
print("Version: 1.2.0\n")
print("{1}.Port Scan")
print("{2}.PingTest")
print("{3}.Server Location")
print("{4}.whois")
print("{5}.Geoip")
print("{99}.Exit")
choose = input(opt)
if choose == '1':
portscan()
elif choose == '2':
pingtest()
elif choose == '3':
location()
elif choose == '4':
whois()
elif choose == '5':
geoip()
elif choose == '99':
ext()
else:
main()
def portscan():
cls()
host = input(ip)
attack_1 = requests.get(f"https://api.hackertarget.com/nmap/?q={host}").text
print(attack_1)
try1()
def try1():
try_to_portscan = input("\nDo you want to try again? [y/n] ")
if try_to_portscan == 'y':
portscan()
elif try_to_portscan == 'n':
main()
else:
try1()
def pingtest():
cls()
host = input(ip)
packet = input("\nEnter packet: ")
attack_2 = subprocess.getoutput(f"ping -w {packet} {host}")
print(color.org + attack_2 + color.End)
try2()
def try2():
try_to_pingtest = input("\nDo you want to try again? [y/n] ")
if try_to_pingtest == 'y':
pingtest()
elif try_to_pingtest == 'n':
main()
else:
try2()
def location():
cls()
host = input(ip)
search = ipapi.location(ip=host,key=None)
print("------------------------\n")
print("Ip: " + search["ip"])
print("org: " + search["org"])
print("------------------------\n")
try3()
def try3():
try_to_location = input("\nDo you want to try again? [y/n] ")
if try_to_location == 'y':
location()
elif try_to_location == 'n':
main()
else:
try3()
def whois():
cls()
host = input(ip)
attack_4 = requests.get(f"https://api.hackertarget.com/whois/?q={host}").text
print(attack_4)
try4()
def try4():
try_to_whois = input("\nDo you want try again? [y/n] ")
if try_to_whois == 'y':
whois()
elif try_to_whois == 'n':
main()
else:
try4()
def geoip():
cls()
host = input(ip)
attack_5 = requests.get(f"https://api.hackertarget.com/geoip/?q={host}").text
print(attack_5)
try5()
def try5():
try_to_geoip = input("\nDo you want to try again? [y/n] ")
if try_to_geoip == 'y':
geoip()
elif try_to_geoip == 'n':
main()
else:
try5()
def ext():
cls()
print("\nExiting...")
sys.exit()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print("\nCtrl + C")
print("\nExiting...")
sys.exit() | 0.06271 | 0.10004 |
""" utils """
import os
import sys
import time
import math
import json
import stat
from datetime import datetime
from collections import Counter
import numpy as np
import mindspore.common.dtype as mstype
from mindspore import load_checkpoint, load_param_into_net, save_checkpoint, Tensor, Parameter
from mindspore.common.parameter import ParameterTuple
from mindspore.train.callback import Callback
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from src.transform import xyxy2xywh
def linear_warmup_lr(current_step, warmup_steps, base_lr, init_lr):
    """Linearly interpolate from `init_lr` towards `base_lr`.

    Args:
        current_step (int): 1-based step index within the warmup phase.
        warmup_steps (int): total number of warmup steps.
        base_lr (float): learning rate reached at the end of warmup.
        init_lr (float): learning rate at step 0.

    Returns:
        float: learning rate for `current_step`.
    """
    slope = (float(base_lr) - float(init_lr)) / float(warmup_steps)
    return float(init_lr) + slope * current_step
def warmup_step_lr(lr, lr_epochs, steps_per_epoch, warmup_epochs, max_epoch, gamma=0.1):
    """Step-decay learning-rate schedule with a linear warmup phase.

    During warmup the rate climbs linearly from 0 to `lr`; afterwards it is
    multiplied by `gamma` each time a milestone epoch from `lr_epochs` is
    crossed.

    Args:
        lr (float): base learning rate reached at the end of warmup.
        lr_epochs (list[int]): epochs at which the rate decays by `gamma`.
        steps_per_epoch (int): optimizer steps per epoch.
        warmup_epochs (int): number of warmup epochs.
        max_epoch (int): total number of training epochs.
        gamma (float): multiplicative decay factor per milestone.

    Returns:
        numpy.ndarray: float32 array of length max_epoch * steps_per_epoch.
    """
    base_lr = lr
    total_steps = int(max_epoch * steps_per_epoch)
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    # Count how many milestones land on each global step index (a milestone
    # may repeat, decaying more than once at the same step).
    decay_counts = Counter(milestone * steps_per_epoch for milestone in lr_epochs)
    warmup_slope = None
    if warmup_steps:
        warmup_slope = (float(base_lr) - 0.0) / float(warmup_steps)
    current_lr = base_lr
    schedule = []
    for step in range(total_steps):
        if step < warmup_steps:
            # Same arithmetic as linear_warmup_lr with init_lr == 0.
            current_lr = 0.0 + warmup_slope * (step + 1)
        else:
            current_lr = current_lr * gamma ** decay_counts[step]
        schedule.append(current_lr)
    return np.array(schedule).astype(np.float32)
def multi_step_lr(lr, milestones, steps_per_epoch, max_epoch, gamma=0.1):
    """Multi-step decay schedule: warmup_step_lr with a zero-length warmup."""
    return warmup_step_lr(lr, milestones, steps_per_epoch,
                          warmup_epochs=0, max_epoch=max_epoch, gamma=gamma)
def step_lr(lr, epoch_size, steps_per_epoch, max_epoch, gamma=0.1):
    """Decay `lr` by `gamma` every `epoch_size` epochs (no warmup)."""
    # Milestones are the multiples of epoch_size strictly below max_epoch.
    milestones = list(range(epoch_size, max_epoch, epoch_size))
    return multi_step_lr(lr, milestones, steps_per_epoch, max_epoch, gamma=gamma)
def warmup_cosine_annealing_lr(lr, steps_per_epoch, warmup_epochs, max_epoch, t_max, eta_min=0):
    """Cosine-annealing learning-rate schedule with a linear warmup phase.

    Args:
        lr (float): peak learning rate (reached at the end of warmup).
        steps_per_epoch (int): optimizer steps per epoch.
        warmup_epochs (int): number of warmup epochs.
        max_epoch (int): total number of training epochs.
        t_max (int): cosine period in epochs.
        eta_min (float): minimum learning rate of the cosine curve.

    Returns:
        numpy.ndarray: float32 array of length max_epoch * steps_per_epoch.
    """
    base_lr = lr
    total_steps = int(max_epoch * steps_per_epoch)
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    schedule = []
    for step in range(total_steps):
        epoch_idx = step // steps_per_epoch
        if step < warmup_steps:
            # Linear ramp from 0 to base_lr (inlined linear_warmup_lr).
            value = 0.0 + (float(base_lr) - 0.0) / float(warmup_steps) * (step + 1)
        else:
            value = eta_min + (base_lr - eta_min) * (1. + math.cos(math.pi * epoch_idx / t_max)) / 2
        schedule.append(value)
    return np.array(schedule).astype(np.float32)
def yolox_warm_cos_lr(
        lr,
        steps_per_epoch,
        warmup_epochs,
        max_epoch,
        no_aug_epochs,
        warmup_lr_start=0,
        min_lr_ratio=0.05
):
    """YOLOX cosine schedule: quadratic warmup, cosine decay, flat tail.

    The rate rises quadratically from `warmup_lr_start` to `lr` during
    warmup, follows a half cosine down to `lr * min_lr_ratio`, and stays at
    that minimum for the final `no_aug_epochs` epochs.

    Args:
        lr (float): peak learning rate.
        steps_per_epoch (int): optimizer steps per epoch.
        warmup_epochs (int): warmup duration in epochs.
        max_epoch (int): total number of training epochs.
        no_aug_epochs (int): trailing epochs held at the minimum rate.
        warmup_lr_start (float): learning rate at step 0.
        min_lr_ratio (float): minimum rate expressed as a fraction of `lr`.

    Returns:
        numpy.ndarray: float32 array of length max_epoch * steps_per_epoch.
    """
    base_lr = lr
    min_lr = lr * min_lr_ratio
    total_iters = int(max_epoch * steps_per_epoch)
    warmup_iters = int(warmup_epochs * steps_per_epoch)
    no_aug_iters = no_aug_epochs * steps_per_epoch
    # Number of iterations the cosine segment spans.
    cos_span = total_iters - warmup_iters - no_aug_iters
    schedule = []
    for it in range(total_iters):
        if it < warmup_iters:
            frac = (it + 1) / float(warmup_iters)
            value = (base_lr - warmup_lr_start) * pow(frac, 2) + warmup_lr_start
        elif it >= total_iters - no_aug_iters:
            value = min_lr
        else:
            cos_term = math.cos(math.pi * (it - warmup_iters) / cos_span)
            value = min_lr + 0.5 * (base_lr - min_lr) * (1.0 + cos_term)
        schedule.append(value)
    return np.array(schedule).astype(np.float32)
def warmup_cosine_annealing_lr_v2(lr, steps_per_epoch, warmup_epochs, max_epoch, t_max, eta_min=0):
    """Two-phase cosine annealing with linear warmup.

    For the first 2/3 of the steps a cosine curve with period `t_max`
    (in epochs) is followed; for the final 1/3 a second cosine with period
    max_epoch // 3 restarts from the last value of the first phase.

    Args:
        lr (float): peak learning rate (reached at the end of warmup).
        steps_per_epoch (int): optimizer steps per epoch.
        warmup_epochs (int): number of linear-warmup epochs.
        max_epoch (int): total number of training epochs.
        t_max (int): cosine period (epochs) of the first phase.
        eta_min (float): minimum learning rate of both cosine curves.

    Returns:
        numpy.ndarray: float32 array of length max_epoch * steps_per_epoch.
    """
    base_lr = lr
    warmup_init_lr = 0
    total_steps = int(max_epoch * steps_per_epoch)
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    # State carried across iterations: last phase-1 value and the epoch at
    # which phase 1 ended, used as the starting point of phase 2.
    last_lr = 0
    last_epoch_v1 = 0
    # Period (in epochs) of the second cosine phase.
    t_max_v2 = int(max_epoch * 1 / 3)
    lr_each_step = []
    for i in range(total_steps):
        last_epoch = i // steps_per_epoch
        if i < warmup_steps:
            lr = linear_warmup_lr(i + 1, warmup_steps, base_lr, warmup_init_lr)
        else:
            if i < total_steps * 2 / 3:
                # Phase 1: plain cosine annealing over t_max.
                lr = eta_min + (base_lr - eta_min) * (1. + math.cos(math.pi * last_epoch / t_max)) / 2
                last_lr = lr
                last_epoch_v1 = last_epoch
            else:
                # Phase 2: restart the cosine from the last phase-1 value;
                # note base_lr is intentionally rebound here.
                base_lr = last_lr
                last_epoch = last_epoch - last_epoch_v1
                lr = eta_min + (base_lr - eta_min) * (1. + math.cos(math.pi * last_epoch / t_max_v2)) / 2
        lr_each_step.append(lr)
    return np.array(lr_each_step).astype(np.float32)
def warmup_cosine_annealing_lr_sample(lr, steps_per_epoch, warmup_epochs, max_epoch, t_max, eta_min=0):
    """Cosine annealing with warmup over a virtually longer run, with
    some epochs sampled (skipped).

    The curve is computed as if training lasted
    max_epoch + tobe_sampled_epoch epochs, and every second epoch in
    [start_sample_epoch, end_sampled_epoch) is dropped, so the returned
    array still has exactly max_epoch * steps_per_epoch entries while
    decaying as though training were longer.

    Args:
        lr (float): peak learning rate (reached at the end of warmup).
        steps_per_epoch (int): optimizer steps per epoch.
        warmup_epochs (int): number of linear-warmup epochs.
        max_epoch (int): total number of real training epochs.
        t_max: unused; overwritten with the virtual epoch count below.
        eta_min (float): minimum learning rate of the cosine curve.

    Returns:
        numpy.ndarray: float32 array of length max_epoch * steps_per_epoch.
    """
    # Epochs in [60, 180) are sampled with stride 2: every second epoch is
    # skipped, removing tobe_sampled_epoch (=60) epochs in total.
    start_sample_epoch = 60
    step_sample = 2
    tobe_sampled_epoch = 60
    end_sampled_epoch = start_sample_epoch + step_sample * tobe_sampled_epoch
    max_sampled_epoch = max_epoch + tobe_sampled_epoch
    # The cosine period is the full virtual run length (t_max arg ignored).
    t_max = max_sampled_epoch
    base_lr = lr
    warmup_init_lr = 0
    total_steps = int(max_epoch * steps_per_epoch)
    total_sampled_steps = int(max_sampled_epoch * steps_per_epoch)
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    lr_each_step = []
    for i in range(total_sampled_steps):
        last_epoch = i // steps_per_epoch
        if last_epoch in range(start_sample_epoch, end_sampled_epoch, step_sample):
            # Skip this epoch's steps entirely (the "sampling").
            continue
        if i < warmup_steps:
            lr = linear_warmup_lr(i + 1, warmup_steps, base_lr, warmup_init_lr)
        else:
            lr = eta_min + (base_lr - eta_min) * (1. + math.cos(math.pi * last_epoch / t_max)) / 2
        lr_each_step.append(lr)
    # Skipping exactly tobe_sampled_epoch epochs must restore the real length.
    assert total_steps == len(lr_each_step)
    return np.array(lr_each_step).astype(np.float32)
def yolox_no_aug_lr(base_lr, steps_per_epoch, max_epoch, min_lr_ratio=0.05):
    """Constant schedule at base_lr * min_lr_ratio for every step.

    Used for the no-augmentation fine-tuning stage where the rate is held
    at the schedule minimum.

    Returns:
        numpy.ndarray: float32 array of length max_epoch * steps_per_epoch.
    """
    total_iters = int(max_epoch * steps_per_epoch)
    constant_lr = base_lr * min_lr_ratio
    return np.full(total_iters, constant_lr, dtype=np.float32)
def get_lr(args):
    """Build the per-step learning-rate array selected by args.lr_scheduler.

    Supported values: 'exponential', 'cosine_annealing',
    'cosine_annealing_V2', 'cosine_annealing_sample', 'yolox_warm_cos_lr'
    and 'no_aug_lr'.

    Raises:
        NotImplementedError: for an unknown args.lr_scheduler value.
    """
    scheduler = args.lr_scheduler
    if scheduler == 'exponential':
        return warmup_step_lr(args.lr, args.lr_epochs, args.steps_per_epoch,
                              args.warmup_epochs, args.max_epoch,
                              gamma=args.lr_gamma)
    if scheduler == 'cosine_annealing':
        return warmup_cosine_annealing_lr(args.lr, args.steps_per_epoch,
                                          args.warmup_epochs, args.max_epoch,
                                          args.t_max, args.eta_min)
    if scheduler == 'cosine_annealing_V2':
        return warmup_cosine_annealing_lr_v2(args.lr, args.steps_per_epoch,
                                             args.warmup_epochs, args.max_epoch,
                                             args.t_max, args.eta_min)
    if scheduler == 'cosine_annealing_sample':
        return warmup_cosine_annealing_lr_sample(args.lr, args.steps_per_epoch,
                                                 args.warmup_epochs, args.max_epoch,
                                                 args.t_max, args.eta_min)
    if scheduler == 'yolox_warm_cos_lr':
        return yolox_warm_cos_lr(lr=args.lr,
                                 steps_per_epoch=args.steps_per_epoch,
                                 warmup_epochs=args.warmup_epochs,
                                 max_epoch=args.max_epoch,
                                 no_aug_epochs=args.no_aug_epochs,
                                 min_lr_ratio=args.min_lr_ratio)
    if scheduler == 'no_aug_lr':
        return yolox_no_aug_lr(args.lr, args.steps_per_epoch, args.max_epoch,
                               min_lr_ratio=args.min_lr_ratio)
    raise NotImplementedError(scheduler)
def get_param_groups(network, weight_decay):
    """Split trainable parameters into weight-decay and no-decay groups.

    Biases and BatchNorm gamma/beta parameters are excluded from weight
    decay, which is standard practice for convnet training.

    Args:
        network: model whose trainable_params() yields parameters exposing
            a `.name` attribute.
        weight_decay (float): decay applied to the remaining parameters.

    Returns:
        list[dict]: two optimizer parameter groups — the no-decay group
        (weight_decay=0.0) first, then the decay group.
    """
    decay_params = []
    no_decay_params = []
    # .bias, and BN .gamma / .beta are trained without weight decay.
    # (str.endswith accepts a tuple, collapsing the former three branches.)
    no_decay_suffixes = ('.bias', '.gamma', '.beta')
    for param in network.trainable_params():
        if param.name.endswith(no_decay_suffixes):
            no_decay_params.append(param)
        else:
            decay_params.append(param)
    return [{'params': no_decay_params, 'weight_decay': 0.0},
            {'params': decay_params, 'weight_decay': weight_decay}]
def load_backbone(net, ckpt_path, args):
    """Load backbone weights from `ckpt_path` into `net`.

    Parameters present in the checkpoint are loaded in place; parameters
    missing from the checkpoint are counted and their count is logged.

    Args:
        net: network to initialise.
        ckpt_path (str): path to the backbone checkpoint file.
        args: namespace providing `args.logger`.

    Returns:
        The same `net`, with matching parameters loaded.
    """
    param_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, param_dict)
    param_not_load = [param.name for _, param in net.parameters_and_names()
                      if param.name not in param_dict]
    # Bug fix: the message had no %s placeholder, so passing the count as an
    # extra positional argument breaks logging's "msg % args" expansion.
    args.logger.info("not loading param is : %s", len(param_not_load))
    return net
class AverageMeter:
    """Computes and stores the average and current value of a metric.

    Optionally mirrors every update to a TensorBoard-style writer via
    `add_scalar(name, value, step)`.
    """

    def __init__(self, name, fmt=':f', tb_writer=None):
        self.name = name            # metric name, used for display / TB tag
        self.fmt = fmt              # format spec applied to the average in __str__
        self.tb_writer = tb_writer  # optional writer with add_scalar()
        self.cur_step = 1           # global step counter for the writer
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def reset(self):
        """Clear the running statistics (keeps name/format/writer/step)."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Accumulate `val` observed `n` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
        if self.tb_writer is not None:
            self.tb_writer.add_scalar(self.name, self.val, self.cur_step)
            self.cur_step += 1

    def __str__(self):
        # Bug fix: removed a leftover debug print() — __str__ must be
        # side-effect free.
        fmtstr = '{name}:{avg' + self.fmt + '}'
        return fmtstr.format(**self.__dict__)
def keep_loss_fp32(network):
    """Force every YOLOLossCell inside `network` to compute in float32.

    Walks all sub-cells and pins the loss cell(s) to fp32 so the loss stays
    numerically stable under mixed-precision training.
    """
    from src.yolox import YOLOLossCell
    for _, sub_cell in network.cells_and_names():
        if isinstance(sub_cell, YOLOLossCell):
            sub_cell.to_float(mstype.float32)
class EMACallBack(Callback):
    """Callback maintaining the network's EMA (shadow) weights during training.

    During an epoch the EMA weights are kept as plain numpy arrays for cheap
    per-step updates; at each epoch boundary they are wrapped back into
    Parameters (as a ParameterTuple) under their original names.

    Args:
        network: train network; expected to expose `ema` (truthy flag),
            `ema_moving_weight`, `moving_name` and
            `moving_parameter_update()`.  # assumed from usage below — confirm
        steps_per_epoch (int): steps per epoch, used to detect epoch ends.
        cur_steps (int): initial global step counter (e.g. when resuming).
    """

    def __init__(self, network, steps_per_epoch, cur_steps=0):
        self.steps_per_epoch = steps_per_epoch
        self.cur_steps = cur_steps
        self.network = network

    def epoch_begin(self, run_context):
        """Detach EMA weights to numpy arrays for in-epoch updates."""
        if self.network.ema:
            # Not a list yet -> still Parameters (e.g. a ParameterTuple);
            # convert each to a numpy array.
            if not isinstance(self.network.ema_moving_weight, list):
                tmp_moving = []
                for weight in self.network.ema_moving_weight:
                    tmp_moving.append(weight.asnumpy())
                self.network.ema_moving_weight = tmp_moving

    def step_end(self, run_context):
        """Apply one EMA update; at epoch end re-wrap weights as Parameters."""
        if self.network.ema:
            self.network.moving_parameter_update()
            self.cur_steps += 1
            if self.cur_steps % self.steps_per_epoch == 0:
                if isinstance(self.network.ema_moving_weight, list):
                    tmp_moving = []
                    moving_name = []
                    idx = 0
                    for key in self.network.moving_name:
                        moving_name.append(key)
                    # Rebuild Parameters from the numpy arrays, restoring the
                    # original parameter names positionally.
                    for weight in self.network.ema_moving_weight:
                        param = Parameter(Tensor(weight), name=moving_name[idx])
                        tmp_moving.append(param)
                        idx += 1
                    self.network.ema_moving_weight = ParameterTuple(tmp_moving)
class YOLOXCB(Callback):
    """
    YOLOX training callback: per-step / per-epoch logging and, on ModelArts,
    checkpoint upload to OBS.

    Args:
        logger: logger used for all output.
        step_per_epoch (int): number of optimizer steps in one epoch.
        lr (sequence): per-step learning-rate schedule, indexed by global step.
        save_ckpt_path (str): local directory holding checkpoints.
        is_modelart (bool): when True, copy checkpoints to `train_url`.
        per_print_times (int): log a step summary every N steps.
        train_url (str): remote (OBS) target directory for checkpoints.
    """

    def __init__(self, logger, step_per_epoch, lr, save_ckpt_path, is_modelart=False, per_print_times=1,
                 train_url=None):
        super(YOLOXCB, self).__init__()
        self.train_url = train_url
        if not isinstance(per_print_times, int) or per_print_times < 0:
            raise ValueError("print_step must be int and >= 0.")
        self._per_print_times = per_print_times
        self.lr = lr
        self.is_modelarts = is_modelart
        self.step_per_epoch = step_per_epoch
        self.current_step = 0
        self.save_ckpt_path = save_ckpt_path
        self.iter_time = time.time()
        self.epoch_start_time = time.time()
        self.average_loss = []
        self.logger = logger

    def epoch_begin(self, run_context):
        """
        Called before each epoch beginning; resets the wall-clock timers.

        Args:
            run_context (RunContext): Include some information of the model.
        """
        self.epoch_start_time = time.time()
        self.iter_time = time.time()

    def epoch_end(self, run_context):
        """
        Called after each epoch finished; logs the epoch summary and, on
        ModelArts, syncs checkpoints to OBS.

        Args:
            run_context (RunContext): Include some information of the model.
        """
        cb_params = run_context.original_args()
        cur_epoch = cb_params.cur_epoch_num
        # net_outputs is (loss, overflow flag, loss scale).
        loss = cb_params.net_outputs
        loss = "loss: %.4f, overflow: %s, scale: %s" % (float(loss[0].asnumpy()),
                                                        bool(loss[1].asnumpy()),
                                                        int(loss[2].asnumpy()))
        self.logger.info(
            "epoch: %s epoch time %.2fs %s" % (cur_epoch, time.time() - self.epoch_start_time, loss))
        if self.current_step % (self.step_per_epoch * 1) == 0:
            if self.is_modelarts:
                import moxing as mox
                if self.save_ckpt_path and self.train_url:
                    mox.file.copy_parallel(src_url=self.save_ckpt_path, dst_url=self.train_url)
                    cur_epoch = self.current_step // self.step_per_epoch
                    # Bug fix: arguments were previously passed as
                    # (path, epoch, url) while the template reads
                    # "[epoch {}]copy ckpt from{} to {}".
                    self.logger.info(
                        "[epoch {}]copy ckpt from{} to {}".format(cur_epoch, self.save_ckpt_path, self.train_url))

    def step_begin(self, run_context):
        """
        Called before each step beginning.

        Args:
            run_context (RunContext): Include some information of the model.
        """

    def step_end(self, run_context):
        """
        Called after each step finished; logs loss / lr / step time every
        `per_print_times` steps and advances the global step counter.

        Args:
            run_context (RunContext): Include some information of the model.
        """
        cur_epoch_step = (self.current_step + 1) % self.step_per_epoch
        if cur_epoch_step % self._per_print_times == 0 and cur_epoch_step != 0:
            cb_params = run_context.original_args()
            cur_epoch = cb_params.cur_epoch_num
            loss = cb_params.net_outputs
            loss = "loss: %.4f, overflow: %s, scale: %s" % (float(loss[0].asnumpy()),
                                                            bool(loss[1].asnumpy()),
                                                            int(loss[2].asnumpy()))
            self.logger.info("epoch: %s step: [%s/%s], %s, lr: %.6f, avg step time: %.2f ms" % (
                cur_epoch, cur_epoch_step, self.step_per_epoch, loss, self.lr[self.current_step],
                (time.time() - self.iter_time) * 1000 / self._per_print_times))
            self.iter_time = time.time()
        self.current_step += 1

    def end(self, run_context):
        """
        Called once after network training.

        Args:
            run_context (RunContext): Include some information of the model.
        """
class EvalCallBack(Callback):
    """Run COCO evaluation during training and keep the best checkpoint.

    Every `interval` epochs (starting at `start_epoch`, and always at the
    final epoch) the EMA shadow weights (rank 0) or the raw network weights
    (other ranks) are copied into `test_net`, the eval dataset is run through
    it, and the resulting mAP is compared against the best seen so far; the
    best model is saved as 'best.ckpt'.
    """

    def __init__(self, dataset, test_net, train_net, detection, config, start_epoch=0, interval=1):
        self.dataset = dataset
        self.network = train_net
        self.test_network = test_net
        self.detection = detection
        self.logger = config.logger
        self.start_epoch = start_epoch
        self.interval = interval
        self.max_epoch = config.max_epoch
        self.best_result = 0
        self.best_epoch = 0
        self.rank = config.rank

    def load_ema_parameter(self):
        """Copy the EMA shadow weights ('ema.' prefix) into the test network."""
        param_dict = {}
        for name, param in self.network.parameters_and_names():
            if name.startswith("ema."):
                new_name = name.split('ema.')[-1]
                param_new = param.clone()
                param_new.name = new_name
                param_dict[new_name] = param_new
        load_param_into_net(self.test_network, param_dict)

    def load_network_parameter(self):
        """Copy the raw train weights ('network.' prefix) into the test network."""
        param_dict = {}
        for name, param in self.network.parameters_and_names():
            if name.startswith("network."):
                param_new = param.clone()
                param_dict[name] = param_new
        load_param_into_net(self.test_network, param_dict)

    def epoch_end(self, run_context):
        """Evaluate at the configured epochs and checkpoint the best model."""
        cb_param = run_context.original_args()
        cur_epoch = cb_param.cur_epoch_num
        if cur_epoch >= self.start_epoch:
            if (cur_epoch - self.start_epoch) % self.interval == 0 or cur_epoch == self.max_epoch:
                if self.rank == 0:
                    self.load_ema_parameter()
                else:
                    self.load_network_parameter()
                self.test_network.set_train(False)
                eval_print_str, results = self.inference()
                if results >= self.best_result:
                    self.best_result = results
                    self.best_epoch = cur_epoch
                    if os.path.exists('best.ckpt'):
                        self.remove_ckpoint_file('best.ckpt')
                    save_checkpoint(cb_param.train_network, 'best.ckpt')
                    self.logger.info("Best result %s at %s epoch" % (self.best_result, self.best_epoch))
                # NOTE(review): eval_print_str may be None when evaluation
                # produced no result; kept as-is to preserve behavior.
                self.logger.info(eval_print_str)
                self.logger.info('Ending inference...')

    def end(self, run_context):
        """Log the best result once training finishes."""
        self.logger.info("Best result %s at %s epoch" % (self.best_result, self.best_epoch))

    def inference(self):
        """Run the eval dataset through the test network and compute mAP."""
        self.logger.info('Start inference...')
        self.logger.info("eval dataset size, %s" % self.dataset.get_dataset_size())
        counts = 0
        for data in self.dataset.create_dict_iterator(num_epochs=1):
            image = data['image']
            img_info = data['image_shape']
            img_id = data['img_id']
            prediction = self.test_network(image)
            prediction = prediction.asnumpy()
            img_shape = img_info.asnumpy()
            img_id = img_id.asnumpy()
            counts = counts + 1
            self.detection.detection(prediction, img_shape, img_id)
            self.logger.info('Calculating mAP...%s' % counts)
        # Fixed: the progress line above was duplicated inside the loop.
        result_file_path = self.detection.evaluate_prediction()
        self.logger.info('result file path: %s', result_file_path)
        eval_result, results = self.detection.get_eval_result()
        if eval_result is not None and results is not None:
            eval_print_str = '\n=============coco eval result=========\n' + eval_result
            return eval_print_str, results
        return None, 0

    def remove_ckpoint_file(self, file_name):
        """Remove the specified checkpoint file from this checkpoint manager and also from the directory."""
        try:
            os.chmod(file_name, stat.S_IWRITE)
            os.remove(file_name)
        except OSError:
            self.logger.info("OSError, failed to remove the older ckpt file %s.", file_name)
        except ValueError:
            self.logger.info("ValueError, failed to remove the older ckpt file %s.", file_name)
class Redirct:
    """Minimal stdout stand-in that accumulates everything written to it."""

    def __init__(self):
        self.content = ""

    def write(self, content):
        # Buffer instead of emitting; callers read the text via `content`.
        self.content = self.content + content

    def flush(self):
        # Flushing discards the buffer, acting as a reset.
        self.content = ""
class DetectionEngine:
    """ Detection engine: post-processes raw YOLOX outputs (confidence
    filtering + NMS), buffers predictions in COCO result format and runs
    pycocotools evaluation against the val2017 annotations. """

    def __init__(self, config):
        self.config = config
        self.input_size = self.config.input_size
        self.strides = self.config.fpn_strides  # [8, 16, 32]
        self.expanded_strides = None
        self.grids = None
        self.num_classes = config.num_classes
        self.conf_thre = config.conf_thre
        self.nms_thre = config.nms_thre
        # COCO val2017 ground-truth annotations used for evaluation.
        self.annFile = os.path.join(config.data_dir, 'annotations/instances_val2017.json')
        self._coco = COCO(self.annFile)
        self._img_ids = list(sorted(self._coco.imgs.keys()))
        self.coco_catIds = self._coco.getCatIds()
        self.save_prefix = config.outputs_dir
        self.file_path = ''
        # Accumulated predictions (COCO json dicts) across batches.
        self.data_list = []

    def detection(self, outputs, img_shape, img_ids):
        """Apply NMS to one batch of raw outputs and buffer COCO-format results."""
        # post process nms
        outputs = self.postprocess(outputs, self.num_classes, self.conf_thre, self.nms_thre)
        self.data_list.extend(self.convert_to_coco_format(outputs, info_imgs=img_shape, ids=img_ids))

    def postprocess(self, prediction, num_classes, conf_thre=0.7, nms_thre=0.45, class_agnostic=False):
        """ nms

        `prediction` is a (batch, n_boxes, 5 + num_classes) array with boxes
        encoded as (cx, cy, w, h, obj_conf, per-class scores...). Returns a
        list with one (n_kept, 7) array per image — columns
        (x1, y1, x2, y2, obj_conf, class_conf, class_idx) — or None where
        nothing passes `conf_thre`. Modifies `prediction` in place.
        """
        # Convert centre/size boxes to corner format, written back in place.
        box_corner = np.zeros_like(prediction)
        box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
        box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
        box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
        box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
        prediction[:, :, :4] = box_corner[:, :, :4]
        output = [None for _ in range(len(prediction))]
        for i, image_pred in enumerate(prediction):
            if not image_pred.shape[0]:
                continue
            # Get score and class with highest confidence
            class_conf = np.max(image_pred[:, 5:5 + num_classes], axis=-1)  # (8400)
            class_pred = np.argmax(image_pred[:, 5:5 + num_classes], axis=-1)  # (8400)
            # Keep boxes whose combined (objectness * class) score clears the threshold.
            conf_mask = (image_pred[:, 4] * class_conf >= conf_thre).squeeze()  # (8400)
            class_conf = np.expand_dims(class_conf, axis=-1)  # (8400, 1)
            # NOTE(review): the float16 cast limits exactly-representable class
            # indices; fine for COCO's 80 classes — verify before reusing with
            # much larger label sets.
            class_pred = np.expand_dims(class_pred, axis=-1).astype(np.float16)  # (8400, 1)
            # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
            detections = np.concatenate((image_pred[:, :5], class_conf, class_pred), axis=1)
            detections = detections[conf_mask]
            if not detections.shape[0]:
                continue
            if class_agnostic:
                nms_out_index = self._nms(detections[:, :4], detections[:, 4] * detections[:, 5], nms_thre)
            else:
                nms_out_index = self._batch_nms(detections[:, :4], detections[:, 4] * detections[:, 5],
                                                detections[:, 6], nms_thre)
            detections = detections[nms_out_index]
            if output[i] is None:
                output[i] = detections
            else:
                output[i] = np.concatenate((output[i], detections))
        return output

    def _nms(self, xyxys, scores, threshold):
        """Calculate NMS.

        Greedy suppression: repeatedly keep the highest-scoring box and drop
        all remaining boxes whose IoU with it exceeds `threshold`. Uses the
        classic +1 pixel area convention. Returns kept indices in descending
        score order.
        """
        x1 = xyxys[:, 0]
        y1 = xyxys[:, 1]
        x2 = xyxys[:, 2]
        y2 = xyxys[:, 3]
        scores = scores  # NOTE(review): redundant self-assignment, kept as-is
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]
        reserved_boxes = []
        while order.size > 0:
            i = order[0]
            reserved_boxes.append(i)
            # Intersection of the current winner with every remaining box.
            max_x1 = np.maximum(x1[i], x1[order[1:]])
            max_y1 = np.maximum(y1[i], y1[order[1:]])
            min_x2 = np.minimum(x2[i], x2[order[1:]])
            min_y2 = np.minimum(y2[i], y2[order[1:]])
            intersect_w = np.maximum(0.0, min_x2 - max_x1 + 1)
            intersect_h = np.maximum(0.0, min_y2 - max_y1 + 1)
            intersect_area = intersect_w * intersect_h
            ovr = intersect_area / (areas[i] + areas[order[1:]] - intersect_area)
            # Keep only boxes that overlap the winner by at most `threshold`.
            indexes = np.where(ovr <= threshold)[0]
            order = order[indexes + 1]
        return reserved_boxes

    def _batch_nms(self, xyxys, scores, idxs, threshold, use_offset=True):
        """Calculate Nms based on class info,Each index value correspond to a category,
        and NMS will not be applied between elements of different categories."""
        if use_offset:
            # Shift each class's boxes into a disjoint coordinate range so a
            # single class-agnostic NMS pass never compares across categories.
            max_coordinate = xyxys.max()
            offsets = idxs * (max_coordinate + np.array([1]))
            boxes_for_nms = xyxys + offsets[:, None]
            keep = self._nms(boxes_for_nms, scores, threshold)
            return keep
        # Fallback: run NMS independently per category, then merge.
        keep_mask = np.zeros_like(scores, dtype=np.bool_)
        for class_id in np.unique(idxs):
            curr_indices = np.where(idxs == class_id)[0]
            curr_keep_indices = self._nms(xyxys[curr_indices], scores[curr_indices], threshold)
            keep_mask[curr_indices[curr_keep_indices]] = True
        keep_indices = np.where(keep_mask)[0]
        return keep_indices[np.argsort(-scores[keep_indices])]

    def convert_to_coco_format(self, outputs, info_imgs, ids):
        """ convert to coco format

        Rescales boxes from network-input coordinates back to original image
        size (undoing the letterbox resize) and emits one COCO result dict
        per detection.
        """
        data_list = []
        for (output, img_h, img_w, img_id) in zip(
                outputs, info_imgs[:, 0], info_imgs[:, 1], ids
        ):
            if output is None:
                continue
            bboxes = output[:, 0:4]
            # Inverse of the letterbox scale applied at preprocessing time.
            scale = min(
                self.input_size[0] / float(img_h), self.input_size[1] / float(img_w)
            )
            bboxes = bboxes / scale
            bboxes[:, [0, 2]] = np.clip(bboxes[:, [0, 2]], 0, img_w)
            bboxes[:, [1, 3]] = np.clip(bboxes[:, [1, 3]], 0, img_h)
            bboxes = xyxy2xywh(bboxes)
            cls = output[:, 6]
            scores = output[:, 4] * output[:, 5]
            for ind in range(bboxes.shape[0]):
                # Map the contiguous class index back to the sparse COCO category id.
                label = self.coco_catIds[int(cls[ind])]
                pred_data = {
                    "image_id": int(img_id),
                    "category_id": label,
                    "bbox": bboxes[ind].tolist(),
                    "score": scores[ind].item(),
                    "segmentation": [],
                }  # COCO json format
                data_list.append(pred_data)
        return data_list

    def evaluate_prediction(self):
        """ generate prediction coco json file

        Returns the json path, or '' when there were no predictions (an
        empty result set would make pycocotools fail downstream).
        """
        print('Evaluate in main process...')
        # write result to coco json format
        t = datetime.now().strftime('_%Y_%m_%d_%H_%M_%S')
        try:
            self.file_path = self.save_prefix + '/predict' + t + '.json'
            f = open(self.file_path, 'w')
            json.dump(self.data_list, f)
        except IOError as e:
            raise RuntimeError("Unable to open json file to dump. What():{}".format(str(e)))
        else:
            f.close()
        if not self.data_list:
            self.file_path = ''
            return self.file_path
        # Buffer is flushed once written so the next eval starts clean.
        self.data_list.clear()
        return self.file_path

    def get_eval_result(self):
        """Get eval result: (summary text, mAP@[.5:.95]) or (None, None)."""
        if not self.file_path:
            return None, None
        cocoGt = self._coco
        cocoDt = cocoGt.loadRes(self.file_path)
        cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
        cocoEval.evaluate()
        cocoEval.accumulate()
        # Capture pycocotools' summary (printed to stdout) into a string.
        rdct = Redirct()
        stdout = sys.stdout
        sys.stdout = rdct
        cocoEval.summarize()
        sys.stdout = stdout
return rdct.content, cocoEval.stats[0] | research/cv/yolox/src/util.py | """ utils """
import os
import sys
import time
import math
import json
import stat
from datetime import datetime
from collections import Counter
import numpy as np
import mindspore.common.dtype as mstype
from mindspore import load_checkpoint, load_param_into_net, save_checkpoint, Tensor, Parameter
from mindspore.common.parameter import ParameterTuple
from mindspore.train.callback import Callback
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from src.transform import xyxy2xywh
def linear_warmup_lr(current_step, warmup_steps, base_lr, init_lr):
    """Linearly interpolate the warmup learning rate.

    Returns the lr reached after `current_step` steps of a linear ramp from
    `init_lr` (step 0) to `base_lr` (step `warmup_steps`).
    """
    slope = (float(base_lr) - float(init_lr)) / float(warmup_steps)
    return float(init_lr) + slope * current_step
def warmup_step_lr(lr, lr_epochs, steps_per_epoch, warmup_epochs, max_epoch, gamma=0.1):
    """Step-decay learning-rate schedule with an optional linear warmup.

    After the warmup phase, the lr is multiplied by `gamma` once for every
    milestone epoch in `lr_epochs` that has been reached.

    Returns:
        np.ndarray of float32 with `max_epoch * steps_per_epoch` entries.
    """
    base_lr = lr
    warmup_init_lr = 0
    total_steps = int(max_epoch * steps_per_epoch)
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    # How many milestones fall on each global step index (normally 0 or 1).
    decay_counts = Counter(epoch * steps_per_epoch for epoch in lr_epochs)
    schedule = []
    current = base_lr
    for step in range(total_steps):
        if step < warmup_steps:
            current = linear_warmup_lr(step + 1, warmup_steps, base_lr, warmup_init_lr)
        else:
            current = current * gamma ** decay_counts[step]
        schedule.append(current)
    return np.array(schedule).astype(np.float32)
def multi_step_lr(lr, milestones, steps_per_epoch, max_epoch, gamma=0.1):
    """Multi-step decay schedule: `warmup_step_lr` with no warmup phase."""
    return warmup_step_lr(lr, milestones, steps_per_epoch,
                          warmup_epochs=0, max_epoch=max_epoch, gamma=gamma)
def step_lr(lr, epoch_size, steps_per_epoch, max_epoch, gamma=0.1):
    """Decay the lr by `gamma` every `epoch_size` epochs."""
    milestones = [epoch for epoch in range(1, max_epoch) if epoch % epoch_size == 0]
    return multi_step_lr(lr, milestones, steps_per_epoch, max_epoch, gamma=gamma)
def warmup_cosine_annealing_lr(lr, steps_per_epoch, warmup_epochs, max_epoch, t_max, eta_min=0):
    """Cosine annealing schedule (computed per epoch) with linear warmup.

    Returns:
        np.ndarray of float32 with `max_epoch * steps_per_epoch` entries.
    """
    base_lr = lr
    warmup_init_lr = 0
    total_steps = int(max_epoch * steps_per_epoch)
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    schedule = []
    for step in range(total_steps):
        epoch_idx = step // steps_per_epoch
        if step < warmup_steps:
            value = linear_warmup_lr(step + 1, warmup_steps, base_lr, warmup_init_lr)
        else:
            # Half-cosine from base_lr down towards eta_min over t_max epochs.
            value = eta_min + (base_lr - eta_min) * (1. + math.cos(math.pi * epoch_idx / t_max)) / 2
        schedule.append(value)
    return np.array(schedule).astype(np.float32)
def yolox_warm_cos_lr(
        lr,
        steps_per_epoch,
        warmup_epochs,
        max_epoch,
        no_aug_epochs,
        warmup_lr_start=0,
        min_lr_ratio=0.05
):
    """YOLOX cosine schedule: quadratic warmup, cosine decay, flat tail.

    The first `warmup_epochs` ramp quadratically from `warmup_lr_start` up to
    `lr`; the final `no_aug_epochs` (no-augmentation phase) hold the floor
    `lr * min_lr_ratio`; in between the lr follows a half cosine between the
    two levels.
    """
    base_lr = lr
    min_lr = lr * min_lr_ratio
    total_iters = int(max_epoch * steps_per_epoch)
    warmup_total_iters = int(warmup_epochs * steps_per_epoch)
    no_aug_iter = no_aug_epochs * steps_per_epoch
    cos_span = total_iters - warmup_total_iters - no_aug_iter
    schedule = []
    for it in range(total_iters):
        if it < warmup_total_iters:
            # Quadratic ramp-up.
            frac = (it + 1) / float(warmup_total_iters)
            value = warmup_lr_start + (base_lr - warmup_lr_start) * frac ** 2
        elif it >= total_iters - no_aug_iter:
            # No-augmentation tail: hold the floor lr.
            value = min_lr
        else:
            # Half-cosine decay from base_lr down to min_lr.
            progress = (it - warmup_total_iters) / cos_span
            value = min_lr + 0.5 * (base_lr - min_lr) * (1.0 + math.cos(math.pi * progress))
        schedule.append(value)
    return np.array(schedule).astype(np.float32)
def warmup_cosine_annealing_lr_v2(lr, steps_per_epoch, warmup_epochs, max_epoch, t_max, eta_min=0):
    """Cosine annealing learning rate V2.

    Two-phase cosine schedule with linear warmup: the first two thirds of
    training anneal against period `t_max`; the final third restarts a
    shorter cosine (period `max_epoch / 3`) from the lr reached at the
    switch point.
    """
    base_lr = lr
    warmup_init_lr = 0
    total_steps = int(max_epoch * steps_per_epoch)
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    last_lr = 0
    last_epoch_v1 = 0
    t_max_v2 = int(max_epoch * 1 / 3)  # period of the second cosine phase
    lr_each_step = []
    for i in range(total_steps):
        last_epoch = i // steps_per_epoch
        if i < warmup_steps:
            lr = linear_warmup_lr(i + 1, warmup_steps, base_lr, warmup_init_lr)
        else:
            if i < total_steps * 2 / 3:
                # Phase 1: standard cosine annealing over t_max epochs;
                # remember where we stopped so phase 2 can restart from it.
                lr = eta_min + (base_lr - eta_min) * (1. + math.cos(math.pi * last_epoch / t_max)) / 2
                last_lr = lr
                last_epoch_v1 = last_epoch
            else:
                # Phase 2: restart from the last phase-1 lr with the shorter period.
                base_lr = last_lr
                last_epoch = last_epoch - last_epoch_v1
                lr = eta_min + (base_lr - eta_min) * (1. + math.cos(math.pi * last_epoch / t_max_v2)) / 2
        lr_each_step.append(lr)
    return np.array(lr_each_step).astype(np.float32)
def warmup_cosine_annealing_lr_sample(lr, steps_per_epoch, warmup_epochs, max_epoch, t_max, eta_min=0):
    """Warmup cosine annealing learning rate.

    Sampled variant: the cosine curve is computed over an extended horizon of
    `max_epoch + tobe_sampled_epoch` epochs and every `step_sample`-th epoch
    in [start_sample_epoch, end_sampled_epoch) is skipped, so the returned
    array still holds exactly `max_epoch * steps_per_epoch` values (enforced
    by the assert below).
    """
    # Sampling window is hard-coded for the original training recipe.
    start_sample_epoch = 60
    step_sample = 2
    tobe_sampled_epoch = 60
    end_sampled_epoch = start_sample_epoch + step_sample * tobe_sampled_epoch
    max_sampled_epoch = max_epoch + tobe_sampled_epoch
    t_max = max_sampled_epoch  # overrides the caller-provided t_max
    base_lr = lr
    warmup_init_lr = 0
    total_steps = int(max_epoch * steps_per_epoch)
    total_sampled_steps = int(max_sampled_epoch * steps_per_epoch)
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    lr_each_step = []
    for i in range(total_sampled_steps):
        last_epoch = i // steps_per_epoch
        # Skip the epochs that are "sampled away" from the extended curve.
        if last_epoch in range(start_sample_epoch, end_sampled_epoch, step_sample):
            continue
        if i < warmup_steps:
            lr = linear_warmup_lr(i + 1, warmup_steps, base_lr, warmup_init_lr)
        else:
            lr = eta_min + (base_lr - eta_min) * (1. + math.cos(math.pi * last_epoch / t_max)) / 2
        lr_each_step.append(lr)
    assert total_steps == len(lr_each_step)
    return np.array(lr_each_step).astype(np.float32)
def yolox_no_aug_lr(base_lr, steps_per_epoch, max_epoch, min_lr_ratio=0.05):
    """Constant schedule for the no-augmentation fine-tune phase.

    Every step uses the same floor value ``base_lr * min_lr_ratio``.

    Args:
        base_lr: peak learning rate the floor is derived from.
        steps_per_epoch: optimizer steps per epoch.
        max_epoch: number of epochs covered by the schedule.
        min_lr_ratio: fraction of ``base_lr`` used as the constant lr.

    Returns:
        np.ndarray of float32 with ``max_epoch * steps_per_epoch`` entries.
    """
    total_iters = int(max_epoch * steps_per_epoch)
    # np.full replaces the original append loop: same values, one allocation.
    return np.full(total_iters, base_lr * min_lr_ratio, dtype=np.float32)
def get_lr(args):
    """generate learning rate.

    Dispatches on `args.lr_scheduler` to one of the schedule builders above
    and returns the full per-step lr array for `args.max_epoch` epochs.

    Raises:
        NotImplementedError: for an unknown scheduler name.
    """
    if args.lr_scheduler == 'exponential':
        lr = warmup_step_lr(args.lr,
                            args.lr_epochs,
                            args.steps_per_epoch,
                            args.warmup_epochs,
                            args.max_epoch,
                            gamma=args.lr_gamma,
                            )
    elif args.lr_scheduler == 'cosine_annealing':
        lr = warmup_cosine_annealing_lr(args.lr,
                                        args.steps_per_epoch,
                                        args.warmup_epochs,
                                        args.max_epoch,
                                        args.t_max,
                                        args.eta_min)
    elif args.lr_scheduler == 'cosine_annealing_V2':
        lr = warmup_cosine_annealing_lr_v2(args.lr,
                                           args.steps_per_epoch,
                                           args.warmup_epochs,
                                           args.max_epoch,
                                           args.t_max,
                                           args.eta_min)
    elif args.lr_scheduler == 'cosine_annealing_sample':
        lr = warmup_cosine_annealing_lr_sample(args.lr,
                                               args.steps_per_epoch,
                                               args.warmup_epochs,
                                               args.max_epoch,
                                               args.t_max,
                                               args.eta_min)
    elif args.lr_scheduler == 'yolox_warm_cos_lr':
        lr = yolox_warm_cos_lr(lr=args.lr,
                               steps_per_epoch=args.steps_per_epoch,
                               warmup_epochs=args.warmup_epochs,
                               max_epoch=args.max_epoch,
                               no_aug_epochs=args.no_aug_epochs,
                               min_lr_ratio=args.min_lr_ratio)
    elif args.lr_scheduler == 'no_aug_lr':
        lr = yolox_no_aug_lr(
            args.lr,
            args.steps_per_epoch,
            args.max_epoch,
            min_lr_ratio=args.min_lr_ratio
        )
    else:
        raise NotImplementedError(args.lr_scheduler)
    return lr
def get_param_groups(network, weight_decay):
    """Split trainable params into no-decay / decay groups for the optimizer.

    Biases and BatchNorm affine parameters (``.gamma`` / ``.beta``) are
    excluded from weight decay; every other parameter receives
    `weight_decay`.
    """
    # Parameters whose names end with these suffixes skip weight decay:
    # biases, and BN scale/shift (be careful: `x` itself is not a BN cell).
    no_decay_suffixes = ('.bias', '.gamma', '.beta')
    no_decay_params = []
    decay_params = []
    for param in network.trainable_params():
        target = no_decay_params if param.name.endswith(no_decay_suffixes) else decay_params
        target.append(param)
    return [{'params': no_decay_params, 'weight_decay': 0.0},
            {'params': decay_params, 'weight_decay': weight_decay}]
def load_backbone(net, ckpt_path, args):
    """Load darknet53 backbone checkpoint.

    Args:
        net: network whose backbone parameters are (partially) initialised.
        ckpt_path: path of the checkpoint file to load.
        args: namespace carrying a `logger` used for progress reporting.

    Returns:
        The same `net` with all parameters found in the checkpoint loaded.
    """
    param_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, param_dict)
    # Collect names of parameters that were absent from the checkpoint.
    param_not_load = []
    for _, param in net.parameters_and_names():
        if param.name not in param_dict:
            param_not_load.append(param.name)
    # Fixed: the count was passed as a stray lazy %-argument with no
    # placeholder in the message, which breaks logging's deferred formatting.
    args.logger.info("not loading param is : %s", len(param_not_load))
    return net
class AverageMeter:
    """Computes and stores the average and current value.

    Optionally mirrors every update to a TensorBoard-style writer via
    `add_scalar(name, value, step)`.
    """

    def __init__(self, name, fmt=':f', tb_writer=None):
        # `name` labels the meter and doubles as the TensorBoard tag.
        self.name = name
        # `fmt` is the format spec applied to the running average in __str__.
        self.fmt = fmt
        self.reset()
        self.tb_writer = tb_writer
        self.cur_step = 1
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def reset(self):
        """Clear the running statistics (does not reset `cur_step`)."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
        if self.tb_writer is not None:
            self.tb_writer.add_scalar(self.name, self.val, self.cur_step)
        self.cur_step += 1

    def __str__(self):
        # Fixed: removed a leftover debug print that fired on every format call.
        fmtstr = '{name}:{avg' + self.fmt + '}'
        return fmtstr.format(**self.__dict__)
def keep_loss_fp32(network):
    """Keep loss of network with float32"""
    # Imported lazily to avoid a circular import between util and yolox.
    from src.yolox import YOLOLossCell
    for _, cell in network.cells_and_names():
        # Force the loss cell to compute in fp32 for numerical stability
        # even when the rest of the network runs in reduced precision.
        if isinstance(cell, (YOLOLossCell,)):
            cell.to_float(mstype.float32)
class EMACallBack(Callback):
    """Maintain exponential-moving-average (EMA) shadow weights during training.

    The shadow weights are kept as plain numpy arrays inside each epoch (for
    fast in-loop updates) and re-wrapped as MindSpore Parameters at every
    epoch boundary so they can be saved/loaded.
    """

    def __init__(self, network, steps_per_epoch, cur_steps=0):
        # `network` is the train cell exposing `ema`, `ema_moving_weight`,
        # `moving_name` and `moving_parameter_update()`.
        self.steps_per_epoch = steps_per_epoch
        self.cur_steps = cur_steps
        self.network = network

    def epoch_begin(self, run_context):
        # Unpack the shadow weights into numpy arrays at the start of an epoch.
        # NOTE(review): the guard fires when ema_moving_weight is NOT a list
        # (presumably a ParameterTuple) and then converts it — confirm the
        # container type against the train cell.
        if self.network.ema:
            if not isinstance(self.network.ema_moving_weight, list):
                tmp_moving = []
                for weight in self.network.ema_moving_weight:
                    tmp_moving.append(weight.asnumpy())
                self.network.ema_moving_weight = tmp_moving

    def step_end(self, run_context):
        # Update the shadow weights after every optimizer step.
        if self.network.ema:
            self.network.moving_parameter_update()
            self.cur_steps += 1
            if self.cur_steps % self.steps_per_epoch == 0:
                # Epoch boundary: re-wrap the numpy arrays as named Parameters.
                if isinstance(self.network.ema_moving_weight, list):
                    tmp_moving = []
                    moving_name = []
                    idx = 0
                    for key in self.network.moving_name:
                        moving_name.append(key)
                    for weight in self.network.ema_moving_weight:
                        param = Parameter(Tensor(weight), name=moving_name[idx])
                        tmp_moving.append(param)
                        idx += 1
                    self.network.ema_moving_weight = ParameterTuple(tmp_moving)
class YOLOXCB(Callback):
    """
    YOLOX Callback.

    Logs per-step and per-epoch loss/lr/timing and, when running on
    ModelArts, uploads the checkpoint directory to `train_url` at every
    epoch boundary.

    Args:
        logger: logger used for all training messages.
        step_per_epoch (int): optimizer steps per epoch.
        lr (sequence): per-step learning-rate schedule indexed by global step.
        save_ckpt_path (str): local directory where checkpoints are saved.
        is_modelart (bool): enable checkpoint upload through moxing.
        per_print_times (int): log every N steps within an epoch.
        train_url (str): remote (OBS) destination for checkpoint upload.
    """

    def __init__(self, logger, step_per_epoch, lr, save_ckpt_path, is_modelart=False, per_print_times=1,
                 train_url=None):
        super(YOLOXCB, self).__init__()
        self.train_url = train_url
        if not isinstance(per_print_times, int) or per_print_times < 0:
            raise ValueError("print_step must be int and >= 0.")
        self._per_print_times = per_print_times
        self.lr = lr
        self.is_modelarts = is_modelart
        self.step_per_epoch = step_per_epoch
        self.current_step = 0
        self.save_ckpt_path = save_ckpt_path
        self.iter_time = time.time()
        self.epoch_start_time = time.time()
        self.average_loss = []
        self.logger = logger

    def epoch_begin(self, run_context):
        """
        Called before each epoch beginning.

        Args:
            run_context (RunContext): Include some information of the model.
        """
        self.epoch_start_time = time.time()
        self.iter_time = time.time()

    def epoch_end(self, run_context):
        """
        Called after each epoch finished.

        Args:
            run_context (RunContext): Include some information of the model.
        """
        cb_params = run_context.original_args()
        cur_epoch = cb_params.cur_epoch_num
        # net_outputs is expected to be (loss, overflow flag, loss scale).
        loss = cb_params.net_outputs
        loss = "loss: %.4f, overflow: %s, scale: %s" % (float(loss[0].asnumpy()),
                                                        bool(loss[1].asnumpy()),
                                                        int(loss[2].asnumpy()))
        self.logger.info(
            "epoch: %s epoch time %.2fs %s" % (cur_epoch, time.time() - self.epoch_start_time, loss))
        if self.current_step % (self.step_per_epoch * 1) == 0:
            if self.is_modelarts:
                import moxing as mox
                if self.save_ckpt_path and self.train_url:
                    mox.file.copy_parallel(src_url=self.save_ckpt_path, dst_url=self.train_url)
                    cur_epoch = self.current_step // self.step_per_epoch
                    # Fixed: the format arguments were misordered — the ckpt
                    # path was logged as the epoch number.
                    self.logger.info(
                        "[epoch {}]copy ckpt from{} to {}".format(cur_epoch, self.save_ckpt_path,
                                                                  self.train_url))

    def step_begin(self, run_context):
        """
        Called before each step beginning.

        Args:
            run_context (RunContext): Include some information of the model.
        """

    def step_end(self, run_context):
        """
        Called after each step finished.

        Args:
            run_context (RunContext): Include some information of the model.
        """
        cur_epoch_step = (self.current_step + 1) % self.step_per_epoch
        if cur_epoch_step % self._per_print_times == 0 and cur_epoch_step != 0:
            cb_params = run_context.original_args()
            cur_epoch = cb_params.cur_epoch_num
            loss = cb_params.net_outputs
            loss = "loss: %.4f, overflow: %s, scale: %s" % (float(loss[0].asnumpy()),
                                                            bool(loss[1].asnumpy()),
                                                            int(loss[2].asnumpy()))
            self.logger.info("epoch: %s step: [%s/%s], %s, lr: %.6f, avg step time: %.2f ms" % (
                cur_epoch, cur_epoch_step, self.step_per_epoch, loss, self.lr[self.current_step],
                (time.time() - self.iter_time) * 1000 / self._per_print_times))
            self.iter_time = time.time()
        self.current_step += 1

    def end(self, run_context):
        """
        Called once after network training.

        Args:
            run_context (RunContext): Include some information of the model.
        """
class EvalCallBack(Callback):
    """Run COCO evaluation during training and keep the best checkpoint.

    Every `interval` epochs (starting at `start_epoch`, and always at the
    final epoch) the EMA shadow weights (rank 0) or the raw network weights
    (other ranks) are copied into `test_net`, the eval dataset is run through
    it, and the resulting mAP is compared against the best seen so far; the
    best model is saved as 'best.ckpt'.
    """

    def __init__(self, dataset, test_net, train_net, detection, config, start_epoch=0, interval=1):
        self.dataset = dataset
        self.network = train_net
        self.test_network = test_net
        self.detection = detection
        self.logger = config.logger
        self.start_epoch = start_epoch
        self.interval = interval
        self.max_epoch = config.max_epoch
        self.best_result = 0
        self.best_epoch = 0
        self.rank = config.rank

    def load_ema_parameter(self):
        """Copy the EMA shadow weights ('ema.' prefix) into the test network."""
        param_dict = {}
        for name, param in self.network.parameters_and_names():
            if name.startswith("ema."):
                new_name = name.split('ema.')[-1]
                param_new = param.clone()
                param_new.name = new_name
                param_dict[new_name] = param_new
        load_param_into_net(self.test_network, param_dict)

    def load_network_parameter(self):
        """Copy the raw train weights ('network.' prefix) into the test network."""
        param_dict = {}
        for name, param in self.network.parameters_and_names():
            if name.startswith("network."):
                param_new = param.clone()
                param_dict[name] = param_new
        load_param_into_net(self.test_network, param_dict)

    def epoch_end(self, run_context):
        """Evaluate at the configured epochs and checkpoint the best model."""
        cb_param = run_context.original_args()
        cur_epoch = cb_param.cur_epoch_num
        if cur_epoch >= self.start_epoch:
            if (cur_epoch - self.start_epoch) % self.interval == 0 or cur_epoch == self.max_epoch:
                if self.rank == 0:
                    self.load_ema_parameter()
                else:
                    self.load_network_parameter()
                self.test_network.set_train(False)
                eval_print_str, results = self.inference()
                if results >= self.best_result:
                    self.best_result = results
                    self.best_epoch = cur_epoch
                    if os.path.exists('best.ckpt'):
                        self.remove_ckpoint_file('best.ckpt')
                    save_checkpoint(cb_param.train_network, 'best.ckpt')
                    self.logger.info("Best result %s at %s epoch" % (self.best_result, self.best_epoch))
                # NOTE(review): eval_print_str may be None when evaluation
                # produced no result; kept as-is to preserve behavior.
                self.logger.info(eval_print_str)
                self.logger.info('Ending inference...')

    def end(self, run_context):
        """Log the best result once training finishes."""
        self.logger.info("Best result %s at %s epoch" % (self.best_result, self.best_epoch))

    def inference(self):
        """Run the eval dataset through the test network and compute mAP."""
        self.logger.info('Start inference...')
        self.logger.info("eval dataset size, %s" % self.dataset.get_dataset_size())
        counts = 0
        for data in self.dataset.create_dict_iterator(num_epochs=1):
            image = data['image']
            img_info = data['image_shape']
            img_id = data['img_id']
            prediction = self.test_network(image)
            prediction = prediction.asnumpy()
            img_shape = img_info.asnumpy()
            img_id = img_id.asnumpy()
            counts = counts + 1
            self.detection.detection(prediction, img_shape, img_id)
            self.logger.info('Calculating mAP...%s' % counts)
        # Fixed: the progress line above was duplicated inside the loop.
        result_file_path = self.detection.evaluate_prediction()
        self.logger.info('result file path: %s', result_file_path)
        eval_result, results = self.detection.get_eval_result()
        if eval_result is not None and results is not None:
            eval_print_str = '\n=============coco eval result=========\n' + eval_result
            return eval_print_str, results
        return None, 0

    def remove_ckpoint_file(self, file_name):
        """Remove the specified checkpoint file from this checkpoint manager and also from the directory."""
        try:
            os.chmod(file_name, stat.S_IWRITE)
            os.remove(file_name)
        except OSError:
            self.logger.info("OSError, failed to remove the older ckpt file %s.", file_name)
        except ValueError:
            self.logger.info("ValueError, failed to remove the older ckpt file %s.", file_name)
class Redirct:
    """Minimal stdout stand-in that accumulates everything written to it."""

    def __init__(self):
        self.content = ""

    def write(self, content):
        # Buffer instead of emitting; callers read the text via `content`.
        self.content = self.content + content

    def flush(self):
        # Flushing discards the buffer, acting as a reset.
        self.content = ""
class DetectionEngine:
    """ Detection engine: post-processes raw YOLOX outputs (confidence
    filtering + NMS), buffers predictions in COCO result format and runs
    pycocotools evaluation against the val2017 annotations. """

    def __init__(self, config):
        self.config = config
        self.input_size = self.config.input_size
        self.strides = self.config.fpn_strides  # [8, 16, 32]
        self.expanded_strides = None
        self.grids = None
        self.num_classes = config.num_classes
        self.conf_thre = config.conf_thre
        self.nms_thre = config.nms_thre
        # COCO val2017 ground-truth annotations used for evaluation.
        self.annFile = os.path.join(config.data_dir, 'annotations/instances_val2017.json')
        self._coco = COCO(self.annFile)
        self._img_ids = list(sorted(self._coco.imgs.keys()))
        self.coco_catIds = self._coco.getCatIds()
        self.save_prefix = config.outputs_dir
        self.file_path = ''
        # Accumulated predictions (COCO json dicts) across batches.
        self.data_list = []

    def detection(self, outputs, img_shape, img_ids):
        """Apply NMS to one batch of raw outputs and buffer COCO-format results."""
        # post process nms
        outputs = self.postprocess(outputs, self.num_classes, self.conf_thre, self.nms_thre)
        self.data_list.extend(self.convert_to_coco_format(outputs, info_imgs=img_shape, ids=img_ids))

    def postprocess(self, prediction, num_classes, conf_thre=0.7, nms_thre=0.45, class_agnostic=False):
        """ nms

        `prediction` is a (batch, n_boxes, 5 + num_classes) array with boxes
        encoded as (cx, cy, w, h, obj_conf, per-class scores...). Returns a
        list with one (n_kept, 7) array per image — columns
        (x1, y1, x2, y2, obj_conf, class_conf, class_idx) — or None where
        nothing passes `conf_thre`. Modifies `prediction` in place.
        """
        # Convert centre/size boxes to corner format, written back in place.
        box_corner = np.zeros_like(prediction)
        box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
        box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
        box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
        box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
        prediction[:, :, :4] = box_corner[:, :, :4]
        output = [None for _ in range(len(prediction))]
        for i, image_pred in enumerate(prediction):
            if not image_pred.shape[0]:
                continue
            # Get score and class with highest confidence
            class_conf = np.max(image_pred[:, 5:5 + num_classes], axis=-1)  # (8400)
            class_pred = np.argmax(image_pred[:, 5:5 + num_classes], axis=-1)  # (8400)
            # Keep boxes whose combined (objectness * class) score clears the threshold.
            conf_mask = (image_pred[:, 4] * class_conf >= conf_thre).squeeze()  # (8400)
            class_conf = np.expand_dims(class_conf, axis=-1)  # (8400, 1)
            # NOTE(review): the float16 cast limits exactly-representable class
            # indices; fine for COCO's 80 classes — verify before reusing with
            # much larger label sets.
            class_pred = np.expand_dims(class_pred, axis=-1).astype(np.float16)  # (8400, 1)
            # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
            detections = np.concatenate((image_pred[:, :5], class_conf, class_pred), axis=1)
            detections = detections[conf_mask]
            if not detections.shape[0]:
                continue
            if class_agnostic:
                nms_out_index = self._nms(detections[:, :4], detections[:, 4] * detections[:, 5], nms_thre)
            else:
                nms_out_index = self._batch_nms(detections[:, :4], detections[:, 4] * detections[:, 5],
                                                detections[:, 6], nms_thre)
            detections = detections[nms_out_index]
            if output[i] is None:
                output[i] = detections
            else:
                output[i] = np.concatenate((output[i], detections))
        return output

    def _nms(self, xyxys, scores, threshold):
        """Calculate NMS.

        Greedy suppression: repeatedly keep the highest-scoring box and drop
        all remaining boxes whose IoU with it exceeds `threshold`. Uses the
        classic +1 pixel area convention. Returns kept indices in descending
        score order.
        """
        x1 = xyxys[:, 0]
        y1 = xyxys[:, 1]
        x2 = xyxys[:, 2]
        y2 = xyxys[:, 3]
        scores = scores  # NOTE(review): redundant self-assignment, kept as-is
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]
        reserved_boxes = []
        while order.size > 0:
            i = order[0]
            reserved_boxes.append(i)
            # Intersection of the current winner with every remaining box.
            max_x1 = np.maximum(x1[i], x1[order[1:]])
            max_y1 = np.maximum(y1[i], y1[order[1:]])
            min_x2 = np.minimum(x2[i], x2[order[1:]])
            min_y2 = np.minimum(y2[i], y2[order[1:]])
            intersect_w = np.maximum(0.0, min_x2 - max_x1 + 1)
            intersect_h = np.maximum(0.0, min_y2 - max_y1 + 1)
            intersect_area = intersect_w * intersect_h
            ovr = intersect_area / (areas[i] + areas[order[1:]] - intersect_area)
            # Keep only boxes that overlap the winner by at most `threshold`.
            indexes = np.where(ovr <= threshold)[0]
            order = order[indexes + 1]
        return reserved_boxes

    def _batch_nms(self, xyxys, scores, idxs, threshold, use_offset=True):
        """Calculate Nms based on class info,Each index value correspond to a category,
        and NMS will not be applied between elements of different categories."""
        if use_offset:
            # Shift each class's boxes into a disjoint coordinate range so a
            # single class-agnostic NMS pass never compares across categories.
            max_coordinate = xyxys.max()
            offsets = idxs * (max_coordinate + np.array([1]))
            boxes_for_nms = xyxys + offsets[:, None]
            keep = self._nms(boxes_for_nms, scores, threshold)
            return keep
        # Fallback: run NMS independently per category, then merge.
        keep_mask = np.zeros_like(scores, dtype=np.bool_)
        for class_id in np.unique(idxs):
            curr_indices = np.where(idxs == class_id)[0]
            curr_keep_indices = self._nms(xyxys[curr_indices], scores[curr_indices], threshold)
            keep_mask[curr_indices[curr_keep_indices]] = True
        keep_indices = np.where(keep_mask)[0]
        return keep_indices[np.argsort(-scores[keep_indices])]

    def convert_to_coco_format(self, outputs, info_imgs, ids):
        """ convert to coco format

        Rescales boxes from network-input coordinates back to original image
        size (undoing the letterbox resize) and emits one COCO result dict
        per detection.
        """
        data_list = []
        for (output, img_h, img_w, img_id) in zip(
                outputs, info_imgs[:, 0], info_imgs[:, 1], ids
        ):
            if output is None:
                continue
            bboxes = output[:, 0:4]
            # Inverse of the letterbox scale applied at preprocessing time.
            scale = min(
                self.input_size[0] / float(img_h), self.input_size[1] / float(img_w)
            )
            bboxes = bboxes / scale
            bboxes[:, [0, 2]] = np.clip(bboxes[:, [0, 2]], 0, img_w)
            bboxes[:, [1, 3]] = np.clip(bboxes[:, [1, 3]], 0, img_h)
            bboxes = xyxy2xywh(bboxes)
            cls = output[:, 6]
            scores = output[:, 4] * output[:, 5]
            for ind in range(bboxes.shape[0]):
                # Map the contiguous class index back to the sparse COCO category id.
                label = self.coco_catIds[int(cls[ind])]
                pred_data = {
                    "image_id": int(img_id),
                    "category_id": label,
                    "bbox": bboxes[ind].tolist(),
                    "score": scores[ind].item(),
                    "segmentation": [],
                }  # COCO json format
                data_list.append(pred_data)
        return data_list

    def evaluate_prediction(self):
        """ generate prediction coco json file

        Returns the json path, or '' when there were no predictions (an
        empty result set would make pycocotools fail downstream).
        """
        print('Evaluate in main process...')
        # write result to coco json format
        t = datetime.now().strftime('_%Y_%m_%d_%H_%M_%S')
        try:
            self.file_path = self.save_prefix + '/predict' + t + '.json'
            f = open(self.file_path, 'w')
            json.dump(self.data_list, f)
        except IOError as e:
            raise RuntimeError("Unable to open json file to dump. What():{}".format(str(e)))
        else:
            f.close()
        if not self.data_list:
            self.file_path = ''
            return self.file_path
        # Buffer is flushed once written so the next eval starts clean.
        self.data_list.clear()
        return self.file_path

    def get_eval_result(self):
        """Get eval result: (summary text, mAP@[.5:.95]) or (None, None)."""
        if not self.file_path:
            return None, None
        cocoGt = self._coco
        cocoDt = cocoGt.loadRes(self.file_path)
        cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
        cocoEval.evaluate()
        cocoEval.accumulate()
        # Capture pycocotools' summary (printed to stdout) into a string.
        rdct = Redirct()
        stdout = sys.stdout
        sys.stdout = rdct
        cocoEval.summarize()
        sys.stdout = stdout
return rdct.content, cocoEval.stats[0] | 0.553023 | 0.284191 |
import pytest
from models import Grid, Position
from core.exceptions import OutOfBoundsError, InvalidGridCoordinates
class TestGrid(object):
    """Validation tests for Grid construction arguments."""

    def test_grid_x_str_value_error(self):
        # Non-numeric width must be rejected.
        with pytest.raises(ValueError):
            Grid('foo', 2)

    def test_grid_y_str_value_error(self):
        # Non-numeric height must be rejected.
        with pytest.raises(ValueError):
            Grid(2, 'foo')

    def test_invalid_grid_coordinates_x(self):
        # Negative width is outside the allowed coordinate range.
        with pytest.raises(InvalidGridCoordinates):
            Grid(-1, 0)

    def test_invalid_grid_coordinates_y(self):
        # Negative height is outside the allowed coordinate range.
        with pytest.raises(InvalidGridCoordinates):
            Grid(0, -1)
class TestPosition(object):
    """Bounds and validation tests for Position on a 5x5 grid."""

    @pytest.fixture
    def grid(self):
        # Shared 5x5 grid: valid positions satisfy 0 <= x, y <= 5 (inclusive).
        return Grid(5, 5)

    def test_positions_x_out_of_bounds(self, grid):
        # x just past the right edge must be rejected.
        with pytest.raises(OutOfBoundsError):
            Position(6, 5, grid)

    def test_positions_x_out_of_bounds_negative(self, grid):
        # Negative x must be rejected.
        with pytest.raises(OutOfBoundsError):
            Position(-1, 5, grid)

    def test_positions_y_out_of_bounds(self, grid):
        # y just past the top edge must be rejected.
        with pytest.raises(OutOfBoundsError):
            Position(5, 6, grid)

    def test_positions_y_out_of_bounds_negative(self, grid):
        # Negative y must be rejected.
        with pytest.raises(OutOfBoundsError):
            Position(5, -1, grid)

    def test_position_x_str_value_error(self, grid):
        # Non-numeric x coordinate must be rejected.
        with pytest.raises(ValueError):
            Position('foo', 2, grid)

    def test_position_y_str_value_error(self, grid):
        # Non-numeric y coordinate must be rejected.
        with pytest.raises(ValueError):
            Position(2, 'foo', grid)

    def test_position_x_y_bounds_lower_left(self, grid):
        # (0, 0) is the inclusive lower-left corner.
        position = Position(0, 0, grid)
        assert position.x == 0
        assert position.y == 0

    def test_position_x_y_bounds_lower_right(self, grid):
        # (5, 0) is the inclusive lower-right corner.
        position = Position(5, 0, grid)
        assert position.x == 5
        assert position.y == 0

    def test_position_x_y_bounds_upper_left(self, grid):
        # (0, 5) is the inclusive upper-left corner.
        position = Position(0, 5, grid)
        assert position.x == 0
        assert position.y == 5
def test_position_x_y_bounds_upper_right(self, grid):
position = Position(5, 5, grid)
assert position.x == 5
assert position.y == 5 | models/test_grid.py | import pytest
from models import Grid, Position
from core.exceptions import OutOfBoundsError, InvalidGridCoordinates
class TestGrid(object):
def test_grid_x_str_value_error(self):
with pytest.raises(ValueError):
Grid('foo', 2)
def test_grid_y_str_value_error(self):
with pytest.raises(ValueError):
Grid(2, 'foo')
def test_invalid_grid_coordinates_x(self):
with pytest.raises(InvalidGridCoordinates):
Grid(-1, 0)
def test_invalid_grid_coordinates_y(self):
with pytest.raises(InvalidGridCoordinates):
Grid(0, -1)
class TestPosition(object):
@pytest.fixture
def grid(self):
return Grid(5, 5)
def test_positions_x_out_of_bounds(self, grid):
with pytest.raises(OutOfBoundsError):
Position(6, 5, grid)
def test_positions_x_out_of_bounds_negative(self, grid):
with pytest.raises(OutOfBoundsError):
Position(-1, 5, grid)
def test_positions_y_out_of_bounds(self, grid):
with pytest.raises(OutOfBoundsError):
Position(5, 6, grid)
def test_positions_y_out_of_bounds_negative(self, grid):
with pytest.raises(OutOfBoundsError):
Position(5, -1, grid)
def test_position_x_str_value_error(self, grid):
with pytest.raises(ValueError):
Position('foo', 2, grid)
def test_position_y_str_value_error(self, grid):
with pytest.raises(ValueError):
Position(2, 'foo', grid)
def test_position_x_y_bounds_lower_left(self, grid):
position = Position(0, 0, grid)
assert position.x == 0
assert position.y == 0
def test_position_x_y_bounds_lower_right(self, grid):
position = Position(5, 0, grid)
assert position.x == 5
assert position.y == 0
def test_position_x_y_bounds_upper_left(self, grid):
position = Position(0, 5, grid)
assert position.x == 0
assert position.y == 5
def test_position_x_y_bounds_upper_right(self, grid):
position = Position(5, 5, grid)
assert position.x == 5
assert position.y == 5 | 0.824568 | 0.703753 |
import unittest
from checkov.terraform.checks.resource.gcp.GoogleBigQueryDatasetPublicACL import check
from checkov.common.models.enums import CheckResult
class TestBigQueryDatasetPublicACL(unittest.TestCase):
def test_failure_special_group(self):
resource_conf = {"dataset_id": ["example_dataset"],
"friendly_name": ["test"],
"description": ["This is a test description"],
"location": ["EU"],
"default_table_expiration_ms": [3600000],
"access": [{"role": ["READER"], "special_group": ["allAuthenticatedUsers"]}]
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure_all_users(self):
resource_conf = {"dataset_id": ["example_dataset"],
"friendly_name": ["test"],
"description": ["This is a test description"],
"location": ["EU"],
"default_table_expiration_ms": [3600000],
"access": [{"role": ["VIEWER"], "special_group": ["projectReaders"]},
{"role": ["READER"]}]
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success_special_group(self):
resource_conf = {"dataset_id": ["example_dataset"],
"friendly_name": ["test"],
"description": ["This is a test description"],
"location": ["EU"],
"default_table_expiration_ms": [3600000],
"access": [{"role": ["READER"], "special_group": ["projectReaders"]}]
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_success(self):
resource_conf = {"dataset_id": ["example_dataset"],
"friendly_name": ["test"],
"description": ["This is a test description"],
"location": ["EU"],
"default_table_expiration_ms": [3600000],
"access": [{"role": ["EDITOR"], "user_by_email": ["<EMAIL>"]}]
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main() | tests/terraform/checks/resource/gcp/test_GoogleBigQueryDatasetPublicACL.py | import unittest
from checkov.terraform.checks.resource.gcp.GoogleBigQueryDatasetPublicACL import check
from checkov.common.models.enums import CheckResult
class TestBigQueryDatasetPublicACL(unittest.TestCase):
def test_failure_special_group(self):
resource_conf = {"dataset_id": ["example_dataset"],
"friendly_name": ["test"],
"description": ["This is a test description"],
"location": ["EU"],
"default_table_expiration_ms": [3600000],
"access": [{"role": ["READER"], "special_group": ["allAuthenticatedUsers"]}]
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure_all_users(self):
resource_conf = {"dataset_id": ["example_dataset"],
"friendly_name": ["test"],
"description": ["This is a test description"],
"location": ["EU"],
"default_table_expiration_ms": [3600000],
"access": [{"role": ["VIEWER"], "special_group": ["projectReaders"]},
{"role": ["READER"]}]
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success_special_group(self):
resource_conf = {"dataset_id": ["example_dataset"],
"friendly_name": ["test"],
"description": ["This is a test description"],
"location": ["EU"],
"default_table_expiration_ms": [3600000],
"access": [{"role": ["READER"], "special_group": ["projectReaders"]}]
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_success(self):
resource_conf = {"dataset_id": ["example_dataset"],
"friendly_name": ["test"],
"description": ["This is a test description"],
"location": ["EU"],
"default_table_expiration_ms": [3600000],
"access": [{"role": ["EDITOR"], "user_by_email": ["<EMAIL>"]}]
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main() | 0.555676 | 0.478529 |
import os
import re
RULE_REGEX = re.compile(r'(.+): (\d+)-(\d+) or (\d+)-(\d+)')
DEPARTURE_REGEX = re.compile(r'^departure')
def is_valid(value, rule1, rule2):
return (rule1[0] <= value <= rule1[1]) or (rule2[0] <= value <= rule2[1])
def filter_tickets(tickets, rules):
error_rate = 0
valid_tickets = []
for ticket in nearby_tickets:
valid = True
for value in ticket:
if all(
not is_valid(value, rule1, rule2)
for rule1, rule2 in rules.values()
):
valid = False
error_rate += value
if valid:
valid_tickets.append(ticket)
return valid_tickets, error_rate
def parse_ticket_fields(tickets, rules):
possible_fields = {
rule: set(range(len(tickets)))
for rule in rules.keys()
}
# Narrow possible field sets by checking ticket values against rules.
unknown_positions = set(range(len(tickets)))
for ticket in tickets:
for i, value in enumerate(ticket):
if i not in unknown_positions:
continue
for rule, fields in possible_fields.items():
if len(fields) == 1:
continue
rule1, rule2 = rules[rule]
if not is_valid(value, rule1, rule2):
# Remove field position if ticket has values not valid for rule.
fields.remove(i)
# If only one possible field remains for rule, remove from
# other rules.
if len(fields) == 1:
found_position = list(fields)[0]
unknown_positions.remove(found_position)
for other_rule, other_field in possible_fields.items():
if rule == other_rule:
continue
other_field.remove(found_position)
# Narrow possible field sets by looking for field positions found in
# single rule until all rules have single field.
fields_parsed = False
while not fields_parsed:
fields_parsed = True
for rule, fields in possible_fields.items():
if len(fields) == 1:
continue
fields_parsed = False
other_rules_fields = [
other_fields
for other_rule, other_fields in possible_fields.items()
if other_rule != rule
]
for field in fields:
if all(
field not in other_fields
for other_fields in other_rules_fields
):
possible_fields[rule] = set([field])
break
return [
rule
for rule, _ in sorted(
possible_fields.items(),
key=lambda x: list(x[1])[0],
)
]
if __name__ == '__main__':
with open(os.path.join('sampleinputs', 'day16.txt')) as file:
sections = file.read().strip().split('\n\n')
[rules, ticket, nearby_tickets] = sections
rules = {
match[1]: (
(int(match[2]), int(match[3])),
(int(match[4]), int(match[5])),
)
for match in [
RULE_REGEX.match(rule)
for rule in sections[0].split('\n')
]
}
ticket = [
int(value)
for value in sections[1].split('\n')[1].split(',')
]
nearby_tickets = [
[int(value) for value in ticket.split(',')]
for ticket in sections[2].split('\n')[1:]
]
valid_tickets, error_rate = filter_tickets(nearby_tickets, rules)
print(f'Part 1: {error_rate}')
ticket_fields = parse_ticket_fields(valid_tickets, rules)
departure_product = 1
for i, field in enumerate(ticket_fields):
if not DEPARTURE_REGEX.match(field):
continue
departure_product *= ticket[i]
print(f'Part 2: {departure_product}') | 2020/day16.py | import os
import re
RULE_REGEX = re.compile(r'(.+): (\d+)-(\d+) or (\d+)-(\d+)')
DEPARTURE_REGEX = re.compile(r'^departure')
def is_valid(value, rule1, rule2):
return (rule1[0] <= value <= rule1[1]) or (rule2[0] <= value <= rule2[1])
def filter_tickets(tickets, rules):
error_rate = 0
valid_tickets = []
for ticket in nearby_tickets:
valid = True
for value in ticket:
if all(
not is_valid(value, rule1, rule2)
for rule1, rule2 in rules.values()
):
valid = False
error_rate += value
if valid:
valid_tickets.append(ticket)
return valid_tickets, error_rate
def parse_ticket_fields(tickets, rules):
possible_fields = {
rule: set(range(len(tickets)))
for rule in rules.keys()
}
# Narrow possible field sets by checking ticket values against rules.
unknown_positions = set(range(len(tickets)))
for ticket in tickets:
for i, value in enumerate(ticket):
if i not in unknown_positions:
continue
for rule, fields in possible_fields.items():
if len(fields) == 1:
continue
rule1, rule2 = rules[rule]
if not is_valid(value, rule1, rule2):
# Remove field position if ticket has values not valid for rule.
fields.remove(i)
# If only one possible field remains for rule, remove from
# other rules.
if len(fields) == 1:
found_position = list(fields)[0]
unknown_positions.remove(found_position)
for other_rule, other_field in possible_fields.items():
if rule == other_rule:
continue
other_field.remove(found_position)
# Narrow possible field sets by looking for field positions found in
# single rule until all rules have single field.
fields_parsed = False
while not fields_parsed:
fields_parsed = True
for rule, fields in possible_fields.items():
if len(fields) == 1:
continue
fields_parsed = False
other_rules_fields = [
other_fields
for other_rule, other_fields in possible_fields.items()
if other_rule != rule
]
for field in fields:
if all(
field not in other_fields
for other_fields in other_rules_fields
):
possible_fields[rule] = set([field])
break
return [
rule
for rule, _ in sorted(
possible_fields.items(),
key=lambda x: list(x[1])[0],
)
]
if __name__ == '__main__':
with open(os.path.join('sampleinputs', 'day16.txt')) as file:
sections = file.read().strip().split('\n\n')
[rules, ticket, nearby_tickets] = sections
rules = {
match[1]: (
(int(match[2]), int(match[3])),
(int(match[4]), int(match[5])),
)
for match in [
RULE_REGEX.match(rule)
for rule in sections[0].split('\n')
]
}
ticket = [
int(value)
for value in sections[1].split('\n')[1].split(',')
]
nearby_tickets = [
[int(value) for value in ticket.split(',')]
for ticket in sections[2].split('\n')[1:]
]
valid_tickets, error_rate = filter_tickets(nearby_tickets, rules)
print(f'Part 1: {error_rate}')
ticket_fields = parse_ticket_fields(valid_tickets, rules)
departure_product = 1
for i, field in enumerate(ticket_fields):
if not DEPARTURE_REGEX.match(field):
continue
departure_product *= ticket[i]
print(f'Part 2: {departure_product}') | 0.354321 | 0.441673 |
import nltk
from nltk import TweetTokenizer
import string
import re
import numpy as np
class TextProcessor:
"""TextProcessor
This class is to help automate text processing for the analysis
of unstructured text data to be used for text mining and NLP tasks.
The main NLP library used within this class is NLTK.
"""
# specifies how noun phrases are collected by the noun phrase parser
default_noun_phrase_format = r"""NP: {<JJ.*>*<NN.*>+<DT>*<IN.*>*<JJ.*>*<NN.*>+}
{<JJ.*>+<NN.*>+}"""
punctuation = {char for char in string.punctuation} ; punctuation.add('...')
def __init__(
self,
tokenizer = nltk.word_tokenize,
lemmatizer = nltk.WordNetLemmatizer().lemmatize,
stopwords = nltk.corpus.stopwords.words('english')
):
"""
Initialize a new TextProcessor object.
__init__(self, tokenizer, lemmatizer, stopwords)
tokenizer: str -> [str]; optional (default = nltk.tokenize.word_tokenize)
Tokenizer function. Function that takes in a sentence string and returns
the list of token strings.
lemmatizer: str -> str; optional (default = nltk.stem.WordNetLemmatizer().lemmatize)
Lemmatizer function. Function that takes in a token string and returns the
lemmatized token string.
stopwords: [str]; optional (default = nltk.corpus.stopwords.words('english))
List of stopwords. The list of words to be ignored during processing.
"""
self.tokenizer = tokenizer
self.lemmatizer = lemmatizer
self.stopwords = set(stopwords)
##############################################################################
## Stopword Methods
##############################################################################
def add_stopwords(self, new_stopwords):
"""
Add a word or list of words to the list of stopwords.
add_stopwords(self, new_stopwords)
new_stopwords: str or [str]
A single word or a list of words to be added to the list of stopwords.
Raises ValueError if a list entry is not a string. In this case, none of the
list entries are added to the stopwords list
Returns: None
"""
if isinstance(new_stopwords, str):
self.stopwords.add(new_stopwords)
elif isinstance(new_stopwords, list):
for stopword in new_stopwords: # check all entries are strings
if not isinstance(stopword, str):
raise ValueError(f"A list entry (entry={stopword}) was found that wasn't a string. Only strings can be added as stopwords")
for stopword in new_stopwords: # add to the list of stopwords
self.stopwords.add(stopword)
def remove_stopwords(self, old_stopwords):
"""
Remove a word or list of words from the list of stopwords.
remove_stopwords(self, old_stopwords)
old_stopwords: str or [str]
A single word or list of words to be removed from the list of stopwords.
Returns: None
"""
if isinstance(old_stopwords, str):
try:
self.stopwords.remove(old_stopwords)
except(KeyError):
pass
return
elif isinstance(old_stopwords, list):
for stopword in old_stopwords:
try:
self.stopwords.remove(stopword)
except(KeyError):
pass
def get_stopwords(self):
"""
Get the list of stopwords.
get_stopwords(self)
Returns [str]; The list of stopwords. That is, the list of words that are ignored
during processing.
"""
return self.stopwords
##############################################################################
## Tokenization Methods
##############################################################################
def tokenize(self, sentence, to_lowercase=False, lemmatize=False):
"""
Get the tokens from a given sentence.
tokenize(self, sentence, to_lowercase, lemmatize)
sentence: str
The sentence to be tokenized.
to_lowercase: bool; optional (default = False)
True if tokens are to be converted to lowercase.
lemmatize: bool; optional (default = False)
True if tokens are to be lemmatized
Returns [str]; The list of tokens for the given sentence.
Note: If the given sentence is not a string, the numpy nan is returned.
This is useful for processing on a Pandas DataFrame without worrying about types.
"""
if not isinstance(sentence, str):
return np.nan
if lemmatize:
lemmatized_pos_tuples = self.get_all_pos(sentence, to_lowercase=to_lowercase, lemmatize=True)
return [token for (token, tag) in lemmatized_pos_tuples]
tokens = self.tokenizer(sentence)
if to_lowercase:
tokens = self.tokens_to_lowercase(tokens)
return tokens
def tokens_to_lowercase(self, tokens):
"""
Convert each token in list of tokens to lowercase.
tokens_to_lowercase(self, tokens)
tokens: [str]
List of tokens to be converted to lowercase.
Returns [str]; The given list of tokens converted to lowercase.
Note: If tokens is not a list, it will return np.nan. This is useful for processing on a
Pandas DataFrame without worrying about types.
"""
if not isinstance(tokens, list):
return np.nan
return [token.lower() for token in tokens]
##############################################################################
## Parts-of-Speech (POS) Methods
##############################################################################
def get_all_pos(
self,
sentence,
to_lowercase=False,
lemmatize=False
):
"""
Get all the possible pos_tuples (words paired with their corresponding part-of-speech)
for the given sentence.
get_all_pos(self, sentence, to_lowercase, lemmatize)
sentence: str
The sentence to derive the pos_tuples from.
to_lowercase: bool; optional (default = True)
True if pos_tuple tokens should be converted to lowercase.
lemmatize: bool; optional (default = False)
True if pos_tuple tokens should be lemmatized.
Returns [(str, str)]; The list of pos_tuples derived from a given sentence.
That is, the list of tuples consisting of each word paired with its part-of-speech
"""
sentence = str(sentence)
if sentence == 'nan':
return np.nan
tokens = self.tokenize(sentence) # tokenize
pos_tuples = nltk.pos_tag(tokens) # get the pos
if lemmatize:
pos_tuples = self.lemmatize_pos_tuples(pos_tuples)
if to_lowercase:
tokens = self.tokens_to_lowercase([token for (token, tag) in pos_tuples])
tags = [tag for (token, tag) in pos_tuples]
pos_tuples = [(tokens[i], tags[i]) for i in range(len(pos_tuples))]
return pos_tuples
def get_pos(
self,
sentence,
tag,
to_lowercase=False,
lemmatize=False
):
"""
Get all tokens corresponding to a specific part-of-speech for the given sentence.
Note that the given tag must either match or partially match a pos-tag in NLTK's tagset
(For example, to search for adjectives you need to specify tag="JJ" or just tag="J", etc.
search on Google for more information about NTLK's tagset)
get_pos(self, sentence, tag, to_lowercase, lemmatize)
sentence: str
The sentence to derive the tokens from.
tag: str
The tag associated with the part-of-speech. Note that the given tag must either match
or partially match a pos-tag in NLTK's tagset
to_lowercase: bool; optional (default = False)
True if tokens should be converted to lowercase.
lemmatize: bool; optional (default = False)
True if tokens should be lemmatized.
Returns [str]; The list of tokens corresponding to the given part-of-speech tag.
"""
sentence = str(sentence)
if sentence == 'nan':
return np.nan
pos_tuples = self.get_all_pos(sentence, to_lowercase, lemmatize)
return TextProcessor.__filter_pos_tuples(pos_tuples, tag)
def __filter_pos_tuples(pos_tuples, match_tag):
"""
<PRIVATE CLASS METHOD> Returns the tokens whose pos tag matches the given match_tag from the
given list of pos_tuples.
__filter_pos_tuples(self, pos_tuples, match_tag)
pos_tuples: [(str, str)]
List of pos_tuples to filter from.
match_tag: str
The part-of-speech tag to filter the pos_tuples on.
Returns [tokens]; The list of tokens that have the same tag as the given match_tag.
Note: If pos_tuples is not a list, it will return np.nan. This is useful for processing on a
Pandas DataFrame without worrying about types.
"""
if not isinstance(pos_tuples, list):
return np.nan
return [token for (token, tag) in pos_tuples if match_tag in tag]
##############################################################################
## Lemmatization Methods
##############################################################################
def lemmatize_pos_tuples(self, pos_tuples):
"""
Lemmatize the token part of each tuple in the given list of pos_tuples.
lemmatize_pos_tuples(self, pos_tuples)
pos_tuples: [(str, str)]
The list of pos_tuples to be lemmatized.
Returns [(str, str)]; The list of pos_tuples with the tokens lemmatized.
"""
pos_tuples_wordnet = TextProcessor._TextProcessor__format_pos_tuples_to_wordnet(pos_tuples)
lemmatized_pos_tokens = [self.lemmatizer(token, pos=tag) for (token, tag) in pos_tuples_wordnet] # lemmatize the tokens
original_pos_tags = [tag for (token, tag) in pos_tuples] # keep the original POS tag (not the wordnet tag)
# match each token with their original pos-tag
return [(lemmatized_pos_tokens[i], original_pos_tags[i]) for i in range(len(pos_tuples))]
def __format_pos_tuples_to_wordnet(pos_tuples):
"""
<PRIVATE CLASS METHOD> Convert the pos-tags from the given a list of pos_tuples, to the format
that is accepted by WordNet.
__format_pos_tuples_to_wordnet(pos_tuples)
pos_tuples: [(str, str)]
List of pos_tuples to be formatted.
Returns [(str, str)]; The pos-tuples with WordNet-compatable pos-tags
"""
# dictionary of the WordNet POS labels
wordnet_tags = {"J": nltk.corpus.wordnet.ADJ,
"N": nltk.corpus.wordnet.NOUN,
"V": nltk.corpus.wordnet.VERB,
"R": nltk.corpus.wordnet.ADV}
return [(token, wordnet_tags.get(tag[0], nltk.corpus.wordnet.NOUN)) for (token, tag) in pos_tuples]
##############################################################################
## Noun Phrase Methods
##############################################################################
def get_noun_phrases(
self,
sentence,
noun_phrase_format=default_noun_phrase_format,
to_lowercase=False,
singularize=False
):
"""
Derive all the noun phrases contained in a given sentence.
get_noun_phrases(self, sentence, noun_phrase_format, to_lowercase, singularize)
sentence: str
The sentence to derive the noun phrases from.
noun_phrase_format: str; optional (default = TextProcessor.noun_phrase_format)
A string specifying how the noun phrases should be formatted/structured.
to_lowercase: bool; optional (default=False)
True if noun phrases should be converted to lowercase
singularize: bool; optional (default = False)
True if individual nouns within noun phrases should be singularized.
Returns [str]; The list of noun phrases produced from the given sentence.
Note: If no pos_tuples can be derived from the sentence, it will return np.nan.
This is useful for processing on a Pandas DataFrame without worrying about types.
"""
# get all pos tuples
pos_tuples = self.get_all_pos(sentence, to_lowercase=to_lowercase, lemmatize=singularize)
# find the noun phrases based on the noun phrase format
pos_tuples_noun_phrases = TextProcessor.__build_noun_phrases(pos_tuples, noun_phrase_format)
if not isinstance(pos_tuples_noun_phrases, list):
return np.nan
return [token for (token, tag) in pos_tuples_noun_phrases if tag == 'NP']
def __build_noun_phrases(
pos_tuples,
noun_phrase_format=default_noun_phrase_format
):
"""
<PRIVATE CLASS METHOD> Build the noun phrases by combining adjacent tuples that form a noun
phrase. Returns the list of pos_tuples with the noun phrases combined and labelled with the
tag 'NP'.
__build_noun_phrases(pos_tuples, noun_phrase_format)
pos_tuples: [(str, str)]
The list of pos_tuples to derive noun phrases from.
noun_phrase_format: str; optional (default = TextProcessor.default_noun_phrase_format)
A string specifying how the noun phrases should be formatted/structured.
Returns [(str, str)]; A list of pos_tuples containing noun phrases produced from the
original list of pos_tuples. The noun phrases are assigned the tag 'NP'
Note: If pos_tuples is not a list, it will return np.nan. This is useful for processing on a
Pandas DataFrame without worrying about types.
"""
if not isinstance(pos_tuples, list):
return np.nan
chunk_parser = nltk.RegexpParser(noun_phrase_format) # define the noun phrase parser
parsed_sentence = chunk_parser.parse(pos_tuples) # parse the sentence
pos_tuples_noun_phrases = []
for chunk in parsed_sentence:
if isinstance(chunk, nltk.tree.Tree): # found a noun phrase to add
noun_phrase = "" # build the noun phrase
for i in range(len(chunk)):
if i == len(chunk) - 1:
noun_phrase += chunk[i][0]
else:
noun_phrase += chunk[i][0] + " "
pos_tuples_noun_phrases.append((noun_phrase, 'NP'))
else:
pos_tuples_noun_phrases.append(chunk)
return pos_tuples_noun_phrases
##############################################################################
## Processing Methods
##############################################################################
def process(
self,
sentence,
to_lowercase=True,
preserve_noun_phrases=False,
remove_numbers=True,
custom_processing=lambda x:x
):
"""
Tokenize, lemmatize and remove stopwords from a given sentence. Returns a list of tokens.
Optionally convert tokens to lowercase, preserve noun phrases, remove numbers, and apply
custom processing. This method is intended for text pre-processing for machine learning
and other AI algorithms such as topic modelling, sentiment analysis, etc.
process(self, sentence, to_lowercase, preserve_noun_phrases, remove_numbers, custom_processing)
sentence: str
The sentence to be processed.
to_lowercase: bool; optional (default = True)
True if tokens should be converted to lowercase.
preserve_noun_phrases: bool; optional (default = False)
True if noun phrases should be preserved in the list of tokens.
remove_numbers: bool; optional (default = True)
True if numbers/digits should be excluded from the list of tokens.
custom_processing: str -> str; optional (default = lambda x: x)
A function that takes in the sentence string and returns a string.
Returns [str]; The list of lemmatized and non-stopword tokens from the given sentence.
Note: If no pos_tuples can be derived from the sentence or if the sentence cannot be casted
to a string, it will return np.nan. This is useful for processing on a Pandas DataFrame
without worrying about types.
"""
sentence = str(sentence)
if sentence == 'nan':
return np.nan
sentence = custom_processing(sentence) # apply custom processing step
pos_tuples = self.get_all_pos(sentence, to_lowercase=to_lowercase, lemmatize=True) # get parts-of-speech
# collect noun phrases if applicable
if preserve_noun_phrases:
pos_tuples = TextProcessor.__build_noun_phrases(pos_tuples)
if not isinstance(pos_tuples, list):
return np.nan
# remove pos tags
tokens = [token for (token, tag) in pos_tuples]
# remove stopwords and punctuation
filtered_tokens = []
for token in tokens:
if (
token.lower() not in self.stopwords
and token not in TextProcessor.punctuation
):
filtered_tokens.append(token)
if remove_numbers:
filtered_tokens = list(filter(lambda token: re.search("[A-Za-z]", token) is not None, filtered_tokens))
return filtered_tokens if len(filtered_tokens) > 0 else np.nan | src/text_processing/text_processor.py | import nltk
from nltk import TweetTokenizer
import string
import re
import numpy as np
class TextProcessor:
"""TextProcessor
This class is to help automate text processing for the analysis
of unstructured text data to be used for text mining and NLP tasks.
The main NLP library used within this class is NLTK.
"""
# specifies how noun phrases are collected by the noun phrase parser
default_noun_phrase_format = r"""NP: {<JJ.*>*<NN.*>+<DT>*<IN.*>*<JJ.*>*<NN.*>+}
{<JJ.*>+<NN.*>+}"""
punctuation = {char for char in string.punctuation} ; punctuation.add('...')
def __init__(
self,
tokenizer = nltk.word_tokenize,
lemmatizer = nltk.WordNetLemmatizer().lemmatize,
stopwords = nltk.corpus.stopwords.words('english')
):
"""
Initialize a new TextProcessor object.
__init__(self, tokenizer, lemmatizer, stopwords)
tokenizer: str -> [str]; optional (default = nltk.tokenize.word_tokenize)
Tokenizer function. Function that takes in a sentence string and returns
the list of token strings.
lemmatizer: str -> str; optional (default = nltk.stem.WordNetLemmatizer().lemmatize)
Lemmatizer function. Function that takes in a token string and returns the
lemmatized token string.
stopwords: [str]; optional (default = nltk.corpus.stopwords.words('english))
List of stopwords. The list of words to be ignored during processing.
"""
self.tokenizer = tokenizer
self.lemmatizer = lemmatizer
self.stopwords = set(stopwords)
##############################################################################
## Stopword Methods
##############################################################################
def add_stopwords(self, new_stopwords):
"""
Add a word or list of words to the list of stopwords.
add_stopwords(self, new_stopwords)
new_stopwords: str or [str]
A single word or a list of words to be added to the list of stopwords.
Raises ValueError if a list entry is not a string. In this case, none of the
list entries are added to the stopwords list
Returns: None
"""
if isinstance(new_stopwords, str):
self.stopwords.add(new_stopwords)
elif isinstance(new_stopwords, list):
for stopword in new_stopwords: # check all entries are strings
if not isinstance(stopword, str):
raise ValueError(f"A list entry (entry={stopword}) was found that wasn't a string. Only strings can be added as stopwords")
for stopword in new_stopwords: # add to the list of stopwords
self.stopwords.add(stopword)
def remove_stopwords(self, old_stopwords):
"""
Remove a word or list of words from the list of stopwords.
remove_stopwords(self, old_stopwords)
old_stopwords: str or [str]
A single word or list of words to be removed from the list of stopwords.
Returns: None
"""
if isinstance(old_stopwords, str):
try:
self.stopwords.remove(old_stopwords)
except(KeyError):
pass
return
elif isinstance(old_stopwords, list):
for stopword in old_stopwords:
try:
self.stopwords.remove(stopword)
except(KeyError):
pass
def get_stopwords(self):
"""
Get the list of stopwords.
get_stopwords(self)
Returns [str]; The list of stopwords. That is, the list of words that are ignored
during processing.
"""
return self.stopwords
##############################################################################
## Tokenization Methods
##############################################################################
def tokenize(self, sentence, to_lowercase=False, lemmatize=False):
"""
Get the tokens from a given sentence.
tokenize(self, sentence, to_lowercase, lemmatize)
sentence: str
The sentence to be tokenized.
to_lowercase: bool; optional (default = False)
True if tokens are to be converted to lowercase.
lemmatize: bool; optional (default = False)
True if tokens are to be lemmatized
Returns [str]; The list of tokens for the given sentence.
Note: If the given sentence is not a string, the numpy nan is returned.
This is useful for processing on a Pandas DataFrame without worrying about types.
"""
if not isinstance(sentence, str):
return np.nan
if lemmatize:
lemmatized_pos_tuples = self.get_all_pos(sentence, to_lowercase=to_lowercase, lemmatize=True)
return [token for (token, tag) in lemmatized_pos_tuples]
tokens = self.tokenizer(sentence)
if to_lowercase:
tokens = self.tokens_to_lowercase(tokens)
return tokens
def tokens_to_lowercase(self, tokens):
"""
Convert each token in list of tokens to lowercase.
tokens_to_lowercase(self, tokens)
tokens: [str]
List of tokens to be converted to lowercase.
Returns [str]; The given list of tokens converted to lowercase.
Note: If tokens is not a list, it will return np.nan. This is useful for processing on a
Pandas DataFrame without worrying about types.
"""
if not isinstance(tokens, list):
return np.nan
return [token.lower() for token in tokens]
##############################################################################
## Parts-of-Speech (POS) Methods
##############################################################################
def get_all_pos(
    self,
    sentence,
    to_lowercase=False,
    lemmatize=False
):
    """
    Get all the possible pos_tuples (words paired with their corresponding part-of-speech)
    for the given sentence.
    get_all_pos(self, sentence, to_lowercase, lemmatize)
    sentence: str
        The sentence to derive the pos_tuples from.
    to_lowercase: bool; optional (default = False)
        True if pos_tuple tokens should be converted to lowercase.
    lemmatize: bool; optional (default = False)
        True if pos_tuple tokens should be lemmatized.
    Returns [(str, str)]; The list of pos_tuples derived from a given sentence.
    That is, the list of tuples consisting of each word paired with its part-of-speech
    Note: Input whose str() is 'nan' yields np.nan, which keeps the method safe to map
    over a pandas column.
    """
    sentence = str(sentence)
    if sentence == 'nan':
        return np.nan
    tokens = self.tokenize(sentence)  # tokenize
    pos_tuples = nltk.pos_tag(tokens)  # get the pos
    if lemmatize:
        pos_tuples = self.lemmatize_pos_tuples(pos_tuples)
    # Lowercasing happens AFTER tagging/lemmatizing so the tagger sees the
    # original casing; the (token, tag) pairs are then rebuilt in order.
    if to_lowercase:
        tokens = self.tokens_to_lowercase([token for (token, tag) in pos_tuples])
        tags = [tag for (token, tag) in pos_tuples]
        pos_tuples = [(tokens[i], tags[i]) for i in range(len(pos_tuples))]
    return pos_tuples
def get_pos(self, sentence, tag, to_lowercase=False, lemmatize=False):
    """Return the tokens of ``sentence`` whose part-of-speech matches ``tag``.

    ``tag`` may be a full NLTK tag (e.g. "JJ") or any substring of one
    (e.g. "J" to capture all adjective variants).

    Args:
        sentence (str): The sentence to derive the tokens from.
        tag (str): NLTK POS tag, or a partial match of one, to filter on.
        to_lowercase (bool, optional): Lowercase the returned tokens.
        lemmatize (bool, optional): Lemmatize the returned tokens.

    Returns:
        list[str] or float: The matching tokens, or ``np.nan`` for
        non-string input (pandas-friendly behaviour).
    """
    text = str(sentence)
    if text == 'nan':
        return np.nan
    tagged = self.get_all_pos(text, to_lowercase, lemmatize)
    return TextProcessor.__filter_pos_tuples(tagged, tag)
def __filter_pos_tuples(pos_tuples, match_tag):
    """<PRIVATE CLASS METHOD> Select tokens whose POS tag matches ``match_tag``.

    A pair matches when ``match_tag`` occurs inside its tag, so a prefix such
    as "J" captures "JJ", "JJR", "JJS", etc.

    Args:
        pos_tuples (list[tuple[str, str]]): (token, tag) pairs to filter.
        match_tag (str): The tag (or tag substring) to keep.

    Returns:
        list[str] or float: The matching tokens, or ``np.nan`` when
        ``pos_tuples`` is not a list (pandas-friendly behaviour).
    """
    if isinstance(pos_tuples, list):
        return [word for word, word_tag in pos_tuples if match_tag in word_tag]
    return np.nan
##############################################################################
## Lemmatization Methods
##############################################################################
def lemmatize_pos_tuples(self, pos_tuples):
    """Lemmatize the token of every (token, tag) pair, keeping the original tags.

    Args:
        pos_tuples (list[tuple[str, str]]): Pairs of token and NLTK POS tag.

    Returns:
        list[tuple[str, str]]: Pairs of lemmatized token and original NLTK tag.
    """
    # The WordNet lemmatizer needs its own coarse tag set, so convert first;
    # zip against the originals to restore the full NLTK tag afterwards.
    wordnet_tuples = TextProcessor._TextProcessor__format_pos_tuples_to_wordnet(pos_tuples)
    return [
        (self.lemmatizer(word, pos=wn_tag), original_tag)
        for (word, wn_tag), (_, original_tag) in zip(wordnet_tuples, pos_tuples)
    ]
def __format_pos_tuples_to_wordnet(pos_tuples):
    """<PRIVATE CLASS METHOD> Map NLTK POS tags onto WordNet POS constants.

    Only the first letter of the NLTK tag is significant; anything that is
    not an adjective, verb or adverb falls back to NOUN (WordNet's default).

    Args:
        pos_tuples (list[tuple[str, str]]): (token, NLTK tag) pairs.

    Returns:
        list[tuple[str, str]]: (token, WordNet tag) pairs.
    """
    tag_map = {
        "J": nltk.corpus.wordnet.ADJ,
        "N": nltk.corpus.wordnet.NOUN,
        "V": nltk.corpus.wordnet.VERB,
        "R": nltk.corpus.wordnet.ADV,
    }
    fallback = nltk.corpus.wordnet.NOUN
    return [(word, tag_map.get(tag[0], fallback)) for word, tag in pos_tuples]
##############################################################################
## Noun Phrase Methods
##############################################################################
def get_noun_phrases(
    self,
    sentence,
    noun_phrase_format=default_noun_phrase_format,
    to_lowercase=False,
    singularize=False
):
    """Extract all noun phrases contained in ``sentence``.

    Args:
        sentence (str): The sentence to derive the noun phrases from.
        noun_phrase_format (str, optional): Grammar (RegexpParser format)
            describing how a noun phrase is structured.
        to_lowercase (bool, optional): Lowercase the noun phrases.
        singularize (bool, optional): Singularize the nouns inside phrases.

    Returns:
        list[str] or float: The noun phrases, or ``np.nan`` when no pos
        tuples can be derived (pandas-friendly behaviour).
    """
    # Lemmatizing the tokens is what singularizes plural nouns here.
    tagged = self.get_all_pos(sentence, to_lowercase=to_lowercase, lemmatize=singularize)
    chunked = TextProcessor.__build_noun_phrases(tagged, noun_phrase_format)
    if not isinstance(chunked, list):
        return np.nan
    return [phrase for phrase, chunk_tag in chunked if chunk_tag == 'NP']
def __build_noun_phrases(
    pos_tuples,
    noun_phrase_format=default_noun_phrase_format
):
    """<PRIVATE CLASS METHOD> Merge adjacent tuples that form a noun phrase.

    Parses ``pos_tuples`` with an ``nltk.RegexpParser`` built from
    ``noun_phrase_format``; every matched subtree is collapsed into a single
    (phrase, 'NP') tuple, while non-matching tuples pass through unchanged.

    Args:
        pos_tuples (list[tuple[str, str]]): The (token, tag) pairs to chunk.
        noun_phrase_format (str, optional): Grammar describing a noun phrase.

    Returns:
        list[tuple[str, str]] or float: The chunked pos tuples (noun phrases
        tagged 'NP'), or ``np.nan`` when ``pos_tuples`` is not a list
        (pandas-friendly behaviour).
    """
    if not isinstance(pos_tuples, list):
        return np.nan
    chunk_parser = nltk.RegexpParser(noun_phrase_format)  # define the noun phrase parser
    parsed_sentence = chunk_parser.parse(pos_tuples)  # parse the sentence
    pos_tuples_noun_phrases = []
    for chunk in parsed_sentence:
        if isinstance(chunk, nltk.tree.Tree):  # found a noun phrase to add
            # Join the subtree's tokens with spaces instead of hand-building
            # the string with a special case for the last element.
            noun_phrase = " ".join(leaf[0] for leaf in chunk)
            pos_tuples_noun_phrases.append((noun_phrase, 'NP'))
        else:
            pos_tuples_noun_phrases.append(chunk)
    return pos_tuples_noun_phrases
##############################################################################
## Processing Methods
##############################################################################
def process(
    self,
    sentence,
    to_lowercase=True,
    preserve_noun_phrases=False,
    remove_numbers=True,
    custom_processing=lambda x: x
):
    """Tokenize, lemmatize and clean ``sentence`` for ML/NLP pipelines.

    Removes stopwords and punctuation, optionally lowercases tokens, keeps
    noun phrases intact as single tokens, drops purely numeric tokens, and
    applies an arbitrary pre-processing callable first. Intended for text
    pre-processing for machine learning, topic modelling, sentiment
    analysis, etc.

    Args:
        sentence (str): Sentence to process. Anything whose str() is 'nan'
            yields ``np.nan`` (pandas-friendly behaviour).
        to_lowercase (bool, optional): Lowercase the tokens.
        preserve_noun_phrases (bool, optional): Keep multi-word noun phrases
            as single tokens.
        remove_numbers (bool, optional): Drop tokens containing no
            alphabetic character.
        custom_processing (Callable[[str], str], optional): Hook applied to
            the raw sentence before any other step.

    Returns:
        list[str] or float: The processed tokens, or ``np.nan`` when the
        sentence is invalid or every token was filtered out.
    """
    sentence = str(sentence)
    if sentence == 'nan':
        return np.nan
    sentence = custom_processing(sentence)  # apply custom processing step
    pos_tuples = self.get_all_pos(sentence, to_lowercase=to_lowercase, lemmatize=True)
    # collect noun phrases if applicable
    if preserve_noun_phrases:
        pos_tuples = TextProcessor.__build_noun_phrases(pos_tuples)
    if not isinstance(pos_tuples, list):
        return np.nan
    # remove pos tags
    tokens = [token for (token, tag) in pos_tuples]
    # remove stopwords (case-insensitively) and punctuation
    filtered_tokens = [
        token for token in tokens
        if token.lower() not in self.stopwords
        and token not in TextProcessor.punctuation
    ]
    if remove_numbers:
        # keep only tokens containing at least one alphabetic character
        filtered_tokens = [token for token in filtered_tokens
                           if re.search("[A-Za-z]", token) is not None]
    # NOTE: the original final line was corrupted by stray trailing text;
    # return the token list, or np.nan when everything was filtered away.
    return filtered_tokens if len(filtered_tokens) > 0 else np.nan
import jwt
import os
from flask import request, jsonify
from functools import wraps
from config import ENABLE_OBT_OAUTH, AUTH_CLIENT_SECRET_KEY, \
AUTH_CLIENT_AUDIENCE
def get_token():
    """Extract the bearer token from the request's Authorization header.

    Returns:
        str: The token portion of an "Authorization: Bearer <token>" header.

    NOTE(review): on a missing or malformed header this returns a Flask
    ``(response, 403)`` tuple instead of raising, yet the caller passes the
    result straight to ``jwt.decode`` — the tuple then surfaces as a generic
    "Invalid token" error rather than the intended message. Confirm whether
    this helper should raise instead.
    """
    try:
        # Expected header shape: "Bearer <token>" (exactly two parts).
        bearer, authorization = request.headers['Authorization'].split()
        if 'bearer' not in bearer.lower():
            return jsonify('Invalid token. Please login!'), 403
        return authorization
    except Exception:
        # Missing header, or a header that does not split into two parts.
        return jsonify('Token is required. Please login!'), 403
def validate_scope(scope_required, scope_token):
    """Check a required scope against the scope entry of a decoded JWT.

    Args:
        scope_required (str): Required scope as "service:function:actions";
            a falsy value means no scope requirement.
        scope_token (dict): Scope entry from the JWT payload with keys
            'type', 'name' and 'actions' ('*' acts as a wildcard).

    Returns:
        A Flask ``(response, 401)`` tuple when the scope is not allowed,
        otherwise ``None`` (implicitly).

    NOTE(review): the caller (require_oauth_scopes) ignores this return
    value, so a failed scope check does not actually block the request —
    verify whether this should raise instead of returning a response.
    """
    if scope_required:
        service, function, actions = scope_required.split(':')
        if (service != scope_token['type'] and scope_token['type'] != '*') or \
           (function != scope_token['name'] and scope_token['name'] != '*') or \
           (actions not in scope_token['actions'] and '*' not in scope_token['actions']):
            return jsonify('Scope not allowed!'), 401
def require_oauth_scopes(scope):
    """Decorator factory enforcing JWT authentication on a Flask view.

    When OAuth is enabled, the wrapped view only runs for requests carrying
    an HS512-signed bearer token with the expected audience; the decoded
    user id is attached to ``request.user_id``.

    Args:
        scope (str): Required scope as "service:function:actions".

    Returns:
        A decorator wrapping the view function.
    """
    def jwt_required(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Short-circuit when OAuth is switched off by configuration.
            if not ENABLE_OBT_OAUTH or int(ENABLE_OBT_OAUTH) == 0:
                return func(*args, **kwargs)
            # Fail fast on missing auth configuration.
            if not AUTH_CLIENT_SECRET_KEY:
                return jsonify('Set CLIENT_SECRET_KEY in environment variable'), 500
            if not AUTH_CLIENT_AUDIENCE:
                return jsonify('Set CLIENT_AUDIENCE in environment variable'), 500
            try:
                token = get_token()
                payload = jwt.decode(token, AUTH_CLIENT_SECRET_KEY, verify=True,
                                     algorithms=['HS512'], audience=AUTH_CLIENT_AUDIENCE)
                if not payload.get('user_id'):
                    return jsonify('Incomplete token. Please login!'), 403
                request.user_id = payload['user_id']
                # NOTE(review): validate_scope's return value is ignored, so
                # scope failures do not currently block the request.
                validate_scope(scope, payload['access'][0])
                return func(*args, **kwargs)
            except jwt.ExpiredSignatureError:
                return jsonify('This token has expired. Please login!'), 403
            except jwt.InvalidTokenError:
                return jsonify('Invalid token. Please login!'), 403
        return wrapper
    return jwt_required
import os
from flask import request, jsonify
from functools import wraps
from config import ENABLE_OBT_OAUTH, AUTH_CLIENT_SECRET_KEY, \
AUTH_CLIENT_AUDIENCE
def get_token():
try:
bearer, authorization = request.headers['Authorization'].split()
if 'bearer' not in bearer.lower():
return jsonify('Invalid token. Please login!'), 403
return authorization
except Exception:
return jsonify('Token is required. Please login!'), 403
def validate_scope(scope_required, scope_token):
if scope_required:
service, function, actions = scope_required.split(':')
if (service != scope_token['type'] and scope_token['type'] != '*') or \
(function != scope_token['name'] and scope_token['name'] != '*') or \
(actions not in scope_token['actions'] and '*' not in scope_token['actions']):
return jsonify('Scope not allowed!'), 401
def require_oauth_scopes(scope):
def jwt_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
# auth disabled
if not ENABLE_OBT_OAUTH or int(ENABLE_OBT_OAUTH) == 0:
return func(*args, **kwargs)
# auth enabled
if not AUTH_CLIENT_SECRET_KEY:
return jsonify('Set CLIENT_SECRET_KEY in environment variable'), 500
if not AUTH_CLIENT_AUDIENCE:
return jsonify('Set CLIENT_AUDIENCE in environment variable'), 500
try:
token = get_token()
payload = jwt.decode(token, AUTH_CLIENT_SECRET_KEY, verify=True,
algorithms=['HS512'], audience=AUTH_CLIENT_AUDIENCE)
if payload.get('user_id'):
request.user_id = payload['user_id']
validate_scope(scope, payload['access'][0])
return func(*args, **kwargs)
else:
return jsonify('Incomplete token. Please login!'), 403
except jwt.ExpiredSignatureError:
return jsonify('This token has expired. Please login!'), 403
except jwt.InvalidTokenError:
return jsonify('Invalid token. Please login!'), 403
return wrapper
return jwt_required | 0.303113 | 0.042503 |
from typing import List, Tuple
from bson import ObjectId, errors
from fastapi import Depends, FastAPI, HTTPException, Query, status
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase
from chapter6.mongodb.models import (
PostDB,
PostCreate,
PostPartialUpdate,
)
app = FastAPI()
motor_client = AsyncIOMotorClient(
"mongodb://localhost:27017"
) # Connection to the whole server
database = motor_client["chapter6_mongo"] # Single database instance
def get_database() -> AsyncIOMotorDatabase:
    """FastAPI dependency returning the shared MongoDB database handle."""
    return database
async def pagination(
    skip: int = Query(0, ge=0),
    limit: int = Query(10, ge=0),
) -> Tuple[int, int]:
    """Dependency providing (skip, limit) pagination, with limit capped at 100."""
    return (skip, min(100, limit))
async def get_object_id(id: str) -> ObjectId:
    """Parse ``id`` into a BSON ObjectId, mapping parse failures to a 404."""
    try:
        object_id = ObjectId(id)
    except (errors.InvalidId, TypeError):
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
    return object_id
async def get_post_or_404(
    id: ObjectId = Depends(get_object_id),
    database: AsyncIOMotorDatabase = Depends(get_database),
) -> PostDB:
    """Dependency fetching the post with ``id``, raising a 404 when absent."""
    document = await database["posts"].find_one({"_id": id})
    if document is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
    return PostDB(**document)
@app.get("/posts")
async def list_posts(
pagination: Tuple[int, int] = Depends(pagination),
database: AsyncIOMotorDatabase = Depends(get_database),
) -> List[PostDB]:
skip, limit = pagination
query = database["posts"].find({}, skip=skip, limit=limit)
results = [PostDB(**raw_post) async for raw_post in query]
return results
@app.get("/posts/{id}", response_model=PostDB)
async def get_post(post: PostDB = Depends(get_post_or_404)) -> PostDB:
return post
@app.post("/posts", response_model=PostDB, status_code=status.HTTP_201_CREATED)
async def create_post(
post: PostCreate, database: AsyncIOMotorDatabase = Depends(get_database)
) -> PostDB:
post_db = PostDB(**post.dict())
await database["posts"].insert_one(post_db.dict(by_alias=True))
post_db = await get_post_or_404(post_db.id, database)
return post_db
@app.patch("/posts/{id}", response_model=PostDB)
async def update_post(
post_update: PostPartialUpdate,
post: PostDB = Depends(get_post_or_404),
database: AsyncIOMotorDatabase = Depends(get_database),
) -> PostDB:
await database["posts"].update_one(
{"_id": post.id}, {"$set": post_update.dict(exclude_unset=True)}
)
post_db = await get_post_or_404(post.id, database)
return post_db
@app.delete("/posts/{id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_post(
post: PostDB = Depends(get_post_or_404),
database: AsyncIOMotorDatabase = Depends(get_database),
):
await database["posts"].delete_one({"_id": post.id}) | chapter6/mongodb/app.py | from typing import List, Tuple
from bson import ObjectId, errors
from fastapi import Depends, FastAPI, HTTPException, Query, status
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase
from chapter6.mongodb.models import (
PostDB,
PostCreate,
PostPartialUpdate,
)
app = FastAPI()
motor_client = AsyncIOMotorClient(
"mongodb://localhost:27017"
) # Connection to the whole server
database = motor_client["chapter6_mongo"] # Single database instance
def get_database() -> AsyncIOMotorDatabase:
return database
async def pagination(
skip: int = Query(0, ge=0),
limit: int = Query(10, ge=0),
) -> Tuple[int, int]:
capped_limit = min(100, limit)
return (skip, capped_limit)
async def get_object_id(id: str) -> ObjectId:
try:
return ObjectId(id)
except (errors.InvalidId, TypeError):
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
async def get_post_or_404(
id: ObjectId = Depends(get_object_id),
database: AsyncIOMotorDatabase = Depends(get_database),
) -> PostDB:
raw_post = await database["posts"].find_one({"_id": id})
if raw_post is None:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
return PostDB(**raw_post)
@app.get("/posts")
async def list_posts(
pagination: Tuple[int, int] = Depends(pagination),
database: AsyncIOMotorDatabase = Depends(get_database),
) -> List[PostDB]:
skip, limit = pagination
query = database["posts"].find({}, skip=skip, limit=limit)
results = [PostDB(**raw_post) async for raw_post in query]
return results
@app.get("/posts/{id}", response_model=PostDB)
async def get_post(post: PostDB = Depends(get_post_or_404)) -> PostDB:
return post
@app.post("/posts", response_model=PostDB, status_code=status.HTTP_201_CREATED)
async def create_post(
post: PostCreate, database: AsyncIOMotorDatabase = Depends(get_database)
) -> PostDB:
post_db = PostDB(**post.dict())
await database["posts"].insert_one(post_db.dict(by_alias=True))
post_db = await get_post_or_404(post_db.id, database)
return post_db
@app.patch("/posts/{id}", response_model=PostDB)
async def update_post(
post_update: PostPartialUpdate,
post: PostDB = Depends(get_post_or_404),
database: AsyncIOMotorDatabase = Depends(get_database),
) -> PostDB:
await database["posts"].update_one(
{"_id": post.id}, {"$set": post_update.dict(exclude_unset=True)}
)
post_db = await get_post_or_404(post.id, database)
return post_db
@app.delete("/posts/{id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_post(
post: PostDB = Depends(get_post_or_404),
database: AsyncIOMotorDatabase = Depends(get_database),
):
await database["posts"].delete_one({"_id": post.id}) | 0.740737 | 0.113776 |
from .keys_and_values import KeysAndValues, deduplicate
class Dictish:
    """A dict-like mapping implemented on top of a KeysAndValues pair store.

    Supports lookup (``[]``, ``get``, call syntax), iteration over keys,
    merging with ``|``, single-pair extension with ``+``, and
    subset/superset comparisons against any object exposing ``items()``.

    NOTE(review): ``__eq__`` is defined without ``__hash__``, so instances
    are unhashable; the comparison operators also assume ``other`` provides
    ``items()`` (and ``__len__`` for ``==``) — confirm that is intended.
    """
    def __init__(self, key_value_pairs=None):
        """
        Creates a new Dictish.
        >>> Dictish()
        Dictish()
        Given a sequence of key-value pairs, the input is deduplicated on the keys.
        >>> Dictish([("a", 1), ("b", 2), ("a", 3)])
        Dictish([('a', 3), ('b', 2)])
        """
        self.keys_and_values = KeysAndValues(key_value_pairs)
    def __add__(self, key_and_value):
        """
        >>> Dictish([("a", 1), ("b", 2)]) + ("c", 3)
        Dictish([('a', 1), ('b', 2), ('c', 3)])
        """
        # Delegates to __or__ with a one-pair Dictish on the right-hand side.
        return self | self.__class__([key_and_value])
    def __call__(self, key, default=None):
        """
        >>> Dictish([("a", 1), ("b", 2)])("a")
        1
        >>> Dictish([("a", 1), ("b", 2)])("c") is None
        True
        >>> Dictish([("a", 1), ("b", 2)])("c", 3)
        3
        """
        return self.get(key, default)
    def __eq__(self, other):
        """
        Two Dictish are equal if they contain the same key-value pairs, regardless of order.
        >>> Dictish([("a", 1), ("b", 2)]) == Dictish([("b", 2), ("a", 1)])
        True
        >>> Dictish([("a", 1)]) == Dictish([("b", 2), ("a", 1)])
        False
        >>> Dictish([("a", 1), ("b", 2)]) == Dictish([("b", 2)])
        False
        """
        # Length check first; then every pair of `other` must appear in self.
        return len(self) == len(other) and all(
            (key_and_value in self.items() for key_and_value in other.items())
        )
    def __ge__(self, other):
        """
        Tests whether this dictish is a superset of another object responding to items.
        """
        return set(self.items()) >= set(other.items())
    def __gt__(self, other):
        """
        Tests whether this dictish is a strict superset of another object responding to items.
        """
        return set(self.items()) > set(other.items())
    def __getitem__(self, lookup_key):
        """
        >>> Dictish([("a", 1), ("b", 2)])["a"]
        1
        """
        try:
            # First matching key wins (linear scan over the stored pairs).
            return next(value for key, value in self.items() if key == lookup_key)
        except StopIteration:
            raise KeyError(lookup_key)
    def __iter__(self):
        """
        Iterates over the keys of the Dictish.
        >>> list(iter(Dictish([("key", "value")])))
        ['key']
        """
        return self.keys()
    def __le__(self, other):
        """
        Tests whether this dictish is a subset of another object responding to items.
        """
        return set(self.items()) <= set(other.items())
    def __len__(self):
        """
        >>> len(Dictish([("key", "value")]))
        1
        """
        return len(self.keys_and_values)
    def __lt__(self, other):
        """
        Tests whether this dictish is a strict subset of another object responding to items.
        """
        return set(self.items()) < set(other.items())
    def __or__(self, other):
        """
        >>> Dictish([("a", 1), ("b", 2)]) | Dictish([("a", 4), ("c", 3)])
        Dictish([('a', 4), ('b', 2), ('c', 3)])
        """
        # Right-hand pairs take precedence on duplicate keys (dict-merge semantics).
        return self.__class__(deduplicate(other.items(), list(self.keys()), list(self.values())))
    def __repr__(self):
        """
        >>> repr(Dictish([("a", 1), ("b", 2)]))
        "Dictish([('a', 1), ('b', 2)])"
        """
        keys_and_values = self.keys_and_values if self.keys_and_values else ""
        return f"{self.__class__.__name__}({keys_and_values})"
    def __str__(self):
        """
        >>> str(Dictish([("a", 1), ("b", 2)]))
        "{'a': 1, 'b': 2}"
        """
        pairs = (f"'{key}': {value}" for key, value in self.items())
        return "{" + ", ".join(pairs) + "}"
    def get(self, key, default=None):
        """
        >>> Dictish([("a", 1), ("b", 2)]).get("a")
        1
        >>> Dictish([("a", 1), ("b", 2)]).get("c") is None
        True
        >>> Dictish([("a", 1), ("b", 2)]).get("c", 3)
        3
        """
        try:
            return self[key]
        except KeyError:
            return default
    def items(self):
        """
        >>> list(Dictish([("a", 1), ("b", 2)]).items())
        [('a', 1), ('b', 2)]
        """
        return iter(self.keys_and_values)
    def keys(self):
        """
        >>> list(Dictish([("a", 1), ("b", 2)]).keys())
        ['a', 'b']
        """
        return (key for key, value in self.keys_and_values)
    def values(self):
        """
        >>> list(Dictish([("a", 1), ("b", 2)]).values())
        [1, 2]
        """
        return (value for key, value in self.keys_and_values)
class Dictish:
def __init__(self, key_value_pairs=None):
"""
Creates a new Dictish.
>>> Dictish()
Dictish()
Given a sequence of key-value pairs, the input is deduplicated on the keys.
>>> Dictish([("a", 1), ("b", 2), ("a", 3)])
Dictish([('a', 3), ('b', 2)])
"""
self.keys_and_values = KeysAndValues(key_value_pairs)
def __add__(self, key_and_value):
"""
>>> Dictish([("a", 1), ("b", 2)]) + ("c", 3)
Dictish([('a', 1), ('b', 2), ('c', 3)])
"""
return self | self.__class__([key_and_value])
def __call__(self, key, default=None):
"""
>>> Dictish([("a", 1), ("b", 2)])("a")
1
>>> Dictish([("a", 1), ("b", 2)])("c") is None
True
>>> Dictish([("a", 1), ("b", 2)])("c", 3)
3
"""
return self.get(key, default)
def __eq__(self, other):
"""
Two Dictish are equal if they contain the same key-value pairs, regardless of order.
>>> Dictish([("a", 1), ("b", 2)]) == Dictish([("b", 2), ("a", 1)])
True
>>> Dictish([("a", 1)]) == Dictish([("b", 2), ("a", 1)])
False
>>> Dictish([("a", 1), ("b", 2)]) == Dictish([("b", 2)])
False
"""
return len(self) == len(other) and all(
(key_and_value in self.items() for key_and_value in other.items())
)
def __ge__(self, other):
"""
Tests whether this dictish is a superset of another object responding to items.
"""
return set(self.items()) >= set(other.items())
def __gt__(self, other):
"""
Tests whether this dictish is a strict superset of another object responding to items.
"""
return set(self.items()) > set(other.items())
def __getitem__(self, lookup_key):
"""
>>> Dictish([("a", 1), ("b", 2)])["a"]
1
"""
try:
return next(value for key, value in self.items() if key == lookup_key)
except StopIteration:
raise KeyError(lookup_key)
def __iter__(self):
"""
Iterates over the keys of the Dictish.
>>> list(iter(Dictish([("key", "value")])))
['key']
"""
return self.keys()
def __le__(self, other):
"""
Tests whether this dictish is a subset of another object responding to items.
"""
return set(self.items()) <= set(other.items())
def __len__(self):
"""
>>> len(Dictish([("key", "value")]))
1
"""
return len(self.keys_and_values)
def __lt__(self, other):
"""
Tests whether this dictish is a strict subset of another object responding to items.
"""
return set(self.items()) < set(other.items())
def __or__(self, other):
"""
>>> Dictish([("a", 1), ("b", 2)]) | Dictish([("a", 4), ("c", 3)])
Dictish([('a', 4), ('b', 2), ('c', 3)])
"""
return self.__class__(deduplicate(other.items(), list(self.keys()), list(self.values())))
def __repr__(self):
"""
>>> repr(Dictish([("a", 1), ("b", 2)]))
"Dictish([('a', 1), ('b', 2)])"
"""
keys_and_values = self.keys_and_values if self.keys_and_values else ""
return f"{self.__class__.__name__}({keys_and_values})"
def __str__(self):
"""
>>> str(Dictish([("a", 1), ("b", 2)]))
"{'a': 1, 'b': 2}"
"""
pairs = (f"'{key}': {value}" for key, value in self.items())
return "{" + ", ".join(pairs) + "}"
def get(self, key, default=None):
"""
>>> Dictish([("a", 1), ("b", 2)]).get("a")
1
>>> Dictish([("a", 1), ("b", 2)]).get("c") is None
True
>>> Dictish([("a", 1), ("b", 2)]).get("c", 3)
3
"""
try:
return self[key]
except KeyError:
return default
def items(self):
"""
>>> list(Dictish([("a", 1), ("b", 2)]).items())
[('a', 1), ('b', 2)]
"""
return iter(self.keys_and_values)
def keys(self):
"""
>>> list(Dictish([("a", 1), ("b", 2)]).keys())
['a', 'b']
"""
return (key for key, value in self.keys_and_values)
def values(self):
"""
>>> list(Dictish([("a", 1), ("b", 2)]).values())
[1, 2]
"""
return (value for key, value in self.keys_and_values) | 0.787237 | 0.539529 |
import numpy as np
from sklearn.neighbors import KernelDensity
from ..utils.smoothing import bspline
def density_estimation(sample, X, h, kernel="epanechnikov"):
    """Kernel Density Estimation over the sample in domain X.
    Routine for `sklearn.neighbors.KernelDensity`.
    Args:
        sample (np.array): Sample of observations. shape: (n_samples, n_features) List of n_features-dimensional data
            points. Each row corresponds to a single data point.
        X (np.array): Domain in which the density is estimated. An array of points to query. Last dimension should match
            dimension of training data. shape: (n_estimates, n_features)
        h (float): Bandwidth of the kernel. Needs to be chosen wisely or estimated. Sensitive parameter.
        kernel (str, optional): The kernel to use for the estimation, so far only the Epanechnikov kernel is
            implemented. Defaults to "epanechnikov".
    Returns:
        [np.array]: The array of density evaluations (the log-densities from
        KernelDensity.score_samples are exponentiated before returning). These are
        normalized probability densities, so values will be low for high-dimensional
        data. shape: (n_estimates,)
    """
    # Both sample and X are flattened to single-feature column vectors before fitting.
    kde = KernelDensity(kernel=kernel, bandwidth=h).fit(sample.reshape(-1, 1))
    log_dens = kde.score_samples(X.reshape(-1, 1))
    density = np.exp(log_dens)
    return density
def pointwise_density_trafo_K2M(K, q_K, S_vals, M_vals):
    """Pointwise density transformation from Strike Price (K) to Moneyness (M = S/K).

    A spline is fitted to ``q_K`` so the density can be evaluated anywhere,
    not only at the known strikes; each (M, S) pair is then mapped through
    the change-of-variables formula q_M(m) = S / m**2 * q_K(S / m).

    Args:
        K (np.array): Strike prices at which ``q_K`` is known.
        q_K (np.array): Density values in the strike-price domain.
        S_vals (array-like): Underlying prices, one per density point.
        M_vals (array-like): Moneyness values, one per density point.

    Returns:
        np.array: Density values in the moneyness domain.
    """
    _, spline_q_K, _ = bspline(K, q_K, 15)  # fit spline to q_K
    q_pointsM = np.zeros(len(M_vals))
    # Transform the density at every (moneyness, underlying-price) pair.
    for i, (m, s) in enumerate(zip(M_vals, S_vals)):
        q_pointsM[i] = s / (m ** 2) * spline_q_K(s / m)
    return q_pointsM
def density_trafo_K2M(K, q_K, S):
    """Density transformation from Strike Price (K) to Moneyness (M = S/K).

    Fits a spline to ``q_K`` and evaluates the transformed density
    q_M(m) = S / m**2 * q_K(S / m) on an evenly spaced moneyness grid of the
    same length as ``K``.

    Args:
        K (np.array): Strike prices at which ``q_K`` is known.
        q_K (np.array): Density values in the strike-price domain.
        S (array-like): Price(s) of the underlying.

    Returns:
        tuple[np.array, np.array]: The moneyness grid in [0.5, 1.5] and the
        corresponding density values.
    """
    _, spline_q_K, _ = bspline(K, q_K, 30)
    M = np.linspace(0.5, 1.5, len(K))
    q_M = np.zeros(len(M))
    for i, m in enumerate(M):
        q_M[i] = S / (m ** 2) * spline_q_K(S / m)
    return M, q_M
from sklearn.neighbors import KernelDensity
from ..utils.smoothing import bspline
def density_estimation(sample, X, h, kernel="epanechnikov"):
"""Kernel Density Estimation over the sample in domain X.
Routine for `sklearn.neighbors.KernelDensity`.
Args:
sample (np.array): Sample of observations. shape: (n_samples, n_features) List of n_features-dimensional data
points. Each row corresponds to a single data point.
X (np.array): Domain in which the density is estimated. An array of points to query. Last dimension should match
dimension of training data. shape: (n_estimates, n_features)
h (float): Bandwidth of the kernel. Needs to be chosen wisely or estimated. Sensitive parameter.
kernel (str, optional): The kernel to use for the estimation, so far only the Epanechnikov kernel is
implemented. Defaults to "epanechnikov".
Returns:
[np.array]: The array of log(density) evaluations. These are normalized to be probability densities, so values
will be low for high-dimensional data. shape: (n_estimates,)
"""
kde = KernelDensity(kernel=kernel, bandwidth=h).fit(sample.reshape(-1, 1))
log_dens = kde.score_samples(X.reshape(-1, 1))
density = np.exp(log_dens)
return density
def pointwise_density_trafo_K2M(K, q_K, S_vals, M_vals):
"""Pointwise density transformation from K (Strike Price) to M (Moneyness) domain. M = S/K
First, a spline has to be fitted to q_K, so that it is possible to extract the q_K-value at every point of
interest, not just at the known points K.
Then, it is iterated through the (M, S)-tuples and the density q_K is transformed to q_M.
Args:
K (np.array): Strike Price values for which the density q_K is know.
q_K (np.array): Density values in Strike Price domain.
S_vals (array-like): Prices of underlying for the density points.
M_vals (array-like): Moneyness values for the density point.
Returns:
[np.array]: Density values in Moneyness domain.
"""
_, q_K, _ = bspline(K, q_K, 15) # fit spline to q_K
num = len(M_vals)
q_pointsM = np.zeros(num)
# loop through (M, S)-tuples and calculate the q_M value at this point
for i, m, s in zip(range(num), M_vals, S_vals):
q_pointsM[i] = s / (m ** 2) * q_K(s / m)
return q_pointsM
def density_trafo_K2M(K, q_K, S):
"""Density transformation from K (Strike Price) to M (Moneyness) domain. M = S/K
First, a spline has to be fitted to q_K, so that it is possible to extract the q_K-value at every point of
interest, not just at the known points K.
Then, it is iterated through the (M, S)-tuples and the density q_K is transformed to q_M.
Args:
K (np.array): Strike Price values for which the density q_K is know.
q_K (np.array): Density values in Strike Price domain.
S (array-like): Prices of underlying for the density points.
Returns:
[np.array]: Density values in Moneyness domain.
"""
_, q_K, _ = bspline(K, q_K, 30)
num = len(K)
M = np.linspace(0.5, 1.5, num)
q_M = np.zeros(num)
for i, m in enumerate(M):
q_M[i] = S / (m ** 2) * q_K(S / m)
return M, q_M | 0.961144 | 0.91611 |
import glob
import os
import shutil
import tempfile
from resource_management.core import shell
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from resource_management.core.resources.system import Execute
from resource_management.core.resources.system import Directory
from resource_management.core.resources.system import File
from resource_management.libraries.functions import Direction
from resource_management.libraries.functions import format
from resource_management.libraries.functions import compare_versions
from resource_management.libraries.functions import tar_archive
from resource_management.libraries.script.script import Script
import oozie
# Name of the scratch directory (created under the system temp dir) that
# holds configuration backups taken during an upgrade.
BACKUP_TEMP_DIR = "oozie-upgrade-backup"
# File name of the tarball that stores the backed-up Oozie conf directory.
BACKUP_CONF_ARCHIVE = "oozie-conf-backup.tar"
class OozieUpgrade(Script):
    # Upgrade-time helpers for the Oozie service: backing up / restoring the
    # configuration directory and re-building the Oozie WAR file.

    @staticmethod
    def backup_configuration():
        """
        Backs up the oozie configuration as part of the upgrade process.
        :return:
        """
        Logger.info('Backing up Oozie configuration directory before upgrade...')
        directoryMappings = OozieUpgrade._get_directory_mappings()

        # Backups are written below the system temp directory.
        absolute_backup_dir = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)
        if not os.path.isdir(absolute_backup_dir):
            os.makedirs(absolute_backup_dir)

        for directory in directoryMappings:
            if not os.path.isdir(directory):
                raise Fail("Unable to backup missing directory {0}".format(directory))

            archive = os.path.join(absolute_backup_dir, directoryMappings[directory])
            Logger.info('Compressing {0} to {1}'.format(directory, archive))

            # Remove any stale archive left over from a previous upgrade attempt.
            if os.path.exists(archive):
                os.remove(archive)

            # backup the directory, following symlinks instead of including them
            tar_archive.archive_directory_dereference(archive, directory)

    @staticmethod
    def restore_configuration():
        """
        Restores the configuration backups to their proper locations after an
        upgrade has completed.
        :return:
        """
        Logger.info('Restoring Oozie configuration directory after upgrade...')
        directoryMappings = OozieUpgrade._get_directory_mappings()

        for directory in directoryMappings:
            archive = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR,
                                   directoryMappings[directory])
            if not os.path.isfile(archive):
                raise Fail("Unable to restore missing backup archive {0}".format(archive))

            Logger.info('Extracting {0} to {1}'.format(archive, directory))
            tar_archive.untar_archive(archive, directory)

        # cleanup: the backup directory is no longer needed once restored
        Directory(os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR),
                  action="delete",
        )

    @staticmethod
    def prepare_warfile():
        """
        Invokes the 'prepare-war' command in Oozie in order to create the WAR.
        The prepare-war command uses the input WAR from ${OOZIE_HOME}/oozie.war and
        outputs the prepared WAR to ${CATALINA_BASE}/webapps/oozie.war - because of this,
        both of these environment variables must point to the upgraded oozie-server path and
        not oozie-client since it was not yet updated.
        This method will also perform a kinit if necessary.
        :return:
        """
        import params

        # get the kerberos token if necessary to execute commands as oozie
        if params.security_enabled:
            oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
            command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
            Execute(command, user=params.oozie_user, logoutput=True)

        # setup environment: both variables must reference the server paths (see above)
        environment = { "CATALINA_BASE" : "/usr/hdp/current/oozie-server/oozie-server",
                        "OOZIE_HOME" : "/usr/hdp/current/oozie-server" }

        # prepare the oozie WAR
        command = format("{oozie_setup_sh} prepare-war {oozie_secure} -d {oozie_libext_dir}")
        return_code, oozie_output = shell.call(command, user=params.oozie_user,
                                               logoutput=False, quiet=False, env=environment)

        # set it to "" in to prevent a possible iteration issue
        if oozie_output is None:
            oozie_output = ""

        # success is detected from the command output, not only the return code
        if return_code != 0 or "New Oozie WAR file with added".lower() not in oozie_output.lower():
            message = "Unexpected Oozie WAR preparation output {0}".format(oozie_output)
            Logger.error(message)
            raise Fail(message)

    @staticmethod
    def _get_directory_mappings():
        """
        Gets a dictionary of directory to archive name that represents the
        directories that need to be backed up and their output tarball archive targets
        :return: the dictionary of directory to tarball mappings
        """
        import params

        # the trailing "/" is important here so as to not include the "conf" folder itself
        return { params.conf_dir + "/" : BACKUP_CONF_ARCHIVE }
if __name__ == "__main__":
OozieUpgrade().execute() | ambari-server/src/main/resources/stacks/ADH/1.4/services/OOZIE/package/scripts/oozie_server_upgrade.py | import glob
import os
import shutil
import tempfile
from resource_management.core import shell
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from resource_management.core.resources.system import Execute
from resource_management.core.resources.system import Directory
from resource_management.core.resources.system import File
from resource_management.libraries.functions import Direction
from resource_management.libraries.functions import format
from resource_management.libraries.functions import compare_versions
from resource_management.libraries.functions import tar_archive
from resource_management.libraries.script.script import Script
import oozie
# Name of the scratch directory (created under the system temp dir) that
# holds configuration backups taken during an upgrade.
BACKUP_TEMP_DIR = "oozie-upgrade-backup"
# File name of the tarball that stores the backed-up Oozie conf directory.
BACKUP_CONF_ARCHIVE = "oozie-conf-backup.tar"
class OozieUpgrade(Script):
    # Upgrade-time helpers for the Oozie service: backing up / restoring the
    # configuration directory and re-building the Oozie WAR file.

    @staticmethod
    def backup_configuration():
        """
        Backs up the oozie configuration as part of the upgrade process.
        :return:
        """
        Logger.info('Backing up Oozie configuration directory before upgrade...')
        directoryMappings = OozieUpgrade._get_directory_mappings()

        # Backups are written below the system temp directory.
        absolute_backup_dir = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)
        if not os.path.isdir(absolute_backup_dir):
            os.makedirs(absolute_backup_dir)

        for directory in directoryMappings:
            if not os.path.isdir(directory):
                raise Fail("Unable to backup missing directory {0}".format(directory))

            archive = os.path.join(absolute_backup_dir, directoryMappings[directory])
            Logger.info('Compressing {0} to {1}'.format(directory, archive))

            # Remove any stale archive left over from a previous upgrade attempt.
            if os.path.exists(archive):
                os.remove(archive)

            # backup the directory, following symlinks instead of including them
            tar_archive.archive_directory_dereference(archive, directory)

    @staticmethod
    def restore_configuration():
        """
        Restores the configuration backups to their proper locations after an
        upgrade has completed.
        :return:
        """
        Logger.info('Restoring Oozie configuration directory after upgrade...')
        directoryMappings = OozieUpgrade._get_directory_mappings()

        for directory in directoryMappings:
            archive = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR,
                                   directoryMappings[directory])
            if not os.path.isfile(archive):
                raise Fail("Unable to restore missing backup archive {0}".format(archive))

            Logger.info('Extracting {0} to {1}'.format(archive, directory))
            tar_archive.untar_archive(archive, directory)

        # cleanup: the backup directory is no longer needed once restored
        Directory(os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR),
                  action="delete",
        )

    @staticmethod
    def prepare_warfile():
        """
        Invokes the 'prepare-war' command in Oozie in order to create the WAR.
        The prepare-war command uses the input WAR from ${OOZIE_HOME}/oozie.war and
        outputs the prepared WAR to ${CATALINA_BASE}/webapps/oozie.war - because of this,
        both of these environment variables must point to the upgraded oozie-server path and
        not oozie-client since it was not yet updated.
        This method will also perform a kinit if necessary.
        :return:
        """
        import params

        # get the kerberos token if necessary to execute commands as oozie
        if params.security_enabled:
            oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
            command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
            Execute(command, user=params.oozie_user, logoutput=True)

        # setup environment: both variables must reference the server paths (see above)
        environment = { "CATALINA_BASE" : "/usr/hdp/current/oozie-server/oozie-server",
                        "OOZIE_HOME" : "/usr/hdp/current/oozie-server" }

        # prepare the oozie WAR
        command = format("{oozie_setup_sh} prepare-war {oozie_secure} -d {oozie_libext_dir}")
        return_code, oozie_output = shell.call(command, user=params.oozie_user,
                                               logoutput=False, quiet=False, env=environment)

        # set it to "" in to prevent a possible iteration issue
        if oozie_output is None:
            oozie_output = ""

        # success is detected from the command output, not only the return code
        if return_code != 0 or "New Oozie WAR file with added".lower() not in oozie_output.lower():
            message = "Unexpected Oozie WAR preparation output {0}".format(oozie_output)
            Logger.error(message)
            raise Fail(message)

    @staticmethod
    def _get_directory_mappings():
        """
        Gets a dictionary of directory to archive name that represents the
        directories that need to be backed up and their output tarball archive targets
        :return: the dictionary of directory to tarball mappings
        """
        import params

        # the trailing "/" is important here so as to not include the "conf" folder itself
        return { params.conf_dir + "/" : BACKUP_CONF_ARCHIVE }
if __name__ == "__main__":
OozieUpgrade().execute() | 0.345657 | 0.083404 |
from abc import ABC, abstractmethod
import copy
class Oracle(ABC):
    """ An abstract interface of functions.
    `Oracle` provides a unified interface for defining optimization
    objectives, or building function approximators, etc.
    The user would want to implement the following methods:
    `fun` returns the function value given an input.
    `grad` returns the gradient with respect to an input.
    `hess` returns the Hessian with respect to an input.
    `hvp` returns the Hessia-vector-product with respect to an input.
    `update` redefines the function.
    Implementation of all these methods is not mandatory. For example, the
    gradient of the function might not be defined.
    Finally, a subclass of `Oracle` should be copy.deepcopy compatible. For
    convenience, we overload __deepcopy__ to include an `exclude` list, in
    order not to deepcopy some attributes.
    """

    def fun(self, x, **kwargs):
        """ Return the function value given an input. """
        raise NotImplementedError

    def grad(self, x, **kwargs):
        """ Return the gradient with respect to an input as np.ndarray(s). """
        raise NotImplementedError

    def hess(self, x, **kwargs):
        """ Return the Hessian with respect to an input as np.ndarray(s). """
        raise NotImplementedError

    def hvp(self, x, g, **kwargs):
        """ Return the product between Hessian and a vector `g` with respect to
        an input as np.ndarray(s). """
        raise NotImplementedError

    def update(self, *args, **kwargs):
        """ Redefine the function. """
        raise NotImplementedError

    def assign(self, other, excludes=()):
        """ Set the parameters as others.

        Deep-copies every attribute of `other` onto `self`, except the
        attribute names listed in `excludes`.
        """
        assert type(self)==type(other)
        keys = [ k for k in other.__dict__ if not k in excludes ]
        for k in keys:
            self.__dict__[k] = copy.deepcopy(other.__dict__[k])

    def __deepcopy__(self, memo, excludes=()):
        """ __deepcopy__ but with an exclusion list
        excludes is a list of attribute names (string) that is to be shallow copied.
        """
        assert isinstance(memo, dict)
        new = copy.copy(self)  # so it has all the attributes
        memo[id(self)] = new  # prevent loop
        # Prefer the object's own state snapshot when it defines one.
        if hasattr(self,'__getstate__'):
            d = self.__getstate__()
        else:
            d = self.__dict__
        # don't deepcopy the items in `excludes`
        d = {k:v for k,v in d.items() if not k in excludes}
        # deepcopy others
        d = copy.deepcopy(d, memo)
        if hasattr(new,'__setstate__'):
            new.__setstate__(d)
        else:
            new.__dict__.update(d)
        return new
from abc import ABC, abstractmethod
import copy
class Oracle(ABC):
    """ An abstract interface of functions.
    `Oracle` provides a unified interface for defining optimization
    objectives, or building function approximators, etc.
    The user would want to implement the following methods:
    `fun` returns the function value given an input.
    `grad` returns the gradient with respect to an input.
    `hess` returns the Hessian with respect to an input.
    `hvp` returns the Hessia-vector-product with respect to an input.
    `update` redefines the function.
    Implementation of all these methods is not mandatory. For example, the
    gradient of the function might not be defined.
    Finally, a subclass of `Oracle` should be copy.deepcopy compatible. For
    convenience, we overload __deepcopy__ to include an `exclude` list, in
    order not to deepcopy some attributes.
    """

    def fun(self, x, **kwargs):
        """ Return the function value given an input. """
        raise NotImplementedError

    def grad(self, x, **kwargs):
        """ Return the gradient with respect to an input as np.ndarray(s). """
        raise NotImplementedError

    def hess(self, x, **kwargs):
        """ Return the Hessian with respect to an input as np.ndarray(s). """
        raise NotImplementedError

    def hvp(self, x, g, **kwargs):
        """ Return the product between Hessian and a vector `g` with respect to
        an input as np.ndarray(s). """
        raise NotImplementedError

    def update(self, *args, **kwargs):
        """ Redefine the function. """
        raise NotImplementedError

    def assign(self, other, excludes=()):
        """ Set the parameters as others.

        Deep-copies every attribute of `other` onto `self`, except the
        attribute names listed in `excludes`.
        """
        assert type(self)==type(other)
        keys = [ k for k in other.__dict__ if not k in excludes ]
        for k in keys:
            self.__dict__[k] = copy.deepcopy(other.__dict__[k])

    def __deepcopy__(self, memo, excludes=()):
        """ __deepcopy__ but with an exclusion list
        excludes is a list of attribute names (string) that is to be shallow copied.
        """
        assert isinstance(memo, dict)
        new = copy.copy(self)  # so it has all the attributes
        memo[id(self)] = new  # prevent loop
        # Prefer the object's own state snapshot when it defines one.
        if hasattr(self,'__getstate__'):
            d = self.__getstate__()
        else:
            d = self.__dict__
        # don't deepcopy the items in `excludes`
        d = {k:v for k,v in d.items() if not k in excludes}
        # deepcopy others
        d = copy.deepcopy(d, memo)
        if hasattr(new,'__setstate__'):
            new.__setstate__(d)
        else:
            new.__dict__.update(d)
        return new
from discord.ext import commands
import discord
from typing import Union
import asyncio
def embed_to_string(embed: 'discord.Embed') -> str:
    """Flatten a Discord embed into a plain-text representation.

    Used as a fallback when the bot lacks the ``embed_links`` permission and
    must deliver the embed's content as ordinary text.

    Args:
        embed: The embed to convert.

    Returns:
        str: Author name, title, description, fields and footer text joined
        by newlines (parts that are unset are skipped).
    """
    string = ""
    if embed.author:
        string = f'{embed.author.name}\n'
    if embed.title:
        string += f'{embed.title}\n'
    if embed.description:
        string += f'{embed.description}\n'
    for field in embed.fields:
        # Fix: embed fields expose `.name`, not `.title` (raised
        # AttributeError before).
        string += f'{field.name}\n{field.value}\n'
    if embed.footer:
        # Fix: interpolating the footer proxy printed its repr; use its text.
        string += f'{embed.footer.text}'
    return string
class SyltesContext(commands.Context):
    """Command context that degrades gracefully when permissions are missing."""

    async def send(self, content=None, *, tts=False, embed=None, file=None, files=None, delete_after=None, nonce=None) \
            -> Union[discord.Message, None]:
        """Better handling of missing permissions.

        When the bot cannot send messages, embeds or files in the invoking
        channel, the content is delivered to the author via DM instead.
        """
        destination = self.channel
        if self.guild:
            permissions = self.guild.me.permissions_in(self.channel)
            if not permissions.send_messages:
                try:
                    # Redirect everything to the author's DMs.
                    destination = self.author
                    await destination.send(f'I was missing permissions to send messages in {self.channel.mention}.')
                except discord.Forbidden:
                    pass
            if not permissions.embed_links and embed is not None:
                # Can't send embeds here: flatten the embed to text and DM it.
                string = embed_to_string(embed)
                await self.author.send(string)
                embed = None
            if not permissions.attach_files and (file or files):
                await destination.send(f'Missing permission to send files in {self.channel.mention}\nCheck your DMs')
                files = files or [file]
                for file in files:
                    await self.author.send(file=file)
                return
        # Fix: `files`, `delete_after` and `nonce` were silently dropped here
        # before (so e.g. `em(delete_after=...)` never deleted anything).
        return await destination.send(content=content, tts=tts, embed=embed, file=file,
                                      files=files, delete_after=delete_after, nonce=nonce)

    @staticmethod
    async def cleanup(*messages, delay: float = 0.0) -> None:
        """Shortcut for deleting messages, with optional delay param"""
        async def do_deletion(msg):
            await asyncio.sleep(delay)
            try:
                await msg.delete()
            except discord.Forbidden:
                pass

        for message in messages:
            asyncio.ensure_future(do_deletion(message))

    async def prompt_reply(self, message: str, *, timeout=60.0, delete_after=True, author_id=None) -> Union[str, None]:
        """Prompt a text reply from `author_id`.

        Returns the reply's content, or None on timeout or an empty reply.
        """
        author_id = author_id or self.author.id
        _msg = await super().send(message)

        def check(msg):
            return msg.author.id == author_id and msg.channel == self.channel

        try:
            message = await self.bot.wait_for('message', check=check, timeout=timeout)
        except asyncio.TimeoutError:
            await self.send('Timed out.')
            return None

        try:
            if delete_after:
                # Schedule cleanup of the prompt, the reply and the invocation.
                asyncio.ensure_future(self.cleanup(message, self.message, _msg), loop=self.bot.loop)
        finally:
            if message.content:
                return message.content
            else:
                return None

    async def em(self, delete_after=None, **kwargs):
        """Shortcut to send embeds with `bot.em`"""
        return await self.send(embed=self.bot.em(**kwargs), delete_after=delete_after)

    async def send_help(self, *args):
        """No more cheating on getting help from other channels :P"""
        if self.command.name in ('help', 'scoreboard', 'rep_scoreboard', 'reps', 'member_count', 'top_user', 'users',
                                 'server_messages', 'messages'):
            if self.channel.id not in (511344208955703306, 536199577284509696):
                return await self.send("**Please use #bot-commands channel**")
        return await super().send_help(*args)
import discord
from typing import Union
import asyncio
def embed_to_string(embed: 'discord.Embed') -> str:
    """Flatten a Discord embed into a plain-text representation.

    Used as a fallback when the bot lacks the ``embed_links`` permission and
    must deliver the embed's content as ordinary text.

    Args:
        embed: The embed to convert.

    Returns:
        str: Author name, title, description, fields and footer text joined
        by newlines (parts that are unset are skipped).
    """
    string = ""
    if embed.author:
        string = f'{embed.author.name}\n'
    if embed.title:
        string += f'{embed.title}\n'
    if embed.description:
        string += f'{embed.description}\n'
    for field in embed.fields:
        # Fix: embed fields expose `.name`, not `.title` (raised
        # AttributeError before).
        string += f'{field.name}\n{field.value}\n'
    if embed.footer:
        # Fix: interpolating the footer proxy printed its repr; use its text.
        string += f'{embed.footer.text}'
    return string
class SyltesContext(commands.Context):
    """Command context that degrades gracefully when permissions are missing."""

    async def send(self, content=None, *, tts=False, embed=None, file=None, files=None, delete_after=None, nonce=None) \
            -> Union[discord.Message, None]:
        """Better handling of missing permissions.

        When the bot cannot send messages, embeds or files in the invoking
        channel, the content is delivered to the author via DM instead.
        """
        destination = self.channel
        if self.guild:
            permissions = self.guild.me.permissions_in(self.channel)
            if not permissions.send_messages:
                try:
                    # Redirect everything to the author's DMs.
                    destination = self.author
                    await destination.send(f'I was missing permissions to send messages in {self.channel.mention}.')
                except discord.Forbidden:
                    pass
            if not permissions.embed_links and embed is not None:
                # Can't send embeds here: flatten the embed to text and DM it.
                string = embed_to_string(embed)
                await self.author.send(string)
                embed = None
            if not permissions.attach_files and (file or files):
                await destination.send(f'Missing permission to send files in {self.channel.mention}\nCheck your DMs')
                files = files or [file]
                for file in files:
                    await self.author.send(file=file)
                return
        # Fix: `files`, `delete_after` and `nonce` were silently dropped here
        # before (so e.g. `em(delete_after=...)` never deleted anything).
        return await destination.send(content=content, tts=tts, embed=embed, file=file,
                                      files=files, delete_after=delete_after, nonce=nonce)

    @staticmethod
    async def cleanup(*messages, delay: float = 0.0) -> None:
        """Shortcut for deleting messages, with optional delay param"""
        async def do_deletion(msg):
            await asyncio.sleep(delay)
            try:
                await msg.delete()
            except discord.Forbidden:
                pass

        for message in messages:
            asyncio.ensure_future(do_deletion(message))

    async def prompt_reply(self, message: str, *, timeout=60.0, delete_after=True, author_id=None) -> Union[str, None]:
        """Prompt a text reply from `author_id`.

        Returns the reply's content, or None on timeout or an empty reply.
        """
        author_id = author_id or self.author.id
        _msg = await super().send(message)

        def check(msg):
            return msg.author.id == author_id and msg.channel == self.channel

        try:
            message = await self.bot.wait_for('message', check=check, timeout=timeout)
        except asyncio.TimeoutError:
            await self.send('Timed out.')
            return None

        try:
            if delete_after:
                # Schedule cleanup of the prompt, the reply and the invocation.
                asyncio.ensure_future(self.cleanup(message, self.message, _msg), loop=self.bot.loop)
        finally:
            if message.content:
                return message.content
            else:
                return None

    async def em(self, delete_after=None, **kwargs):
        """Shortcut to send embeds with `bot.em`"""
        return await self.send(embed=self.bot.em(**kwargs), delete_after=delete_after)

    async def send_help(self, *args):
        """No more cheating on getting help from other channels :P"""
        if self.command.name in ('help', 'scoreboard', 'rep_scoreboard', 'reps', 'member_count', 'top_user', 'users',
                                 'server_messages', 'messages'):
            if self.channel.id not in (511344208955703306, 536199577284509696):
                return await self.send("**Please use #bot-commands channel**")
        return await super().send_help(*args)
# Tell the interpreter to read this source file using UTF-8 (it contains Chinese text).
# -*- coding: utf-8 -*-
print("hello world again")
answer = 42
name = "DengXiaoBai"
print(answer)
# ----------------print---------------
print('string1','string2','string3')
print(111.222222)
print('print can print number without \'\',like this:',1111)
print('print can print any var',3333,3232.1,-1232323)
# r'' raw strings: the contents are not escaped
print(r'///n//')
print(r'""""')
# '''...''' denotes multi-line content
print('''第一行
第二行
第三行''')
# Floor division: keeps only the integer part; the result is always an int
print(11 // 3)
# / always yields a float, even when the two ints divide evenly
print(9 / 3)
print(11 / 3)
# --------------input-------------
# name=input('plz input your name:')
# print('your name {0}'.format(name))
# A test of the "object" concept
# In OC/Swift, primitive types are assigned by value while objects pass references
# In Python, everything is an object; there is no primitive/object distinction
testIntA = 22
testIntB = testIntA
testIntA = 33
print(testIntB)
# Booleans: True / False
# None and False are not the same thing
# --------------String------------
ord('中') # returns the decimal number of the character's Unicode code point
chr(66) # returns the character for a decimal Unicode code point
# bytes type, similar to a "data" type
# Chinese text: encode/decode with utf8; English can use ascii
# In general just use utf8 everywhere, since utf8 is ascii-compatible
chineseStr = "我要学习"
chineseBytes = chineseStr.encode('utf-8') # encode to bytes
decodeChinese = chineseBytes.decode('utf-8',errors='ignore') # decode to string; 'ignore' skips invalid byte sequences
charsCount = decodeChinese.__len__() # number of characters
bytesCount = chineseBytes.__len__() # number of bytes
print('str:{0},bytes:{1},decodeStr:{2},charsCount:{3},bytes:{4}'.format(chineseStr,chineseBytes,decodeChinese,charsCount,bytesCount))
enStr = "I'm learning py"
enBytes = enStr.encode('utf-8')
decodeEnStr = enBytes.decode('utf-8',errors='ignore')
enCharCount = enStr.__len__()
enBytesCount = enBytes.__len__()
print('str:{0},bytes:{1},decodeStr:{2},charsCount:{3},bytes:{4}'.format(enStr,enBytes,decodeEnStr,enCharCount,enBytesCount))
# Formatted string output; when unsure of the type, use the string type %s
percentNum = (85 - 72) * 100 / 72
print('{0} 比去年多考了 {1:.1f}%'.format('小白', percentNum))
print('空格:{0:4d},添0:{1:04d}'.format(22,22))
# ------------list and tuple-------------
# Elements may have different types
# list : an ordered array; what each element points to can be changed, i.e. mutable
# tuple : element references cannot change, i.e. immutable; for a fully immutable tuple its elements must be immutable too
firstList = ['Ford',3000,True]
# operations
firstList.append('凯迪拉克')
firstList.insert(0,'通用')
popElement = firstList.pop()
print('popElement is {0}'.format(popElement))
popElement = firstList.pop(0)
print('popElement is {0}'.format(popElement))
print('the last element is %s' % firstList[-1])
firstTuple = ('Honda',2000,False)
secTuple = ('Mazzida',) # a single-element tuple needs the trailing ,
thrTuple = ('Club',['man_city',1])
print('the tuple is : {0}'.format(thrTuple))
# thrTuple[-1] = ['kdb'] is not allowed: TypeError: 'tuple' object does not support item assignment, i.e. element references are fixed
thrTuple[-1][1] = 'Champion'
print('after changing the tuple is : {0}'.format(thrTuple))
# ------------if statement, using : replace {} enrolling block-----------
# true and false values,just like OC
if '':
    print('空字符串')
else:
    print('空字符是 False')
if []:
    print('空数组')
else:
    print('空数组是 False')
if ():
    print('空元组')
else:
    print('空元组是 False')
if None:
    print('None')
else:
    print('None是 False')
# --------------- for loops for x in range / list --------------
# range(start,stop,interval) starts at start (inclusive) and stops before stop (exclusive)
listFromRange = list(range(5,15,4))
print('list from range is :{0}'.format(listFromRange))
for index in range(4):
    print("index is {0}".format(index))
print("--------")
for index in range(1, 3):
    print("index is {0}".format(index))
print("--------")
for index in range(5, 10, 6):
    print("index is {0}".format(index))
# --------- Dictionary Set ------------
# Dictionary : key-value; trades space for time, since both keys and values are stored
# To keep hashing correct, keys must be immutable, e.g. String / Int
# set: unordered collection without duplicates; added elements must be immutable
# avoid KeyError
student = {
    "name": "DengXiaoBai",
    "age": 11
}
# deleting a key also removes its value
testKey = 'name'
student.pop(testKey)
print('after pop student is {0}'.format(student))
# check whether the key exists
if testKey in student:
    print(student[testKey])
# supply a default value; the default default is None
print('name is {0}'.format(student.get(testKey)))
# set: unordered collection without duplicates; added elements must be immutable
set1 = set(['1',True,3333]) # adds the contained elements to the set
set2 = set(('1',True,6666))
# set1.add(('KDB',['python'])) TypeError: unhashable type: 'list'
# intersection
print('交集: {0}'.format(set1 & set2))
# union
print('合集: {0}'.format(set1 | set2))
# Tell the interpreter to read this source file using UTF-8 (it contains Chinese text).
# -*- coding: utf-8 -*-
print("hello world again")
answer = 42
name = "DengXiaoBai"
print(answer)
# ----------------print---------------
print('string1','string2','string3')
print(111.222222)
print('print can print number without \'\',like this:',1111)
print('print can print any var',3333,3232.1,-1232323)
# r'' raw strings: the contents are not escaped
print(r'///n//')
print(r'""""')
# '''...''' denotes multi-line content
print('''第一行
第二行
第三行''')
# Floor division: keeps only the integer part; the result is always an int
print(11 // 3)
# / always yields a float, even when the two ints divide evenly
print(9 / 3)
print(11 / 3)
# --------------input-------------
# name=input('plz input your name:')
# print('your name {0}'.format(name))
# A test of the "object" concept
# In OC/Swift, primitive types are assigned by value while objects pass references
# In Python, everything is an object; there is no primitive/object distinction
testIntA = 22
testIntB = testIntA
testIntA = 33
print(testIntB)
# Booleans: True / False
# None and False are not the same thing
# --------------String------------
ord('中') # returns the decimal number of the character's Unicode code point
chr(66) # returns the character for a decimal Unicode code point
# bytes type, similar to a "data" type
# Chinese text: encode/decode with utf8; English can use ascii
# In general just use utf8 everywhere, since utf8 is ascii-compatible
chineseStr = "我要学习"
chineseBytes = chineseStr.encode('utf-8') # encode to bytes
decodeChinese = chineseBytes.decode('utf-8',errors='ignore') # decode to string; 'ignore' skips invalid byte sequences
charsCount = decodeChinese.__len__() # number of characters
bytesCount = chineseBytes.__len__() # number of bytes
print('str:{0},bytes:{1},decodeStr:{2},charsCount:{3},bytes:{4}'.format(chineseStr,chineseBytes,decodeChinese,charsCount,bytesCount))
enStr = "I'm learning py"
enBytes = enStr.encode('utf-8')
decodeEnStr = enBytes.decode('utf-8',errors='ignore')
enCharCount = enStr.__len__()
enBytesCount = enBytes.__len__()
print('str:{0},bytes:{1},decodeStr:{2},charsCount:{3},bytes:{4}'.format(enStr,enBytes,decodeEnStr,enCharCount,enBytesCount))
# Formatted string output; when unsure of the type, use the string type %s
percentNum = (85 - 72) * 100 / 72
print('{0} 比去年多考了 {1:.1f}%'.format('小白', percentNum))
print('空格:{0:4d},添0:{1:04d}'.format(22,22))
# ------------list and tuple-------------
# Elements may have different types
# list : an ordered array; what each element points to can be changed, i.e. mutable
# tuple : element references cannot change, i.e. immutable; for a fully immutable tuple its elements must be immutable too
firstList = ['Ford',3000,True]
# operations
firstList.append('凯迪拉克')
firstList.insert(0,'通用')
popElement = firstList.pop()
print('popElement is {0}'.format(popElement))
popElement = firstList.pop(0)
print('popElement is {0}'.format(popElement))
print('the last element is %s' % firstList[-1])
firstTuple = ('Honda',2000,False)
secTuple = ('Mazzida',) # a single-element tuple needs the trailing ,
thrTuple = ('Club',['man_city',1])
print('the tuple is : {0}'.format(thrTuple))
# thrTuple[-1] = ['kdb'] is not allowed: TypeError: 'tuple' object does not support item assignment, i.e. element references are fixed
thrTuple[-1][1] = 'Champion'
print('after changing the tuple is : {0}'.format(thrTuple))
# ------------if statement, using : replace {} enrolling block-----------
# true and false values,just like OC
if '':
    print('空字符串')
else:
    print('空字符是 False')
if []:
    print('空数组')
else:
    print('空数组是 False')
if ():
    print('空元组')
else:
    print('空元组是 False')
if None:
    print('None')
else:
    print('None是 False')
# --------------- for loops for x in range / list --------------
# range(start,stop,interval) starts at start (inclusive) and stops before stop (exclusive)
listFromRange = list(range(5,15,4))
print('list from range is :{0}'.format(listFromRange))
for index in range(4):
    print("index is {0}".format(index))
print("--------")
for index in range(1, 3):
    print("index is {0}".format(index))
print("--------")
for index in range(5, 10, 6):
    print("index is {0}".format(index))
# --------- Dictionary Set ------------
# Dictionary : key-value; trades space for time, since both keys and values are stored
# To keep hashing correct, keys must be immutable, e.g. String / Int
# set: unordered collection without duplicates; added elements must be immutable
# avoid KeyError
student = {
    "name": "DengXiaoBai",
    "age": 11
}
# deleting a key also removes its value
testKey = 'name'
student.pop(testKey)
print('after pop student is {0}'.format(student))
# check whether the key exists
if testKey in student:
    print(student[testKey])
# supply a default value; the default default is None
print('name is {0}'.format(student.get(testKey)))
# set: unordered collection without duplicates; added elements must be immutable
set1 = set(['1',True,3333]) # adds the contained elements to the set
set2 = set(('1',True,6666))
# set1.add(('KDB',['python'])) TypeError: unhashable type: 'list'
# intersection
print('交集: {0}'.format(set1 & set2))
# union
print('合集: {0}'.format(set1 | set2))
import abc
import numpy as np
try:
from . import bases # Only works when imported as a package.
except (ValueError, SystemError):
import parsimony.algorithms.bases as bases # When run as a program.
from parsimony.utils import check_arrays
import parsimony.utils.consts as consts
import parsimony.functions.penalties as penalties
import parsimony.functions.properties as properties
# Public API of this module.
__all__ = ["Info", "AlgorithmSnapshot",
           "direct_vector",
           "Bisection", "NewtonRaphson",
           "BacktrackingLineSearch",
           "StepSize", "SqSumNotSumStepSize", "NonSumDimStepSize",
           "Kernel", "LinearKernel"]
# TODO: This class should be replaced with Enum.
class Info(object):
    """Enum-like class for information constants.
    Fields may _NOT_ be None!
    This class will be replaced with Enum, so do not rely on the actual values
    of the fields. E.g., never use the string "ok", always use Info.ok.
    """
    ok = "ok"  # Did everything go well?
    converged = "converged"  # Did the algorithm converge?
    num_iter = "num_iter"  # Number of iterations.
    time = "time"  # Time of e.g. every iteration.
    func_val = "func_val"  # Function value at e.g. every iteration.
    fvalue = "fvalue"  # Function value at e.g. every iteration. [Deprecated!!]
    smooth_func_val = "smooth_func_val"  # Smoothed function value.
    gap = "gap"  # The gap at e.g. every iteration.
    mu = "mu"  # Smoothing constant, or other parameter, at every iteration.
    parameter = "parameter"  # Parameter(s), at e.g. every iteration.
    bound = "bound"  # Upper bound at e.g. every iteration.
    beta = "beta"  # E.g. the start vector used.
    betak = "betak"  # The final found vector.
    beta_start = "beta_start"  # The start vector used.
    continuations = "continuations"  # In continuation: Number of continuations
    verbose = "verbose"  # Tell algo to be verbose
    param_start = "param_start"  # The start parameters used.
    param_end = "param_end"  # The final parameters found by the algorithm.
    iterates = "iterates"  # The iterates generated by the algorithm.
    acceptance_rate = "acceptance_rate"  # Acceptance rate of sampling algos.
    other = "other"  # Any info that was not covered by the above.
# TODO: Replace beta, betak and beta_start with param_start and param_end
class AlgorithmSnapshot:
"""Save a Snapshot of the algorithm state to disk.
The save_* methods can be provided as callback argument to either FISTA or
CONESTA. This callback will be called at each iteration.
Parameters
----------
output_prefix: string a prefix path to store algorithm state.
saving_period: int the period (# of iterations) of trig the saving.
Example
-------
>>> import os
>>> import tempfile
>>> import numpy as np
>>> import parsimony.estimators as estimators
>>> import parsimony.algorithms.proximal as proximal
>>> from parsimony.algorithms.utils import AlgorithmSnapshot
>>>
>>> prefix = os.path.join(tempfile.mkdtemp(), "snapshots")
>>> snapshot = AlgorithmSnapshot(prefix, saving_period=10).save_fista
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(10, 16)
>>> y = np.random.rand(10, 1)
>>>
>>> en = estimators.ElasticNet(0.1,
... algorithm=proximal.FISTA(max_iter=50, callback=snapshot))
>>> en = en.fit(X, y)
>>> import glob
>>> print("Nb snapshots = %d" % (len(glob.glob(prefix + "*")),))
Nb snapshots = 5
"""
def __init__(self, output_prefix, saving_period=100):
self.output_prefix = output_prefix
self.saving_period = saving_period
self.cpt = 0
self.continuation_ite_nb = list() # ite nb where continuation occured
def save_conesta(self, algo_locals):
self.cpt += 1
# ite = algo_locals["i"]
if (self.cpt % self.saving_period) != 0:
return
algo = algo_locals["self"]
self.continuation_ite_nb.append(algo.num_iter)
snapshot = dict(beta=algo_locals["beta"],
continuation_ite_nb=self.continuation_ite_nb,
gM=algo_locals["gM"])
if algo.info_requested(Info.num_iter):
snapshot[Info.num_iter] = algo.num_iter
if algo.info_requested(Info.continuations):
snapshot[Info.continuations] = algo_locals["i"] + 1
if algo.info_requested(Info.time):
snapshot[Info.time] = algo_locals["t_"]
if algo.info_requested(Info.func_val):
snapshot[Info.func_val] = algo_locals["f_"]
if algo.info_requested(Info.fvalue):
snapshot[Info.fvalue] = algo_locals["f_"]
if algo.info_requested(Info.gap):
snapshot[Info.gap] = algo_locals["gap_"]
if algo.info_requested(Info.mu):
snapshot[Info.mu] = algo_locals["mu_"]
cpt_str = str(self.cpt).zfill(int(np.log10(algo.max_iter)+1))
output_filename = self.output_prefix + 'conesta_ite_%s.npz' % (cpt_str)
# print "AlgorithmSnapshot.save_conesta: save in ", output_filename
np.savez_compressed(output_filename, **snapshot)
    def save_fista(self, algo_locals):
        """Callback for FISTA: write a snapshot of the solver state.

        Increments the call counter and, every `saving_period` calls, dumps
        the current iterate and any requested run information from the
        solver's locals() dict to a compressed .npz file.

        Parameters
        ----------
        algo_locals : dict. The locals() of the running FISTA instance;
                must contain "self" and "betanew" and, depending on the
                requested info, "t_", "f_" and "gap_".
        """
        self.cpt += 1
        if (self.cpt % self.saving_period) != 0:
            return
        algo = algo_locals["self"]
        snapshot = dict(beta=algo_locals["betanew"])
        # Only persist the pieces of run information the user asked for.
        if algo.info_requested(Info.num_iter):
            snapshot[Info.num_iter] = algo.num_iter
        if algo.info_requested(Info.time):
            snapshot[Info.time] = algo_locals["t_"]
        if algo.info_requested(Info.func_val):
            snapshot[Info.func_val] = algo_locals["f_"]
        # fvalue duplicates func_val (both read "f_"); kept for backward
        # compatibility with the deprecated Info.fvalue field.
        if algo.info_requested(Info.fvalue):
            snapshot[Info.fvalue] = algo_locals["f_"]
        if algo.info_requested(Info.gap):
            snapshot[Info.gap] = algo_locals["gap_"]
        # Zero-pad the counter so that file names sort in call order.
        cpt_str = str(self.cpt).zfill(int(np.log10(algo.max_iter)+1))
        output_filename = self.output_prefix + 'fista_ite_%s.npz' % (cpt_str)
        # print "AlgorithmSnapshot.save_fista: save in ", output_filename
        np.savez_compressed(output_filename, **snapshot)
def direct_vector(v):
    """Fix the sign of a vector that is only defined up to sign.

    Vectors returned by e.g. the SVD may flip sign while yielding the same
    optimal function value. The vector is flipped, when needed, so that it
    is always positively correlated with the all-ones vector.

    Parameters
    ----------
    v : Numpy array, shape p-by-1. The vector to direct.
    """
    ones = np.ones(v.shape)
    correlation = np.dot(v.T, ones)
    return -v if correlation < 0.0 else v
# TODO: Remove or replace! Use functionality from scipy.optimize instead!
class Bisection(bases.ExplicitAlgorithm,
                bases.IterativeAlgorithm,
                bases.InformationAlgorithm):
    """Finds a root of the function assumed to be on the line between two
    points.

    Assumes a function f(x) such that |f(x)|_2 < -eps if x is too small,
    |f(x)|_2 > eps if x is too large and |f(x)|_2 <= eps if x is just right.

    Parameters
    ----------
    force_negative : Boolean. Default is False. Will try, by running more
            iterations, to make the result negative. It may fail, but that is
            unlikely.

    eps : Positive float. A value such that |f(x)|_2 <= eps. Only guaranteed
            if |f(x)|_2 <= eps in less than max_iter iterations.

    info : List or tuple of utils.Info. What, if any, extra run
            information should be stored. Default is None (treated as an
            empty list), which means that no run information is computed
            nor returned.

    max_iter : Non-negative integer. Maximum allowed number of iterations.

    min_iter : Non-negative integer less than or equal to max_iter. Minimum
            number of iterations that must be performed. Default is 1.
    """
    INTERFACES = [properties.Function]

    INFO_PROVIDED = [Info.ok,
                     Info.num_iter,
                     Info.converged]

    def __init__(self, force_negative=False,
                 parameter_positive=True,
                 parameter_negative=True,
                 parameter_zero=True,
                 eps=consts.TOLERANCE,
                 info=None, max_iter=30, min_iter=1):
        # info=None replaces the mutable default info=[]: a shared list
        # default could leak requested-info state between instances.
        super(Bisection, self).__init__(info=[] if info is None else info,
                                        max_iter=max_iter,
                                        min_iter=min_iter)

        self.force_negative = force_negative
        self.parameter_positive = parameter_positive
        self.parameter_negative = parameter_negative
        self.parameter_zero = parameter_zero

        self.eps = eps

    @bases.force_reset
    @bases.check_compatibility
    def run(self, function, x=None):
        """
        Parameters
        ----------
        function : Function. The function for which a root is found.

        x : A vector or tuple with two elements. The first element is the lower
                end of the interval for which |f(x[0])|_2 < -eps. The second
                element is the upper end of the interfal for which
                |f(x[1])|_2 > eps. If x is None, these values are found
                automatically. Finding them may be slow, though, if the
                function is expensive to evaluate.
        """
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, False)

        if x is not None:
            low = x[0]
            high = x[1]
        else:
            # Pick initial bracket endpoints consistent with the signs the
            # parameter is allowed to take.
            if self.parameter_negative:
                low = -1.0
            elif self.parameter_zero:
                low = 0.0
            else:
                low = consts.TOLERANCE

            if self.parameter_positive:
                high = 1.0
            elif self.parameter_zero:
                high = 0.0
            else:
                high = -consts.TOLERANCE

        # Find start values. If the low and high
        # values are feasible this will just break
        for i in range(self.max_iter):
            f_low = function.f(low)
            f_high = function.f(high)

            if np.sign(f_low) != np.sign(f_high):
                break
            else:
                # Expand the bracket; how depends on which sign combinations
                # the parameter may take, so that low/high never leave their
                # allowed half-line.
                if self.parameter_positive \
                        and self.parameter_negative \
                        and self.parameter_zero:

                    low -= abs(low) * 2.0 ** i
                    high += abs(high) * 2.0 ** i

                elif self.parameter_positive \
                        and self.parameter_negative \
                        and not self.parameter_zero:

                    low -= abs(low) * 2.0 ** i
                    high += abs(high) * 2.0 ** i

                    if abs(low) < consts.TOLERANCE:
                        low -= consts.TOLERANCE
                    if abs(high) < consts.TOLERANCE:
                        high += consts.TOLERANCE

                elif self.parameter_positive \
                        and not self.parameter_negative \
                        and self.parameter_zero:

                    low /= 2.0
                    high *= 2.0

                elif self.parameter_positive \
                        and not self.parameter_negative \
                        and not self.parameter_zero:

                    low /= 2.0
                    high *= 2.0

                    if abs(low) < consts.TOLERANCE:
                        low = consts.TOLERANCE
                    if abs(high) < consts.TOLERANCE:
                        high = consts.TOLERANCE

                elif not self.parameter_positive \
                        and self.parameter_negative \
                        and self.parameter_zero:

                    low *= 2.0
                    high /= 2.0

                elif not self.parameter_positive \
                        and self.parameter_negative \
                        and not self.parameter_zero:

                    low *= 2.0
                    high /= 2.0

                    if abs(low) < consts.TOLERANCE:
                        low = -consts.TOLERANCE
                    if abs(high) < consts.TOLERANCE:
                        high = -consts.TOLERANCE

                elif not self.parameter_positive \
                        and not self.parameter_negative \
                        and self.parameter_zero:

                    low = 0.0
                    high = 0.0

                elif not self.parameter_positive \
                        and not self.parameter_negative \
                        and not self.parameter_zero:

                    raise ValueError("Parameter must be allowed to be real!")

        # Use the bisection method to find where |f(x)|_2 <= eps.
        neg_count = 0

        mid = (low + high) / 2.0
        f_mid = function.f(mid)
        i = 0
        while True:
            # Keep the sub-interval whose endpoints still have opposite signs.
            if np.sign(f_mid) == np.sign(f_low):
                low = mid
                f_low = f_mid
            else:
                high = mid
                f_high = f_mid

            mid = (low + high) / 2.0
            f_mid = function.f(mid)

            if (abs(f_high - f_low) <= self.eps and i >= self.min_iter - 1) \
                    or i >= self.max_iter - 1:
                # Optionally keep iterating (up to max_iter extra steps) to
                # drive the midpoint's function value negative.
                if self.force_negative and f_mid > 0.0:
                    if neg_count < self.max_iter:
                        neg_count += 1
                    else:
                        break
                else:
                    break
            i += 1

        if self.info_requested(Info.converged):
            if abs(f_high - f_low) <= self.eps:
                self.info_set(Info.converged, True)

                if self.force_negative and f_mid > 0.0:
                    self.info_set(Info.converged, False)
        if self.info_requested(Info.num_iter):
            self.info_set(Info.num_iter, i + 1)
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, True)

        self.num_iter = i + 1

        # TODO: We already have f_mid, so we can return a better approximation
        # here!
        return mid
class NewtonRaphson(bases.ExplicitAlgorithm,
                    bases.IterativeAlgorithm,
                    bases.InformationAlgorithm):
    """Finds a root of the function assumed to be in the vicinity of a given
    point.

    Newtons method is not guaranteed to converge, and may diverge from the
    solution if e.g. the starting point is too far from the root.

    Problems may also arise if the gradient is too small (e.g. at a stationary
    point) on the path to the root.

    Parameters
    ----------
    force_negative : Boolean. Default is False. Will try to make the result
            negative. It may fail if the function does not behave "nicely"
            around the found point.

    eps : Positive float. A small value used as the stopping criterion. The
            stopping criterion will be fulfilled if it converges in less
            than max_iter iterations.

    info : List or tuple of utils.Info. What, if any, extra run
            information should be stored. Default is None (treated as an
            empty list), which means that no run information is computed
            nor returned.

    max_iter : Non-negative integer. Maximum allowed number of iterations.

    min_iter : Non-negative integer less than or equal to max_iter. Minimum
            number of iterations that must be performed. Default is 1.
    """
    INTERFACES = [properties.Function,
                  properties.Gradient]

    INFO_PROVIDED = [Info.ok,
                     Info.num_iter,
                     Info.converged]

    def __init__(self, force_negative=False,
                 parameter_positive=True,
                 parameter_negative=True,
                 parameter_zero=True,
                 eps=consts.TOLERANCE,
                 info=None, max_iter=30, min_iter=1):
        # info=None replaces the mutable default info=[]: a shared list
        # default could leak requested-info state between instances.
        super(NewtonRaphson, self).__init__(info=[] if info is None else info,
                                            max_iter=max_iter,
                                            min_iter=min_iter)

        self.force_negative = force_negative
        self.parameter_positive = parameter_positive
        self.parameter_negative = parameter_negative
        self.parameter_zero = parameter_zero

        self.eps = eps

    @bases.force_reset
    @bases.check_compatibility
    def run(self, function, x=None):
        """
        Parameters
        ----------
        function : Function. The function for which a root should be found.

        x : Float. The starting point of the Newton-Raphson algorithm. Should
                be "close" to the root.
        """
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, False)

        if x is None:
            # Default starting point consistent with the allowed sign.
            if self.parameter_positive:
                x = 1.0
            elif self.parameter_negative:
                x = -1.0
            else:
                x = 0.0

        # Use the Newton-Raphson algorithm to find a root of f(x).
        i = 0
        while True:
            x_ = x
            f = function.f(x_)
            df = function.grad(x_)
            x = x_ - f / df

            # Project the iterate back onto the allowed domain.
            # TODO: Handle the other cases!
            if not self.parameter_negative \
                    and not self.parameter_zero \
                    and self.parameter_positive \
                    and x < consts.TOLERANCE:
                x = consts.TOLERANCE
            elif not self.parameter_negative \
                    and self.parameter_zero \
                    and self.parameter_positive \
                    and x < 0.0:
                x = 0.0

            # TODO: We seek a root, i.e. where f(x) = 0. The stopping criterion
            # should (could?) thus be abs(f(x)) <= eps!
            if (abs(x - x_) <= self.eps and i >= self.min_iter - 1) \
                    or i >= self.max_iter - 1:
                if self.force_negative:
                    f = function.f(x)
                    if f > 0.0:
                        df = function.grad(x)
                        # We assume that we are within |x_opt - x| < eps from
                        # the root. I.e. that the root is within the interval
                        # [x_opt - eps, x_opt + eps]. We are at x_opt + eps or
                        # x_opt - eps. Then we go to x_opt - 0.5 * eps or
                        # x_opt + 0.5 * eps, respectively.
                        x -= 1.5 * (f / df)

                        # f = function.f(x)
                break
            i += 1

        if self.info_requested(Info.converged):
            if abs(x - x_) <= self.eps:  # TODO: Stopping criterion. See above!
                self.info_set(Info.converged, True)

                if self.force_negative:
                    f = function.f(x)
                    if f > 0.0:
                        self.info_set(Info.converged, False)
        if self.info_requested(Info.num_iter):
            self.info_set(Info.num_iter, i + 1)
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, True)

        self.num_iter = i + 1

        return x
class BacktrackingLineSearch(bases.ExplicitAlgorithm):
    """Finds a step length a that fulfills a given descent criterion.
    """
    INTERFACES = [properties.Function,
                  properties.Gradient]

    def __init__(self, condition=None,
                 output=False,
                 max_iter=30, min_iter=1,
                 eps=consts.TOLERANCE):  # Note that tolerance is never used!
        """
        Parameters
        ----------
        condition : The class of the descent condition. If not given, defaults
                to the SufficientDescentCondition.

        output : Boolean. Whether or not to return additional output.

        max_iter : Non-negative integer. The maximum allowed number of
                iterations.

        min_iter : Non-negative integer, min_iter <= max_iter. The minimum
                number of iterations that must be made.
        """
        self.condition = condition
        if self.condition is None:
            self.condition = penalties.SufficientDescentCondition
        self.output = output
        self.max_iter = max_iter
        self.min_iter = min_iter

    def run(self, function, x, p, rho=0.5, a=1.0, condition_params=None):
        """Finds the step length for a descent algorithm.

        Parameters
        ----------
        function : A Loss function. The function to minimise.

        x : Numpy array. The current point.

        p : Numpy array. The descent direction.

        rho : Float, 0 < rho < 1. The rate at which to decrease a in each
                iteration. Smaller will finish faster, but may yield a lesser
                descent.

        a : Float. The upper bound on the step length. Defaults to 1.0, which
                is suitable for e.g. Newton's method.

        condition_params : Dictionary. Parameters for the descent condition.
                Default is None, which is interpreted as an empty dictionary.
        """
        # None replaces the mutable default condition_params=dict(): a shared
        # default dictionary could be mutated by a condition class and leak
        # state between calls.
        if condition_params is None:
            condition_params = dict()

        self.check_compatibility(function, self.INTERFACES)

        line_search = self.condition(function, p, **condition_params)
        it = 0
        while True:
            if line_search.feasible((x, a)):
                # Found a step length that satisfies the descent condition.
                return a

            it += 1
            if it >= self.max_iter:
                return 0.0  # If we did not find a feasible point, don't move!

            a = a * rho
class StepSize(object):
    """Base class for step-size policies used by iterative algorithms.

    Subclasses implement __call__ and return the step length to use at
    iteration k (optionally using the current iterate and/or gradient).
    """
    # NOTE(review): __metaclass__ is Python 2 syntax and has no effect under
    # Python 3, so the class is not actually abstract there — confirm intent.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __call__(self, k=None, beta=None, grad=None):
        # k: iteration number; beta: current iterate; grad: current gradient.
        # Which of these a policy uses is up to the subclass.
        raise NotImplementedError('Abstract method "__call__" must be '
                                  'specialised!')
class SqSumNotSumStepSize(StepSize):
    """The square-summable-but-not-summable step size

        t_k = a / (b + k),

    with a > 0 and b >= 0.

    Parameters
    ----------
    a : float
        Positive factor in the numerator; larger values give longer steps.
        Default is 0.1.
    b : float
        Non-negative addend in the denominator; larger values give shorter
        steps. Default is 0.
    """
    def __init__(self, a=0.1, b=0.0):
        a = float(a)
        b = float(b)
        # Clamp to the valid ranges: a strictly positive, b non-negative.
        self.a = a if a > consts.TOLERANCE else consts.TOLERANCE
        self.b = b if b > 0.0 else 0.0

    def __call__(self, k=None, beta=None, grad=None):
        denominator = self.b + float(k)
        return self.a / denominator
class NonSumDimStepSize(StepSize):
    """The non-summable diminishing step size

        t_k = a / sqrt(k),

    with a > 0.

    Parameters
    ----------
    a : float
        Positive factor in the numerator; larger values give longer steps.
        Default is 0.1.
    """
    def __init__(self, a=0.1):
        a = float(a)
        # Clamp to a strictly positive value.
        self.a = a if a > consts.TOLERANCE else consts.TOLERANCE

    def __call__(self, k=None, beta=None, grad=None):
        return self.a / np.sqrt(float(k))
# TODO: Be clever if we cannot fit self._K in memory!
class Kernel(object):
    """Base class for kernels, optionally backed by a data matrix X.

    When a matrix X is supplied, evaluations between rows of X are cached
    lazily in a full Gram matrix, so self._K must fit in memory.
    """
    # NOTE(review): __metaclass__ is Python 2 syntax; under Python 3 it has
    # no effect, so the class is not actually abstract there. Left unchanged
    # because enforcing abstractness could break existing callers.
    __metaclass__ = abc.ABCMeta

    def __init__(self, X=None):
        self.X = X
        # Caching is only possible for a matrix-backed kernel.
        self._use_cache = (self.X is not None)
        if self._use_cache:
            self.shape = (self.X.shape[0], self.X.shape[0])
            self.reset()

    def reset(self):
        """Discard all cached kernel evaluations."""
        if self._use_cache:
            self._cache = dict()
            self._vector_cache = dict()
            self._K = np.zeros(self.shape)
            # Builtin bool instead of np.bool: the np.bool alias was removed
            # in NumPy 1.20+.
            self._K_computed = np.zeros(self.shape, dtype=bool)
            self._K_num = 0

    def __call__(self, x1, x2=None):
        """Evaluate the kernel.

        With two arguments, returns k(x1, x2); integer arguments are
        interpreted as row indices into X, anything else as explicit
        vectors. With a single argument, returns the column vector of
        kernel values between every row of X and x1.
        """
        if x2 is not None:
            # np.integer covers all NumPy integer scalar types (np.int32,
            # np.int64, ...), applied consistently in every branch below.
            if (isinstance(x1, (int, np.integer)) and
                    isinstance(x2, (int, np.integer))):
                return self._index(x1, x2)
            else:
                return self._vector(x1, x2)
        else:
            if self.X is None:
                raise RuntimeError("The kernel is not based on a matrix, X!")

            K_ = np.zeros((self.shape[0], 1))
            if isinstance(x1, (int, np.integer)):
                for i in range(self.shape[0]):
                    K_[i, 0] = self._index(i, x1)
            else:
                for i in range(self.shape[0]):
                    K_[i, 0] = self._vector(self.X[i, :], x1)

            return K_

    def dot(self, other):
        """Multiply the (implicit) Gram matrix K with a column vector.

        Uses the dense cached K when every entry has been computed;
        otherwise evaluates the entries one by one through _index.
        """
        if not isinstance(other, np.ndarray):
            raise ValueError("Argument is not a numpy array!")
        if self.X is None:
            raise RuntimeError("The kernel is not based on a matrix, X!")
        if len(other.shape) != 2:
            raise ValueError("Shapes " + str(other.shape) + " and " +
                             str(self.shape) + " not aligned!")
        if other.shape[0] != self.shape[1]:
            raise ValueError("Shapes " + str(other.shape) + " and " +
                             str(self.shape) + " not aligned!")

        if hasattr(self, "_K") and self._K_num == np.prod(self.shape):
            val = self._K.dot(other)
        else:
            # NOTE(review): this fallback only reads other's first column,
            # unlike the dense-K path above — confirm whether multi-column
            # inputs are ever passed here.
            val = np.zeros((self.shape[0], 1))
            for i in range(self.shape[0]):
                for j in range(self.shape[0]):
                    val[i, 0] += self._index(i, j) * other[j, 0]

        return val

    @abc.abstractmethod
    def _index(self, i1, i2):
        # Kernel value between rows i1 and i2 of X.
        raise NotImplementedError('Abstract method "_index" must be '
                                  'specialised!')

    @abc.abstractmethod
    def _vector(self, x1, x2):
        # Kernel value between two explicit vectors.
        raise NotImplementedError('Abstract method "_vector" must be '
                                  'specialised!')
class ExplicitKernel(Kernel):
    """A kernel whose (non-linear) feature map can be computed explicitly."""
    # NOTE(review): __metaclass__ is Python 2 syntax; no effect in Python 3.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def transform(self, w):
        """The explicit non-linear transform of the input vector.
        """
        raise NotImplementedError('Abstract method "transform" must be '
                                  'specialised!')
class LinearKernel(ExplicitKernel):
    """The linear kernel, k(x1, x2) = x1'x2."""

    def __init__(self, **kwargs):
        super(LinearKernel, self).__init__(**kwargs)

    def _index(self, i1, i2):
        """Kernel value between rows i1 and i2 of X, cached when possible."""
        i1 = int(i1)
        i2 = int(i2)

        if self._use_cache and self._K_computed[i1, i2]:
            # Cache hit: the symmetric Gram entry was computed earlier.
            return self._K[i1, i2]

        row1 = self.X[i1, :]
        row2 = self.X[i2, :]
        val = np.dot(row1.T, row2)

        if self._use_cache:
            # Store both symmetric entries so either index order hits
            # the cache.
            self._K_computed[i1, i2] = self._K_computed[i2, i1] = True
            self._K[i1, i2] = self._K[i2, i1] = val
            self._K_num += 2

        if isinstance(val, np.ndarray):
            val = val[0, 0]

        return val

    def _vector(self, x1, x2):
        """Kernel value between two explicit vectors."""
        x1, x2 = check_arrays(x1, x2)
        val = np.dot(x1.T, x2)
        if isinstance(val, np.ndarray):
            val = val[0, 0]
        return val

    def transform(self, w):
        """The linear kernel's explicit feature map is the identity."""
        return w
if __name__ == "__main__":
    import doctest
    doctest.testmod()

# NOTE(review): an extraction artifact had fused dataset column separators
# (" | parsimony/algorithms/utils.py | ") and the next module copy's first
# import onto the doctest line; restored the guard and the import below.
import abc
import numpy as np
try:
from . import bases # Only works when imported as a package.
except (ValueError, SystemError):
import parsimony.algorithms.bases as bases # When run as a program.
from parsimony.utils import check_arrays
import parsimony.utils.consts as consts
import parsimony.functions.penalties as penalties
import parsimony.functions.properties as properties
# Public API of this module.
# NOTE(review): ExplicitKernel is defined below but not exported here —
# confirm whether that omission is intentional.
__all__ = ["Info", "AlgorithmSnapshot",
           "direct_vector",
           "Bisection", "NewtonRaphson",
           "BacktrackingLineSearch",
           "StepSize", "SqSumNotSumStepSize", "NonSumDimStepSize",
           "Kernel", "LinearKernel"]
# TODO: This class should be replaced with Enum.
class Info(object):
    """Enum-like class for information constants.

    Fields may _NOT_ be None!

    This class will be replaced with Enum, so do not rely on the actual values
    of the fields. E.g., never use the string "ok", always use Info.ok.
    """
    ok = "ok"  # Did everything go well?
    converged = "converged"  # Did the algorithm converge?
    num_iter = "num_iter"  # Number of iterations.
    time = "time"  # Time of e.g. every iteration.
    func_val = "func_val"  # Function value at e.g. every iteration.
    # fvalue carries the same value as func_val; prefer func_val in new code.
    fvalue = "fvalue"  # Function value at e.g. every iteration. [Deprecated!!]
    smooth_func_val = "smooth_func_val"  # Smoothed function value.
    gap = "gap"  # The gap at e.g. every iteration.
    mu = "mu"  # Smoothing constant, or other parameter, at every iteration.
    parameter = "parameter"  # Parameter(s), at e.g. every iteration.
    bound = "bound"  # Upper bound at e.g. every iteration.
    beta = "beta"  # E.g. the start vector used.
    betak = "betak"  # The final found vector.
    beta_start = "beta_start"  # The start vector used.
    continuations = "continuations"  # In continuation: Number of continuations
    verbose = "verbose"  # Tell algo to be verbose
    param_start = "param_start"  # The start parameters used.
    param_end = "param_end"  # The final parameters found by the algorithm.
    iterates = "iterates"  # The iterates generated by the algorithm.
    acceptance_rate = "acceptance_rate"  # Acceptance rate of sampling algos.
    other = "other"  # Any info that was not covered by the above.
# TODO: Replace beta, betak and beta_start with param_start and param_end
class AlgorithmSnapshot:
    """Persist snapshots of a running algorithm's state to disk.

    The save_* methods are intended as the `callback` argument of FISTA or
    CONESTA; the solver then calls them with its locals() at every
    iteration, and every `saving_period` calls a compressed .npz snapshot
    is written under `output_prefix`.

    Parameters
    ----------
    output_prefix: string a prefix path to store algorithm state.
    saving_period: int the period (# of iterations) of trig the saving.
    """
    def __init__(self, output_prefix, saving_period=100):
        self.output_prefix = output_prefix
        self.saving_period = saving_period
        # Number of callback invocations seen so far.
        self.cpt = 0
        # Iteration numbers at which a CONESTA continuation occurred.
        self.continuation_ite_nb = []

    def _write(self, snapshot, algo, name_fmt):
        """Dump `snapshot` under output_prefix with a zero-padded counter."""
        # Zero-padding makes the snapshot file names sort in call order.
        width = int(np.log10(algo.max_iter) + 1)
        counter = str(self.cpt).zfill(width)
        np.savez_compressed(self.output_prefix + name_fmt % (counter,),
                            **snapshot)

    def save_conesta(self, algo_locals):
        """CONESTA callback: write a snapshot every `saving_period` calls."""
        self.cpt += 1
        if self.cpt % self.saving_period != 0:
            return
        algo = algo_locals["self"]
        self.continuation_ite_nb.append(algo.num_iter)
        snapshot = {"beta": algo_locals["beta"],
                    "continuation_ite_nb": self.continuation_ite_nb,
                    "gM": algo_locals["gM"]}
        if algo.info_requested(Info.num_iter):
            snapshot[Info.num_iter] = algo.num_iter
        if algo.info_requested(Info.continuations):
            snapshot[Info.continuations] = algo_locals["i"] + 1
        # The remaining info fields map one-to-one onto solver locals.
        for info_key, local_name in ((Info.time, "t_"),
                                     (Info.func_val, "f_"),
                                     (Info.fvalue, "f_"),
                                     (Info.gap, "gap_"),
                                     (Info.mu, "mu_")):
            if algo.info_requested(info_key):
                snapshot[info_key] = algo_locals[local_name]
        self._write(snapshot, algo, 'conesta_ite_%s.npz')

    def save_fista(self, algo_locals):
        """FISTA callback: write a snapshot every `saving_period` calls."""
        self.cpt += 1
        if self.cpt % self.saving_period != 0:
            return
        algo = algo_locals["self"]
        snapshot = {"beta": algo_locals["betanew"]}
        if algo.info_requested(Info.num_iter):
            snapshot[Info.num_iter] = algo.num_iter
        # The remaining info fields map one-to-one onto solver locals.
        for info_key, local_name in ((Info.time, "t_"),
                                     (Info.func_val, "f_"),
                                     (Info.fvalue, "f_"),
                                     (Info.gap, "gap_")):
            if algo.info_requested(info_key):
                snapshot[info_key] = algo_locals[local_name]
        self._write(snapshot, algo, 'fista_ite_%s.npz')
def direct_vector(v):
    """Give a sign-ambiguous vector a canonical direction.

    Some algorithms (e.g. the SVD) return vectors that are only determined
    up to sign while yielding the same optimal function value. The vector
    is flipped, when necessary, so that it is positively correlated with
    the all-ones vector.

    Parameters
    ----------
    v : Numpy array, shape p-by-1. The vector to direct.
    """
    if np.dot(v.T, np.ones(v.shape)) < 0.0:
        return -v
    return v
# TODO: Remove or replace! Use functionality from scipy.optimize instead!
class Bisection(bases.ExplicitAlgorithm,
                bases.IterativeAlgorithm,
                bases.InformationAlgorithm):
    """Finds a root of the function assumed to be on the line between two
    points.

    Assumes a function f(x) such that |f(x)|_2 < -eps if x is too small,
    |f(x)|_2 > eps if x is too large and |f(x)|_2 <= eps if x is just right.

    Parameters
    ----------
    force_negative : Boolean. Default is False. Will try, by running more
            iterations, to make the result negative. It may fail, but that is
            unlikely.

    eps : Positive float. A value such that |f(x)|_2 <= eps. Only guaranteed
            if |f(x)|_2 <= eps in less than max_iter iterations.

    info : List or tuple of utils.Info. What, if any, extra run
            information should be stored. Default is None (treated as an
            empty list), which means that no run information is computed
            nor returned.

    max_iter : Non-negative integer. Maximum allowed number of iterations.

    min_iter : Non-negative integer less than or equal to max_iter. Minimum
            number of iterations that must be performed. Default is 1.
    """
    INTERFACES = [properties.Function]

    INFO_PROVIDED = [Info.ok,
                     Info.num_iter,
                     Info.converged]

    def __init__(self, force_negative=False,
                 parameter_positive=True,
                 parameter_negative=True,
                 parameter_zero=True,
                 eps=consts.TOLERANCE,
                 info=None, max_iter=30, min_iter=1):
        # info=None replaces the mutable default info=[]: a shared list
        # default could leak requested-info state between instances.
        super(Bisection, self).__init__(info=[] if info is None else info,
                                        max_iter=max_iter,
                                        min_iter=min_iter)

        self.force_negative = force_negative
        self.parameter_positive = parameter_positive
        self.parameter_negative = parameter_negative
        self.parameter_zero = parameter_zero

        self.eps = eps

    @bases.force_reset
    @bases.check_compatibility
    def run(self, function, x=None):
        """
        Parameters
        ----------
        function : Function. The function for which a root is found.

        x : A vector or tuple with two elements. The first element is the lower
                end of the interval for which |f(x[0])|_2 < -eps. The second
                element is the upper end of the interfal for which
                |f(x[1])|_2 > eps. If x is None, these values are found
                automatically. Finding them may be slow, though, if the
                function is expensive to evaluate.
        """
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, False)

        if x is not None:
            low = x[0]
            high = x[1]
        else:
            # Pick initial bracket endpoints consistent with the signs the
            # parameter is allowed to take.
            if self.parameter_negative:
                low = -1.0
            elif self.parameter_zero:
                low = 0.0
            else:
                low = consts.TOLERANCE

            if self.parameter_positive:
                high = 1.0
            elif self.parameter_zero:
                high = 0.0
            else:
                high = -consts.TOLERANCE

        # Find start values. If the low and high
        # values are feasible this will just break
        for i in range(self.max_iter):
            f_low = function.f(low)
            f_high = function.f(high)

            if np.sign(f_low) != np.sign(f_high):
                break
            else:
                # Expand the bracket; how depends on which sign combinations
                # the parameter may take, so that low/high never leave their
                # allowed half-line.
                if self.parameter_positive \
                        and self.parameter_negative \
                        and self.parameter_zero:

                    low -= abs(low) * 2.0 ** i
                    high += abs(high) * 2.0 ** i

                elif self.parameter_positive \
                        and self.parameter_negative \
                        and not self.parameter_zero:

                    low -= abs(low) * 2.0 ** i
                    high += abs(high) * 2.0 ** i

                    if abs(low) < consts.TOLERANCE:
                        low -= consts.TOLERANCE
                    if abs(high) < consts.TOLERANCE:
                        high += consts.TOLERANCE

                elif self.parameter_positive \
                        and not self.parameter_negative \
                        and self.parameter_zero:

                    low /= 2.0
                    high *= 2.0

                elif self.parameter_positive \
                        and not self.parameter_negative \
                        and not self.parameter_zero:

                    low /= 2.0
                    high *= 2.0

                    if abs(low) < consts.TOLERANCE:
                        low = consts.TOLERANCE
                    if abs(high) < consts.TOLERANCE:
                        high = consts.TOLERANCE

                elif not self.parameter_positive \
                        and self.parameter_negative \
                        and self.parameter_zero:

                    low *= 2.0
                    high /= 2.0

                elif not self.parameter_positive \
                        and self.parameter_negative \
                        and not self.parameter_zero:

                    low *= 2.0
                    high /= 2.0

                    if abs(low) < consts.TOLERANCE:
                        low = -consts.TOLERANCE
                    if abs(high) < consts.TOLERANCE:
                        high = -consts.TOLERANCE

                elif not self.parameter_positive \
                        and not self.parameter_negative \
                        and self.parameter_zero:

                    low = 0.0
                    high = 0.0

                elif not self.parameter_positive \
                        and not self.parameter_negative \
                        and not self.parameter_zero:

                    raise ValueError("Parameter must be allowed to be real!")

        # Use the bisection method to find where |f(x)|_2 <= eps.
        neg_count = 0

        mid = (low + high) / 2.0
        f_mid = function.f(mid)
        i = 0
        while True:
            # Keep the sub-interval whose endpoints still have opposite signs.
            if np.sign(f_mid) == np.sign(f_low):
                low = mid
                f_low = f_mid
            else:
                high = mid
                f_high = f_mid

            mid = (low + high) / 2.0
            f_mid = function.f(mid)

            if (abs(f_high - f_low) <= self.eps and i >= self.min_iter - 1) \
                    or i >= self.max_iter - 1:
                # Optionally keep iterating (up to max_iter extra steps) to
                # drive the midpoint's function value negative.
                if self.force_negative and f_mid > 0.0:
                    if neg_count < self.max_iter:
                        neg_count += 1
                    else:
                        break
                else:
                    break
            i += 1

        if self.info_requested(Info.converged):
            if abs(f_high - f_low) <= self.eps:
                self.info_set(Info.converged, True)

                if self.force_negative and f_mid > 0.0:
                    self.info_set(Info.converged, False)
        if self.info_requested(Info.num_iter):
            self.info_set(Info.num_iter, i + 1)
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, True)

        self.num_iter = i + 1

        # TODO: We already have f_mid, so we can return a better approximation
        # here!
        return mid
class NewtonRaphson(bases.ExplicitAlgorithm,
                    bases.IterativeAlgorithm,
                    bases.InformationAlgorithm):
    """Finds a root of the function assumed to be in the vicinity of a given
    point.

    Newtons method is not guaranteed to converge, and may diverge from the
    solution if e.g. the starting point is too far from the root.

    Problems may also arise if the gradient is too small (e.g. at a stationary
    point) on the path to the root.

    Parameters
    ----------
    force_negative : Boolean. Default is False. Will try to make the result
            negative. It may fail if the function does not behave "nicely"
            around the found point.

    eps : Positive float. A small value used as the stopping criterion. The
            stopping criterion will be fulfilled if it converges in less
            than max_iter iterations.

    info : List or tuple of utils.Info. What, if any, extra run
            information should be stored. Default is None (treated as an
            empty list), which means that no run information is computed
            nor returned.

    max_iter : Non-negative integer. Maximum allowed number of iterations.

    min_iter : Non-negative integer less than or equal to max_iter. Minimum
            number of iterations that must be performed. Default is 1.
    """
    INTERFACES = [properties.Function,
                  properties.Gradient]

    INFO_PROVIDED = [Info.ok,
                     Info.num_iter,
                     Info.converged]

    def __init__(self, force_negative=False,
                 parameter_positive=True,
                 parameter_negative=True,
                 parameter_zero=True,
                 eps=consts.TOLERANCE,
                 info=None, max_iter=30, min_iter=1):
        # info=None replaces the mutable default info=[]: a shared list
        # default could leak requested-info state between instances.
        super(NewtonRaphson, self).__init__(info=[] if info is None else info,
                                            max_iter=max_iter,
                                            min_iter=min_iter)

        self.force_negative = force_negative
        self.parameter_positive = parameter_positive
        self.parameter_negative = parameter_negative
        self.parameter_zero = parameter_zero

        self.eps = eps

    @bases.force_reset
    @bases.check_compatibility
    def run(self, function, x=None):
        """
        Parameters
        ----------
        function : Function. The function for which a root should be found.

        x : Float. The starting point of the Newton-Raphson algorithm. Should
                be "close" to the root.
        """
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, False)

        if x is None:
            # Default starting point consistent with the allowed sign.
            if self.parameter_positive:
                x = 1.0
            elif self.parameter_negative:
                x = -1.0
            else:
                x = 0.0

        # Use the Newton-Raphson algorithm to find a root of f(x).
        i = 0
        while True:
            x_ = x
            f = function.f(x_)
            df = function.grad(x_)
            x = x_ - f / df

            # Project the iterate back onto the allowed domain.
            # TODO: Handle the other cases!
            if not self.parameter_negative \
                    and not self.parameter_zero \
                    and self.parameter_positive \
                    and x < consts.TOLERANCE:
                x = consts.TOLERANCE
            elif not self.parameter_negative \
                    and self.parameter_zero \
                    and self.parameter_positive \
                    and x < 0.0:
                x = 0.0

            # TODO: We seek a root, i.e. where f(x) = 0. The stopping criterion
            # should (could?) thus be abs(f(x)) <= eps!
            if (abs(x - x_) <= self.eps and i >= self.min_iter - 1) \
                    or i >= self.max_iter - 1:
                if self.force_negative:
                    f = function.f(x)
                    if f > 0.0:
                        df = function.grad(x)
                        # We assume that we are within |x_opt - x| < eps from
                        # the root. I.e. that the root is within the interval
                        # [x_opt - eps, x_opt + eps]. We are at x_opt + eps or
                        # x_opt - eps. Then we go to x_opt - 0.5 * eps or
                        # x_opt + 0.5 * eps, respectively.
                        x -= 1.5 * (f / df)

                        # f = function.f(x)
                break
            i += 1

        if self.info_requested(Info.converged):
            if abs(x - x_) <= self.eps:  # TODO: Stopping criterion. See above!
                self.info_set(Info.converged, True)

                if self.force_negative:
                    f = function.f(x)
                    if f > 0.0:
                        self.info_set(Info.converged, False)
        if self.info_requested(Info.num_iter):
            self.info_set(Info.num_iter, i + 1)
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, True)

        self.num_iter = i + 1

        return x
class BacktrackingLineSearch(bases.ExplicitAlgorithm):
    """Finds a step length a that fulfills a given descent criterion.
    """
    INTERFACES = [properties.Function,
                  properties.Gradient]

    def __init__(self, condition=None,
                 output=False,
                 max_iter=30, min_iter=1,
                 eps=consts.TOLERANCE):  # Note that tolerance is never used!
        """
        Parameters
        ----------
        condition : The class of the descent condition. If not given, defaults
                to the SufficientDescentCondition.

        output : Boolean. Whether or not to return additional output.

        max_iter : Non-negative integer. The maximum allowed number of
                iterations.

        min_iter : Non-negative integer, min_iter <= max_iter. The minimum
                number of iterations that must be made.
        """
        self.condition = condition
        if self.condition is None:
            self.condition = penalties.SufficientDescentCondition
        self.output = output
        self.max_iter = max_iter
        self.min_iter = min_iter

    def run(self, function, x, p, rho=0.5, a=1.0, condition_params=None):
        """Finds the step length for a descent algorithm.

        Parameters
        ----------
        function : A Loss function. The function to minimise.

        x : Numpy array. The current point.

        p : Numpy array. The descent direction.

        rho : Float, 0 < rho < 1. The rate at which to decrease a in each
                iteration. Smaller will finish faster, but may yield a lesser
                descent.

        a : Float. The upper bound on the step length. Defaults to 1.0, which
                is suitable for e.g. Newton's method.

        condition_params : Dictionary. Parameters for the descent condition.
                Default is None, which is interpreted as an empty dictionary.
        """
        # None replaces the mutable default condition_params=dict(): a shared
        # default dictionary could be mutated by a condition class and leak
        # state between calls.
        if condition_params is None:
            condition_params = dict()

        self.check_compatibility(function, self.INTERFACES)

        line_search = self.condition(function, p, **condition_params)
        it = 0
        while True:
            if line_search.feasible((x, a)):
                # Found a step length that satisfies the descent condition.
                return a

            it += 1
            if it >= self.max_iter:
                return 0.0  # If we did not find a feasible point, don't move!

            a = a * rho
class StepSize(object):
    """Base class for step-size policies used by iterative algorithms.

    Subclasses implement __call__ and return the step length to use at
    iteration k (optionally using the current iterate and/or gradient).
    """
    # NOTE(review): __metaclass__ is Python 2 syntax and has no effect under
    # Python 3, so the class is not actually abstract there — confirm intent.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __call__(self, k=None, beta=None, grad=None):
        # k: iteration number; beta: current iterate; grad: current gradient.
        # Which of these a policy uses is up to the subclass.
        raise NotImplementedError('Abstract method "__call__" must be '
                                  'specialised!')
class SqSumNotSumStepSize(StepSize):
"""Represents the square summable but not summable step size
t_k = a / (b + k),
where a > 0 and b >= 0.
Parameters
----------
a : float
Positive value. Factor in the numerator. Large values give longer
steps. Default is 0.1.
b : float
Non-negative value. Addend in the denominator. Large values give
smaller steps. Default is 0.
"""
def __init__(self, a=0.1, b=0.0):
self.a = max(consts.TOLERANCE, float(a))
self.b = max(0.0, float(b))
def __call__(self, k=None, beta=None, grad=None):
return self.a / (self.b + float(k))
class NonSumDimStepSize(StepSize):
"""Represents the non-summable diminishing step size
t_k = a / sqrt(k),
where a > 0.
Parameters
----------
a : float
Positive value. Factor in the numerator. Large values give longer
steps. Default is 0.1.
"""
def __init__(self, a=0.1):
self.a = max(consts.TOLERANCE, float(a))
def __call__(self, k=None, beta=None, grad=None):
return self.a / np.sqrt(float(k))
# TODO: Be clever if we cannot fit self._K in memory!
class Kernel(object):
__metaclass__ = abc.ABCMeta
def __init__(self, X=None):
self.X = X
self._use_cache = (self.X is not None)
if self._use_cache:
self.shape = (self.X.shape[0], self.X.shape[0])
self.reset()
def reset(self):
if self._use_cache:
self._cache = dict()
self._vector_cache = dict()
self._K = np.zeros(self.shape)
self._K_computed = np.zeros(self.shape, dtype=np.bool)
self._K_num = 0
def __call__(self, x1, x2=None):
if x2 is not None:
if (isinstance(x1, (int, np.int32, np.int64)) and
isinstance(x2, (int, np.int32, np.int64))):
return self._index(x1, x2)
else:
return self._vector(x1, x2)
else:
if self.X is None:
raise RuntimeError("The kernel is not based on a matrix, X!")
K_ = np.zeros((self.shape[0], 1))
if isinstance(x1, (int, np.int64)):
for i in range(self.shape[0]):
K_[i, 0] = self._index(i, x1)
else:
for i in range(self.shape[0]):
K_[i, 0] = self._vector(self.X[i, :], x1)
return K_
def dot(self, other):
if not isinstance(other, np.ndarray):
raise ValueError("Argument is not a numpy array!")
if self.X is None:
raise RuntimeError("The kernel is not based on a matrix, X!")
if len(other.shape) != 2:
raise ValueError("Shapes " + str(other.shape) + " and " +
str(self.shape) + " not aligned!")
if other.shape[0] != self.shape[1]:
raise ValueError("Shapes " + str(other.shape) + " and " +
str(self.shape) + " not aligned!")
if hasattr(self, "_K") and self._K_num == np.prod(self.shape):
val = self._K.dot(other)
else:
val = np.zeros((self.shape[0], 1))
for i in range(self.shape[0]):
for j in range(self.shape[0]):
val[i, 0] += self._index(i, j) * other[j, 0]
return val
@abc.abstractmethod
def _index(self, i1, i2):
raise NotImplementedError('Abstract method "_index" must be '
'specialised!')
@abc.abstractmethod
def _vector(self, x1, x2):
raise NotImplementedError('Abstract method "_vector" must be '
'specialised!')
class ExplicitKernel(Kernel):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def transform(self, w):
"""The explicit non-linear transform of the input vector.
"""
raise NotImplementedError('Abstract method "transform" must be '
'specialised!')
class LinearKernel(ExplicitKernel):
def __init__(self, **kwargs):
super(LinearKernel, self).__init__(**kwargs)
def _index(self, i1, i2):
i1 = int(i1)
i2 = int(i2)
if self._use_cache:
# if (i1, i2) in self._cache:
# return self._cache[(i1, i2)]
if self._K_computed[i1, i2]:
return self._K[i1, i2]
else:
x1 = self.X[i1, :]
x2 = self.X[i2, :]
val = np.dot(x1.T, x2)
# self._cache[(i1, i2)] = val
self._K_computed[i1, i2] = True
self._K_computed[i2, i1] = True
self._K[i1, i2] = val
self._K[i2, i1] = val
self._K_num += 2
else:
x1 = self.X[i1, :]
x2 = self.X[i2, :]
val = np.dot(x1.T, x2)
if isinstance(val, np.ndarray):
val = val[0, 0]
return val
def _vector(self, x1, x2):
x1, x2 = check_arrays(x1, x2)
val = np.dot(x1.T, x2)
if isinstance(val, np.ndarray):
val = val[0, 0]
return val
def transform(self, w):
return w
if __name__ == "__main__":
import doctest
doctest.testmod() | 0.522202 | 0.320542 |
import os
import time
import random
def cls():
# Limpar tela
os.system('cls' if os.name == 'nt' else 'clear')
def Intro():
# Introdução do jogo
print("=*" * 20)
print(f"{'Jogo da Adivinhação':^40}")
print("=*" * 20)
time.sleep(2)
cls()
print("=*" * 20)
print(f"{'Tente adivinhar o número sorteado':^40}")
print("=*" * 20)
time.sleep(2)
def menu():
# Menu de opções com tentativas
while True:
print("*=" * 21)
print(f"{'Opção | Dificuldade | Info':^40}")
print("-"*42)
print(" 1 | Fácil | 20 tentativas")
print(" 2 | Médio | 10 tentativas")
print(" 3 | Difícil | 5 tentativas")
print("*=" * 21)
try:
opcao_menu = int(input("Qual a dificuldade do jogo?\n"))
if (opcao_menu < 1 or opcao_menu > 3):
print("Por favor, informe uma opção válida, de 1 a 4\n")
time.sleep(2)
cls()
continue
else:
break
except:
print("Por favor, informe uma opção válida, de 1 a 4\n")
time.sleep(2)
cls()
if (opcao_menu == 1):
chance = 20
elif (opcao_menu == 2):
chance = 10
else:
chance = 5
return chance
def numero_digitado():
"""
Válida o número digitado pelo usuário
:return: retorna o número digitado já validado pro game
"""
while True:
try:
numero_player = int(input("Seu palpite >> "))
if (numero_player < 1 or numero_player > 100):
print("Por favor, informe um número entre 1 e 100\n")
continue
else:
break
except:
print("Não conseguir entender, por favor digite um número inteiro")
return numero_player
def logica_game(x:int, num_rand:int):
"""
Contém toda lógica dos ifs e ira retornar o i (variável de controle)
:param x: Número inteiro com a quantidade de tentativas escolhida no menu
:return: Caso retorne 0 significa que o jogador ganhou, caso retorne diferente de 0 significa q perdeu.
Retorna também os pontos
"""
pontos = 100
print("tente adivinhar um número entre 1 e 100\n")
for i in range(x):
numero_jogador = numero_digitado() # Pega o número do usuario
# Verificação
menor = numero_jogador > num_rand
maior = numero_jogador < num_rand
pontos = pontos - abs(num_rand - numero_jogador)
if (menor):
print("O número aleatório é menor do que o seu")
elif (maior):
print("O número aleatório é maior do que o seu")
else:
i = 0
break
print(f"{i + 1} de {x} tentativas")
print("-" * 20)
return i, pontos
def Jogar():
Intro()
cls()
tentativas = menu() # Menu com as dificuldades (tentativas)
cls()
numero_aleatorio = random.randint(1, 100) # gera numeros aleatorios
ganhou, pontos_player = logica_game(tentativas, numero_aleatorio) # logica dos ifs do game
if (ganhou == 0):
print("Parabéns, você acertou o número")
print(f"Sua pontuação: {pontos_player}/100")
else:
print(f"Infelizmente você não conseguiu adivinhar o número {numero_aleatorio}")
if (__name__ == "__main__"):
Jogar() | Projetos/jogo_adivinhacao.py | import os
import time
import random
def cls():
# Limpar tela
os.system('cls' if os.name == 'nt' else 'clear')
def Intro():
# Introdução do jogo
print("=*" * 20)
print(f"{'Jogo da Adivinhação':^40}")
print("=*" * 20)
time.sleep(2)
cls()
print("=*" * 20)
print(f"{'Tente adivinhar o número sorteado':^40}")
print("=*" * 20)
time.sleep(2)
def menu():
# Menu de opções com tentativas
while True:
print("*=" * 21)
print(f"{'Opção | Dificuldade | Info':^40}")
print("-"*42)
print(" 1 | Fácil | 20 tentativas")
print(" 2 | Médio | 10 tentativas")
print(" 3 | Difícil | 5 tentativas")
print("*=" * 21)
try:
opcao_menu = int(input("Qual a dificuldade do jogo?\n"))
if (opcao_menu < 1 or opcao_menu > 3):
print("Por favor, informe uma opção válida, de 1 a 4\n")
time.sleep(2)
cls()
continue
else:
break
except:
print("Por favor, informe uma opção válida, de 1 a 4\n")
time.sleep(2)
cls()
if (opcao_menu == 1):
chance = 20
elif (opcao_menu == 2):
chance = 10
else:
chance = 5
return chance
def numero_digitado():
"""
Válida o número digitado pelo usuário
:return: retorna o número digitado já validado pro game
"""
while True:
try:
numero_player = int(input("Seu palpite >> "))
if (numero_player < 1 or numero_player > 100):
print("Por favor, informe um número entre 1 e 100\n")
continue
else:
break
except:
print("Não conseguir entender, por favor digite um número inteiro")
return numero_player
def logica_game(x:int, num_rand:int):
"""
Contém toda lógica dos ifs e ira retornar o i (variável de controle)
:param x: Número inteiro com a quantidade de tentativas escolhida no menu
:return: Caso retorne 0 significa que o jogador ganhou, caso retorne diferente de 0 significa q perdeu.
Retorna também os pontos
"""
pontos = 100
print("tente adivinhar um número entre 1 e 100\n")
for i in range(x):
numero_jogador = numero_digitado() # Pega o número do usuario
# Verificação
menor = numero_jogador > num_rand
maior = numero_jogador < num_rand
pontos = pontos - abs(num_rand - numero_jogador)
if (menor):
print("O número aleatório é menor do que o seu")
elif (maior):
print("O número aleatório é maior do que o seu")
else:
i = 0
break
print(f"{i + 1} de {x} tentativas")
print("-" * 20)
return i, pontos
def Jogar():
Intro()
cls()
tentativas = menu() # Menu com as dificuldades (tentativas)
cls()
numero_aleatorio = random.randint(1, 100) # gera numeros aleatorios
ganhou, pontos_player = logica_game(tentativas, numero_aleatorio) # logica dos ifs do game
if (ganhou == 0):
print("Parabéns, você acertou o número")
print(f"Sua pontuação: {pontos_player}/100")
else:
print(f"Infelizmente você não conseguiu adivinhar o número {numero_aleatorio}")
if (__name__ == "__main__"):
Jogar() | 0.234319 | 0.212988 |
import json
import logging
from optparse import OptionParser
import copy
import sys
import spplib.sdk.client as client
logging.basicConfig()
logger = logging.getLogger('logger')
logger.setLevel(logging.INFO)
parser = OptionParser()
parser.add_option("--user", dest="username", help="SPP Username")
parser.add_option("--pass", dest="password", help="SPP Password")
parser.add_option("--host", dest="host", help="SPP Host, (ex. https://172.20.49.49)")
parser.add_option("--vshost", dest="vshost", help="vSnap hostname or IP")
parser.add_option("--vssite", dest="vssite", help="vSnap site name (example: Primary)")
parser.add_option("--vsuser", dest="vsuser", help="vSnap username")
parser.add_option("--vspass", dest="vspass", help="vSnap password")
(options, args) = parser.parse_args()
def prettyprint(indata):
print(json.dumps(indata, sort_keys=True,indent=4, separators=(',', ': ')))
def validate_input():
if(options.username is None or options.password is None or options.host is None or
options.vshost is None or options.vssite is None or options.vsuser is None
or options.vspass is None):
print("Invalid input, use -h switch for help")
sys.exit(2)
def find_site_by_name():
sites = client.SppAPI(session, 'coresite').get()['sites']
for site in sites:
if(site['name'].upper() == options.vssite.upper()):
return site['id']
logger.error("Site name not found")
session.logout()
sys.exit(2)
def register_vsnap():
vsnapinfo = {}
vsnapinfo['siteId'] = find_site_by_name()
vsnapinfo['hostAddress'] = options.vshost
vsnapinfo['username'] = options.vsuser
vsnapinfo['password'] = <PASSWORD>
vsnapinfo['portNumber'] = "8900"
vsnapinfo['sslConnection'] = True
vsnapinfo['type'] = "vsnap"
try:
response = client.SppAPI(session, 'storage').post(data=vsnapinfo)
print(options.vshost + " is registered")
except client.requests.exceptions.HTTPError as err:
errmsg = json.loads(err.response.content)
print(errmsg['response']['description'])
validate_input()
session = client.SppSession(options.host, options.username, options.password)
session.login()
register_vsnap()
session.logout() | samples/registervsnap.py |
import json
import logging
from optparse import OptionParser
import copy
import sys
import spplib.sdk.client as client
logging.basicConfig()
logger = logging.getLogger('logger')
logger.setLevel(logging.INFO)
parser = OptionParser()
parser.add_option("--user", dest="username", help="SPP Username")
parser.add_option("--pass", dest="password", help="SPP Password")
parser.add_option("--host", dest="host", help="SPP Host, (ex. https://172.20.49.49)")
parser.add_option("--vshost", dest="vshost", help="vSnap hostname or IP")
parser.add_option("--vssite", dest="vssite", help="vSnap site name (example: Primary)")
parser.add_option("--vsuser", dest="vsuser", help="vSnap username")
parser.add_option("--vspass", dest="vspass", help="vSnap password")
(options, args) = parser.parse_args()
def prettyprint(indata):
print(json.dumps(indata, sort_keys=True,indent=4, separators=(',', ': ')))
def validate_input():
if(options.username is None or options.password is None or options.host is None or
options.vshost is None or options.vssite is None or options.vsuser is None
or options.vspass is None):
print("Invalid input, use -h switch for help")
sys.exit(2)
def find_site_by_name():
sites = client.SppAPI(session, 'coresite').get()['sites']
for site in sites:
if(site['name'].upper() == options.vssite.upper()):
return site['id']
logger.error("Site name not found")
session.logout()
sys.exit(2)
def register_vsnap():
vsnapinfo = {}
vsnapinfo['siteId'] = find_site_by_name()
vsnapinfo['hostAddress'] = options.vshost
vsnapinfo['username'] = options.vsuser
vsnapinfo['password'] = <PASSWORD>
vsnapinfo['portNumber'] = "8900"
vsnapinfo['sslConnection'] = True
vsnapinfo['type'] = "vsnap"
try:
response = client.SppAPI(session, 'storage').post(data=vsnapinfo)
print(options.vshost + " is registered")
except client.requests.exceptions.HTTPError as err:
errmsg = json.loads(err.response.content)
print(errmsg['response']['description'])
validate_input()
session = client.SppSession(options.host, options.username, options.password)
session.login()
register_vsnap()
session.logout() | 0.158956 | 0.067332 |
import ast
import asyncio
import tokenize
import io
import sys
from contextlib import redirect_stdout
__author__ = "Zylanx"
class OutputExprRewriter(ast.NodeTransformer):
"""
OutputExprRewriter: This transformer runs through every top level statement and wraps them in
so they send their result to the function "outputExpr".
This is the basis for the "Interactive Interpreter" style of return value display
It also removes "await" statements, just leaving the
expression afterwards (which it proceeds to process)
"""
def visit_FunctionDef(self, node):
self.generic_visit(node)
return node
def visit_AsyncFunctionDef(self, node):
return node
def visit_Expr(self, node):
if not isinstance(node.value, list):
if not node.value:
args = []
else:
args = [node.value]
else:
args = node.value
call = ast.Call(ast.Name("outputExpr", ast.Load()), args, [])
newNode = ast.Expr(value=call)
ast.copy_location(newNode, node)
ast.fix_missing_locations(newNode)
self.generic_visit(newNode)
return newNode
def visit_Await(self, node):
newNode = node.value
ast.copy_location(newNode, node)
ast.fix_missing_locations(newNode)
self.generic_visit(newNode)
return newNode
class FunctionAsyncRewriter(ast.NodeTransformer):
"""
FunctionAsyncRewriter: This transformer runs through the AST and redirects all function calls
to be wrapped by the "callFuncExec" function
"""
def visit_Call(self, node):
if not isinstance(node.args, list):
if not node.args:
args = []
else:
args = [node.args]
else:
args = node.args
args.insert(0, node.func)
call = ast.Call(ast.Name("callFuncExec", ast.Load()), args, node.keywords)
ast.copy_location(call, node)
ast.fix_missing_locations(call)
self.generic_visit(call)
return call
class FinishedSigWrapper(ast.NodeTransformer):
"""
FinishedSigWrapper: This transformer wraps the modified code in some extra code to deal with communicating
the execution to the outside world and signaling to the future that it is now done
and the command has completed, or if there is a failure, that an exception has
occurred
"""
def visit_Module(self, node):
setDoneNode = ast.Expr(ast.Call(ast.Attribute(ast.Name("finishedExecSig", ast.Load()), "set_result", ast.Load()), [ast.NameConstant(None)], []))
setExceptionNode = ast.Expr(ast.Call(ast.Attribute(ast.Name("finishedExecSig", ast.Load()), "set_exception", ast.Load()), [ast.Name("e", ast.Load())], []))
mainBody = node.body + [setDoneNode]
tryExceptNode = ast.ExceptHandler(ast.Name("Exception", ast.Load()), "e", [setExceptionNode])
tryNode = ast.Try(mainBody, [tryExceptNode], [], [])
newNode = ast.Module([tryNode])
ast.copy_location(newNode, node)
ast.fix_missing_locations(newNode)
return newNode
# TODO: Comment outputExpr
def outputExpr(value):
"""
outputExpr: Top level expressions are wrapped by a call to this function.
This function simply prints the repr of the result of the wrapped expression.
"""
if value is not None:
print(repr(value))
# TODO: Comment OutputExpr
# COMMENT: OutputExpr is just left over from before the stdout was smart piped like it is now
class OutputExpr:
def __init__(self, pipe):
self.pipe = pipe
def printExpr(self, value):
if value is not None:
print(repr(value), file=self.pipe)
# WARNING: This function messes with the internal asyncio
# event loop in ways it shouldn't. Use at your own discretion!
def callFuncExec(func, *args, **kwargs):
"""
callFuncExec: This function does most of the heavy lifting for the library
It takes in a function and depending on whether it is a normal function
or a coroutine, either execute it normally, or otherwise take
control of the asyncio event loop and run it synchronously.
"""
# If nothing passed in, then there is a fatal error and it needs to exit
if not func:
raise Exception("No function passed in")
# If the function is a coroutine, add the function to the event
# loop then step through the loop until the future has completed
if asyncio.iscoroutinefunction(func):
loop = asyncio.get_event_loop()
fut = asyncio.ensure_future(func(*args, **kwargs)) # Adds the func as a future to the loop
# Leaving our managed code so redirect stdout back to system
with redirect_stdout(sys.__stdout__):
while not fut.done(): # loop until the future is ready
loop._run_once()
result = fut.result()
else: # Normal function. Just execute as normal
result = func(*args, **kwargs)
return result
# TODO: Comment fixASTAwaitError
# TODO: Strip awaits from even more places
def fixASTAwaitError(text, offset):
tokenList = list(tokenize.tokenize(io.BytesIO(text.encode("utf-8")).readline))
def flattenList():
returnList = []
for token in tokenList:
returnList.append((token.type, token.string))
return returnList
def findTokenAtOffset(offset):
for index, token in enumerate(tokenList):
if token.start[1] <= offset and offset < token.end[1]:
return index
return None
def tokenMatchType(index, tokenType):
if tokenList[index].exact_type == tokenType:
return True
else:
return False
def tokenMatchValue(index, value):
if tokenList[index].string == value:
return True
else:
return False
def tokenMatch(index, tokenType, value):
token = tokenList[index]
if token.exact_type == tokenType and token.string == value:
return True
else:
return False
index = findTokenAtOffset(offset)
if index is None:
index = findTokenAtOffset(offset+1)
if index is None:
return None
else:
if not (tokenMatchType(index, tokenize.DOT) or tokenMatchType(index, tokenize.LPAR)):
return None
if tokenMatchType(index, tokenize.LPAR):
# It is at a function (possibly)
if tokenMatchType(index-1, tokenize.NAME):
# Very likely in a function
if tokenMatch(index-2, tokenize.NAME, "await"):
# Found an await I know I can deal with
del tokenList[index-2]
tokenList = flattenList()
return tokenize.untokenize(tokenList).decode("utf-8")
elif tokenMatchType(index, tokenize.DOT):
# Possibly an attribute call
if tokenMatchType(index+1, tokenize.NAME):
if tokenMatchType(index-1, tokenize.NAME):
if tokenMatch(index-2, tokenize.NAME, "await"):
del tokenList[index-2]
tokenList = flattenList()
return tokenize.untokenize(tokenList).decode("utf-8")
return None
# TODO: Comment parseAST
def parseAST(inputText):
for _ in range(50):
try:
outAST = ast.parse(inputText)
break
except SyntaxError as e:
lineno = e.lineno
offset = e.offset
text = e.text.rstrip("\n")
if text[0] != "\n":
text = "\n" + text
fixedLine = fixASTAwaitError(text, offset)
if fixedLine is None:
raise
else:
fixedLine = fixedLine.lstrip("\n")
inputText = inputText.splitlines()
inputText[lineno-1] = fixedLine
inputText = "\n".join(inputText)
outAST = FunctionAsyncRewriter().visit(outAST)
outAST = OutputExprRewriter().visit(outAST)
outAST = FinishedSigWrapper().visit(outAST)
return outAST | eval_ast_gen.py | import ast
import asyncio
import tokenize
import io
import sys
from contextlib import redirect_stdout
__author__ = "Zylanx"
class OutputExprRewriter(ast.NodeTransformer):
"""
OutputExprRewriter: This transformer runs through every top level statement and wraps them in
so they send their result to the function "outputExpr".
This is the basis for the "Interactive Interpreter" style of return value display
It also removes "await" statements, just leaving the
expression afterwards (which it proceeds to process)
"""
def visit_FunctionDef(self, node):
self.generic_visit(node)
return node
def visit_AsyncFunctionDef(self, node):
return node
def visit_Expr(self, node):
if not isinstance(node.value, list):
if not node.value:
args = []
else:
args = [node.value]
else:
args = node.value
call = ast.Call(ast.Name("outputExpr", ast.Load()), args, [])
newNode = ast.Expr(value=call)
ast.copy_location(newNode, node)
ast.fix_missing_locations(newNode)
self.generic_visit(newNode)
return newNode
def visit_Await(self, node):
newNode = node.value
ast.copy_location(newNode, node)
ast.fix_missing_locations(newNode)
self.generic_visit(newNode)
return newNode
class FunctionAsyncRewriter(ast.NodeTransformer):
"""
FunctionAsyncRewriter: This transformer runs through the AST and redirects all function calls
to be wrapped by the "callFuncExec" function
"""
def visit_Call(self, node):
if not isinstance(node.args, list):
if not node.args:
args = []
else:
args = [node.args]
else:
args = node.args
args.insert(0, node.func)
call = ast.Call(ast.Name("callFuncExec", ast.Load()), args, node.keywords)
ast.copy_location(call, node)
ast.fix_missing_locations(call)
self.generic_visit(call)
return call
class FinishedSigWrapper(ast.NodeTransformer):
"""
FinishedSigWrapper: This transformer wraps the modified code in some extra code to deal with communicating
the execution to the outside world and signaling to the future that it is now done
and the command has completed, or if there is a failure, that an exception has
occurred
"""
def visit_Module(self, node):
setDoneNode = ast.Expr(ast.Call(ast.Attribute(ast.Name("finishedExecSig", ast.Load()), "set_result", ast.Load()), [ast.NameConstant(None)], []))
setExceptionNode = ast.Expr(ast.Call(ast.Attribute(ast.Name("finishedExecSig", ast.Load()), "set_exception", ast.Load()), [ast.Name("e", ast.Load())], []))
mainBody = node.body + [setDoneNode]
tryExceptNode = ast.ExceptHandler(ast.Name("Exception", ast.Load()), "e", [setExceptionNode])
tryNode = ast.Try(mainBody, [tryExceptNode], [], [])
newNode = ast.Module([tryNode])
ast.copy_location(newNode, node)
ast.fix_missing_locations(newNode)
return newNode
# TODO: Comment outputExpr
def outputExpr(value):
"""
outputExpr: Top level expressions are wrapped by a call to this function.
This function simply prints the repr of the result of the wrapped expression.
"""
if value is not None:
print(repr(value))
# TODO: Comment OutputExpr
# COMMENT: OutputExpr is just left over from before the stdout was smart piped like it is now
class OutputExpr:
def __init__(self, pipe):
self.pipe = pipe
def printExpr(self, value):
if value is not None:
print(repr(value), file=self.pipe)
# WARNING: This function messes with the internal asyncio
# event loop in ways it shouldn't. Use at your own discretion!
def callFuncExec(func, *args, **kwargs):
"""
callFuncExec: This function does most of the heavy lifting for the library
It takes in a function and depending on whether it is a normal function
or a coroutine, either execute it normally, or otherwise take
control of the asyncio event loop and run it synchronously.
"""
# If nothing passed in, then there is a fatal error and it needs to exit
if not func:
raise Exception("No function passed in")
# If the function is a coroutine, add the function to the event
# loop then step through the loop until the future has completed
if asyncio.iscoroutinefunction(func):
loop = asyncio.get_event_loop()
fut = asyncio.ensure_future(func(*args, **kwargs)) # Adds the func as a future to the loop
# Leaving our managed code so redirect stdout back to system
with redirect_stdout(sys.__stdout__):
while not fut.done(): # loop until the future is ready
loop._run_once()
result = fut.result()
else: # Normal function. Just execute as normal
result = func(*args, **kwargs)
return result
# TODO: Comment fixASTAwaitError
# TODO: Strip awaits from even more places
def fixASTAwaitError(text, offset):
tokenList = list(tokenize.tokenize(io.BytesIO(text.encode("utf-8")).readline))
def flattenList():
returnList = []
for token in tokenList:
returnList.append((token.type, token.string))
return returnList
def findTokenAtOffset(offset):
for index, token in enumerate(tokenList):
if token.start[1] <= offset and offset < token.end[1]:
return index
return None
def tokenMatchType(index, tokenType):
if tokenList[index].exact_type == tokenType:
return True
else:
return False
def tokenMatchValue(index, value):
if tokenList[index].string == value:
return True
else:
return False
def tokenMatch(index, tokenType, value):
token = tokenList[index]
if token.exact_type == tokenType and token.string == value:
return True
else:
return False
index = findTokenAtOffset(offset)
if index is None:
index = findTokenAtOffset(offset+1)
if index is None:
return None
else:
if not (tokenMatchType(index, tokenize.DOT) or tokenMatchType(index, tokenize.LPAR)):
return None
if tokenMatchType(index, tokenize.LPAR):
# It is at a function (possibly)
if tokenMatchType(index-1, tokenize.NAME):
# Very likely in a function
if tokenMatch(index-2, tokenize.NAME, "await"):
# Found an await I know I can deal with
del tokenList[index-2]
tokenList = flattenList()
return tokenize.untokenize(tokenList).decode("utf-8")
elif tokenMatchType(index, tokenize.DOT):
# Possibly an attribute call
if tokenMatchType(index+1, tokenize.NAME):
if tokenMatchType(index-1, tokenize.NAME):
if tokenMatch(index-2, tokenize.NAME, "await"):
del tokenList[index-2]
tokenList = flattenList()
return tokenize.untokenize(tokenList).decode("utf-8")
return None
# TODO: Comment parseAST
def parseAST(inputText):
for _ in range(50):
try:
outAST = ast.parse(inputText)
break
except SyntaxError as e:
lineno = e.lineno
offset = e.offset
text = e.text.rstrip("\n")
if text[0] != "\n":
text = "\n" + text
fixedLine = fixASTAwaitError(text, offset)
if fixedLine is None:
raise
else:
fixedLine = fixedLine.lstrip("\n")
inputText = inputText.splitlines()
inputText[lineno-1] = fixedLine
inputText = "\n".join(inputText)
outAST = FunctionAsyncRewriter().visit(outAST)
outAST = OutputExprRewriter().visit(outAST)
outAST = FinishedSigWrapper().visit(outAST)
return outAST | 0.296552 | 0.302797 |
from __future__ import print_function
from twitchstream.outputvideo import TwitchBufferedOutputStream
from twitchstream.chat import TwitchChatStream
import argparse
import time
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
required = parser.add_argument_group('required arguments')
required.add_argument('-u', '--username',
help='twitch username',
required=True)
required.add_argument('-o', '--oauth',
help='twitch oauth '
'(visit https://twitchapps.com/tmi/ '
'to create one for your account)',
required=True)
required.add_argument('-s', '--streamkey',
help='twitch streamkey',
required=True)
args = parser.parse_args()
# load two streams:
# * one stream to send the video
# * one stream to interact with the chat
with TwitchBufferedOutputStream(
twitch_stream_key=args.streamkey,
width=640,
height=480,
fps=30.,
enable_audio=True,
verbose=False) as videostream, \
TwitchChatStream(
username=args.username,
oauth=args.oauth,
verbose=False) as chatstream:
# Send a chat message to let everybody know you've arrived
chatstream.send_chat_message("Taking requests!")
frame = np.zeros((480, 640, 3))
frequency = 100
last_phase = 0
# The main loop to create videos
while True:
# Every loop, call to receive messages.
# This is important, when it is not called,
# Twitch will automatically log you out.
# This call is non-blocking.
received = chatstream.twitch_receive_messages()
# process all the messages
if received:
for chat_message in received:
print("Got a message '%s' from %s" % (
chat_message['message'],
chat_message['username']
))
if chat_message['message'] == "red":
frame[:, :, :] = np.array(
[1, 0, 0])[None, None, :]
elif chat_message['message'] == "green":
frame[:, :, :] = np.array(
[0, 1, 0])[None, None, :]
elif chat_message['message'] == "blue":
frame[:, :, :] = np.array(
[0, 0, 1])[None, None, :]
elif chat_message['message'].isdigit():
frequency = int(chat_message['message'])
# If there are not enough video frames left,
# add some more.
if videostream.get_video_frame_buffer_state() < 30:
videostream.send_video_frame(frame)
# If there are not enough audio fragments left,
# add some more, but take care to stay in sync with
# the video! Audio and video buffer separately,
# so they will go out of sync if the number of video
# frames does not match the number of audio samples!
elif videostream.get_audio_buffer_state() < 30:
x = np.linspace(last_phase,
last_phase +
frequency*2*np.pi/videostream.fps,
int(44100 / videostream.fps) + 1)
last_phase = x[-1]
audio = np.sin(x[:-1])
videostream.send_audio(audio, audio)
# If nothing is happening, it is okay to sleep for a while
# and take some pressure of the CPU. But not too long, if
# the buffers run dry, audio and video will go out of sync.
else:
time.sleep(.001) | examples/color.py | from __future__ import print_function
from twitchstream.outputvideo import TwitchBufferedOutputStream
from twitchstream.chat import TwitchChatStream
import argparse
import time
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
required = parser.add_argument_group('required arguments')
required.add_argument('-u', '--username',
help='twitch username',
required=True)
required.add_argument('-o', '--oauth',
help='twitch oauth '
'(visit https://twitchapps.com/tmi/ '
'to create one for your account)',
required=True)
required.add_argument('-s', '--streamkey',
help='twitch streamkey',
required=True)
args = parser.parse_args()
# load two streams:
# * one stream to send the video
# * one stream to interact with the chat
with TwitchBufferedOutputStream(
twitch_stream_key=args.streamkey,
width=640,
height=480,
fps=30.,
enable_audio=True,
verbose=False) as videostream, \
TwitchChatStream(
username=args.username,
oauth=args.oauth,
verbose=False) as chatstream:
# Send a chat message to let everybody know you've arrived
chatstream.send_chat_message("Taking requests!")
frame = np.zeros((480, 640, 3))
frequency = 100
last_phase = 0
# The main loop to create videos
while True:
# Every loop, call to receive messages.
# This is important, when it is not called,
# Twitch will automatically log you out.
# This call is non-blocking.
received = chatstream.twitch_receive_messages()
# process all the messages
if received:
for chat_message in received:
print("Got a message '%s' from %s" % (
chat_message['message'],
chat_message['username']
))
if chat_message['message'] == "red":
frame[:, :, :] = np.array(
[1, 0, 0])[None, None, :]
elif chat_message['message'] == "green":
frame[:, :, :] = np.array(
[0, 1, 0])[None, None, :]
elif chat_message['message'] == "blue":
frame[:, :, :] = np.array(
[0, 0, 1])[None, None, :]
elif chat_message['message'].isdigit():
frequency = int(chat_message['message'])
# If there are not enough video frames left,
# add some more.
if videostream.get_video_frame_buffer_state() < 30:
videostream.send_video_frame(frame)
# If there are not enough audio fragments left,
# add some more, but take care to stay in sync with
# the video! Audio and video buffer separately,
# so they will go out of sync if the number of video
# frames does not match the number of audio samples!
elif videostream.get_audio_buffer_state() < 30:
x = np.linspace(last_phase,
last_phase +
frequency*2*np.pi/videostream.fps,
int(44100 / videostream.fps) + 1)
last_phase = x[-1]
audio = np.sin(x[:-1])
videostream.send_audio(audio, audio)
# If nothing is happening, it is okay to sleep for a while
# and take some pressure of the CPU. But not too long, if
# the buffers run dry, audio and video will go out of sync.
else:
time.sleep(.001) | 0.449876 | 0.107813 |
import importlib
import math
from collections import defaultdict
from itertools import chain
from pathlib import Path
import numpy as np
import pandas as pd
import tensorflow as tf
from transformers import BertTokenizerFast
from common import ModelType
from data import ProtestaData
from models import SequenceClassifier, SequenceTagger
class Inferencer:
def __init__(
self,
model_dir: Path,
input_file: Path):
"""
TODO
"""
self.model_type, self.pretrained_model, self.crf_decoding, self.encoding_mode, self.data_size = model_dir.name.split(
'_')
self.input_file = input_file
self.output_file_name = input_file.with_suffix(
f'.{self.model_type}_{self.pretrained_model}_{self.crf_decoding}_{self.encoding_mode}_{self.data_size}')
num_tags = 2 if self.model_type == 'classifier' else 19
if self.model_type != 'classifier':
self.index2label = {
0: 'B-etime',
1: 'B-fname',
2: 'B-loc',
3: 'B-organizer',
4: 'B-participant',
5: 'B-place',
6: 'B-target',
7: 'B-trigger',
8: 'I-etime',
9: 'I-fname',
10: 'I-loc',
11: 'I-organizer',
12: 'I-participant',
13: 'I-place',
14: 'I-target',
15: 'I-trigger',
16: 'O',
17: 'O',
18: 'O'}
self.tokenizer = BertTokenizerFast.from_pretrained(
self.pretrained_model)
if self.model_type == 'tagger':
self.model = SequenceTagger(
self.pretrained_model, num_tags, self.crf_decoding)
elif self.model_type == 'classifier':
self.model = SequenceClassifier(self.pretrained_model, num_tags)
self.model.load_weights(f'{model_dir.as_posix()}/model.saved_model/')
print(f'Running inference on {input_file} using {model_dir.name}')
self.load_tokenized_data()
def load_tokenized_data(self):
df = pd.read_table(self.input_file, quoting=3, names=['token'], usecols=[0])
df['splits'] = df.token.apply(self.tokenizer.tokenize)
df['ids'] = df.splits.apply(self.tokenizer.convert_tokens_to_ids)
df['sentence_id'] = df.token.str.contains(
'SAMPLE_START').astype(int).cumsum()-1
df = df[~df.token.isin(['SAMPLE_START', '[SEP]'])]
sentence_grouped = df.groupby('sentence_id')
self.df = list(chain.from_iterable(np.array_split(g, math.ceil(
g.ids.apply(len).sum()/509)) for _, g in sentence_grouped))
input_ids = [np.concatenate([
np.array([101]),
chunk.explode('ids').ids.values,
np.array([102])]) for chunk in self.df]
encoded_data = tf.data.Dataset.from_tensor_slices({
'input_ids': tf.ragged.constant(input_ids).to_tensor(0),
'attention_mask': tf.ragged.constant([[1]*len(x) for x in input_ids]).to_tensor(0),
'token_type_ids': tf.ragged.constant([[0]*len(x) for x in input_ids]).to_tensor(0),
})
return encoded_data.batch(8)
def run(self):
data = self.load_tokenized_data()
predictions = self.model.predict(data)['predictions']
output_lines = []
for chunk_id, chunk in enumerate(self.df):
tmp = chunk.explode('ids')
tmp['predictions'] = predictions[chunk_id][1:tmp.shape[0]+1]
for n, g in tmp.groupby(tmp.index):
output_lines.append(
f'{g.token.iloc[0]}\t{self.index2label[g.predictions.iloc[0]]}')
with open(self.input_file, 'r') as f:
for idx, line in enumerate(f):
if line.strip() in ['SAMPLE_START', '[SEP]']:
output_lines.insert(idx, f'{line.strip()}\tO')
elif line.strip() == '':
output_lines.insert(idx, line.strip())
else:
pass
with open(self.output_file_name, 'w') as f:
for line in output_lines:
f.write(f'{line}\n') | inference.py | import importlib
import math
from collections import defaultdict
from itertools import chain
from pathlib import Path
import numpy as np
import pandas as pd
import tensorflow as tf
from transformers import BertTokenizerFast
from common import ModelType
from data import ProtestaData
from models import SequenceClassifier, SequenceTagger
class Inferencer:
def __init__(
self,
model_dir: Path,
input_file: Path):
"""
TODO
"""
self.model_type, self.pretrained_model, self.crf_decoding, self.encoding_mode, self.data_size = model_dir.name.split(
'_')
self.input_file = input_file
self.output_file_name = input_file.with_suffix(
f'.{self.model_type}_{self.pretrained_model}_{self.crf_decoding}_{self.encoding_mode}_{self.data_size}')
num_tags = 2 if self.model_type == 'classifier' else 19
if self.model_type != 'classifier':
self.index2label = {
0: 'B-etime',
1: 'B-fname',
2: 'B-loc',
3: 'B-organizer',
4: 'B-participant',
5: 'B-place',
6: 'B-target',
7: 'B-trigger',
8: 'I-etime',
9: 'I-fname',
10: 'I-loc',
11: 'I-organizer',
12: 'I-participant',
13: 'I-place',
14: 'I-target',
15: 'I-trigger',
16: 'O',
17: 'O',
18: 'O'}
self.tokenizer = BertTokenizerFast.from_pretrained(
self.pretrained_model)
if self.model_type == 'tagger':
self.model = SequenceTagger(
self.pretrained_model, num_tags, self.crf_decoding)
elif self.model_type == 'classifier':
self.model = SequenceClassifier(self.pretrained_model, num_tags)
self.model.load_weights(f'{model_dir.as_posix()}/model.saved_model/')
print(f'Running inference on {input_file} using {model_dir.name}')
self.load_tokenized_data()
def load_tokenized_data(self):
df = pd.read_table(self.input_file, quoting=3, names=['token'], usecols=[0])
df['splits'] = df.token.apply(self.tokenizer.tokenize)
df['ids'] = df.splits.apply(self.tokenizer.convert_tokens_to_ids)
df['sentence_id'] = df.token.str.contains(
'SAMPLE_START').astype(int).cumsum()-1
df = df[~df.token.isin(['SAMPLE_START', '[SEP]'])]
sentence_grouped = df.groupby('sentence_id')
self.df = list(chain.from_iterable(np.array_split(g, math.ceil(
g.ids.apply(len).sum()/509)) for _, g in sentence_grouped))
input_ids = [np.concatenate([
np.array([101]),
chunk.explode('ids').ids.values,
np.array([102])]) for chunk in self.df]
encoded_data = tf.data.Dataset.from_tensor_slices({
'input_ids': tf.ragged.constant(input_ids).to_tensor(0),
'attention_mask': tf.ragged.constant([[1]*len(x) for x in input_ids]).to_tensor(0),
'token_type_ids': tf.ragged.constant([[0]*len(x) for x in input_ids]).to_tensor(0),
})
return encoded_data.batch(8)
def run(self):
data = self.load_tokenized_data()
predictions = self.model.predict(data)['predictions']
output_lines = []
for chunk_id, chunk in enumerate(self.df):
tmp = chunk.explode('ids')
tmp['predictions'] = predictions[chunk_id][1:tmp.shape[0]+1]
for n, g in tmp.groupby(tmp.index):
output_lines.append(
f'{g.token.iloc[0]}\t{self.index2label[g.predictions.iloc[0]]}')
with open(self.input_file, 'r') as f:
for idx, line in enumerate(f):
if line.strip() in ['SAMPLE_START', '[SEP]']:
output_lines.insert(idx, f'{line.strip()}\tO')
elif line.strip() == '':
output_lines.insert(idx, line.strip())
else:
pass
with open(self.output_file_name, 'w') as f:
for line in output_lines:
f.write(f'{line}\n') | 0.464416 | 0.239188 |
__all__ = [
"BasicLinter",
]
from beet import Context
from tokenstream import set_location
from mecha import (
AstCommand,
AstSelector,
Diagnostic,
DiagnosticCollection,
Mecha,
Reducer,
rule,
)
def beet_default(ctx: Context):
mc = ctx.inject(Mecha)
mc.lint.extend(BasicLinter())
class BasicLinter(Reducer):
"""Linter with basic rules."""
@rule(AstCommand, identifier="execute:subcommand")
def execute_run(self, node: AstCommand):
if isinstance(clause := node.arguments[0], AstCommand):
if clause.identifier == "execute:run:subcommand":
raise set_location(
Diagnostic("warn", "Redundant `execute run` clause."),
node,
clause.arguments[0].location.with_horizontal_offset(-1),
)
@rule(AstCommand, identifier="execute:run:subcommand")
def run_execute(self, node: AstCommand):
if isinstance(clause := node.arguments[0], AstCommand):
if clause.identifier == "execute:subcommand":
raise set_location(
Diagnostic("warn", "Redundant `run execute` clause."),
node,
clause.arguments[0].location.with_horizontal_offset(-1),
)
@rule(AstSelector)
def selector_argument_order(self, node: AstSelector):
order = [
"type",
"gamemode",
"inverted gamemode",
"team",
"inverted team",
"inverted type",
"tag",
"inverted tag",
"name",
"inverted name",
"scores",
"predicate",
"inverted predicate",
"advancements",
"nbt",
]
conflict = [-1] * len(order)
with DiagnosticCollection() as diagnostics:
for i, arg in enumerate(node.arguments):
name = "inverted " * arg.inverted + arg.key.value
try:
index = order.index(name)
except ValueError:
continue
j = conflict[index]
if j >= 0:
bad_arg = node.arguments[j]
bad_arg_name = "inverted " * bad_arg.inverted + bad_arg.key.value
d = Diagnostic(
level="warn",
message=f"{name.capitalize()} argument should go before {bad_arg_name}.",
)
diagnostics.add(set_location(d, arg))
for conflict_index in range(index):
if conflict[conflict_index] < 0:
conflict[conflict_index] = i | mecha/contrib/lint_basic.py | __all__ = [
"BasicLinter",
]
from beet import Context
from tokenstream import set_location
from mecha import (
AstCommand,
AstSelector,
Diagnostic,
DiagnosticCollection,
Mecha,
Reducer,
rule,
)
def beet_default(ctx: Context):
mc = ctx.inject(Mecha)
mc.lint.extend(BasicLinter())
class BasicLinter(Reducer):
"""Linter with basic rules."""
@rule(AstCommand, identifier="execute:subcommand")
def execute_run(self, node: AstCommand):
if isinstance(clause := node.arguments[0], AstCommand):
if clause.identifier == "execute:run:subcommand":
raise set_location(
Diagnostic("warn", "Redundant `execute run` clause."),
node,
clause.arguments[0].location.with_horizontal_offset(-1),
)
@rule(AstCommand, identifier="execute:run:subcommand")
def run_execute(self, node: AstCommand):
if isinstance(clause := node.arguments[0], AstCommand):
if clause.identifier == "execute:subcommand":
raise set_location(
Diagnostic("warn", "Redundant `run execute` clause."),
node,
clause.arguments[0].location.with_horizontal_offset(-1),
)
@rule(AstSelector)
def selector_argument_order(self, node: AstSelector):
order = [
"type",
"gamemode",
"inverted gamemode",
"team",
"inverted team",
"inverted type",
"tag",
"inverted tag",
"name",
"inverted name",
"scores",
"predicate",
"inverted predicate",
"advancements",
"nbt",
]
conflict = [-1] * len(order)
with DiagnosticCollection() as diagnostics:
for i, arg in enumerate(node.arguments):
name = "inverted " * arg.inverted + arg.key.value
try:
index = order.index(name)
except ValueError:
continue
j = conflict[index]
if j >= 0:
bad_arg = node.arguments[j]
bad_arg_name = "inverted " * bad_arg.inverted + bad_arg.key.value
d = Diagnostic(
level="warn",
message=f"{name.capitalize()} argument should go before {bad_arg_name}.",
)
diagnostics.add(set_location(d, arg))
for conflict_index in range(index):
if conflict[conflict_index] < 0:
conflict[conflict_index] = i | 0.542863 | 0.160792 |
from torch.utils.data import Dataset
import pymongo
import json
from collections import OrderedDict
import logging
logger = logging.getLogger(__name__)
class MongoWrapper:
"""
Load single turn Q,A data
"""
def __init__(self, config_path, filter_func=None):
"""
1. MongoDB collection들을 통합된 인덱스로 접근할 수 있음
2. 개별 collection의 idx는 개수, 순서, 유니크를 보장해야함
:param config_path: db config 경로
"""
with open(config_path) as fp:
db_config = json.load(fp)
self.db_config = db_config
self.filter_func = filter_func
conn_str = db_config['MONGO_CONNECTION_STRING']
con_db = db_config['MONGO_CONNECTION_DB']
collection_list = db_config['COLLECTIONS']
self.connection = pymongo.MongoClient(conn_str)
self.db = self.connection.get_database(con_db)
self.collections = self._load_collections(collection_list)
self.meta_info = self._load_metainfo(collection_list)
self.ndoc = None
logging.info("[Mongo]: Loaded %s" % self.meta_info)
def __len__(self):
if not self.ndoc:
ndoc = 0
for value in self.meta_info.values():
ndoc += value['num_docs']
self.ndoc = ndoc
return self.ndoc
def __getitem__(self, idx):
docs = []
if isinstance(idx, slice):
for nidx in range(idx.start, idx.stop):
collection_name, idx = self._convert_idx(nidx)
data = self.collections[collection_name].find({'idx': idx})[0]
if self.filter_func:
data = self.filter_func(data)
doc = {'data': data, 'collection_name': collection_name}
docs.append(doc)
return docs
else:
collection_name, idx = self._convert_idx(idx)
data = self.collections[collection_name].find({'idx': idx})[0]
if self.filter_func:
data = self.filter_func(data)
doc = {'data': data, 'collection_name': collection_name}
docs.append(doc)
return docs
def _load_collections(self, collection_list):
if not isinstance(collection_list, list):
collection_list = [collection_list]
collections = dict()
for col in collection_list:
collections[col] = self.db[col]
logger.info("[Mongo]: %s is loaded" % col)
return collections
def _load_metainfo(self, collection_list):
meta_info_conn = self.db['meta_info']
meta_info = OrderedDict()
for item in list(meta_info_conn.find({})):
if item['collection_name'] not in collection_list:
continue
collection_name = item['collection_name']
sub_dict = {'num_docs': item['num_docs']}
meta_info.update({collection_name: sub_dict})
prev = 0
for name, info in meta_info.items():
sub_info = {'sidx': prev, 'eidx': prev + info['num_docs']}
prev = prev + info['num_docs']
info.update(sub_info)
return meta_info
def _convert_idx(self, idx):
"""
collection 따라서 idx 를 변환하기
:param idx:
:return:
"""
collection_name = None
for name, info in self.meta_info.items():
if idx >= info['sidx'] and idx < info['eidx']:
idx = idx - info['sidx']
collection_name = name
break
return collection_name, idx
def _get_update_op(self, doc, fields):
if not isinstance(fields, list):
fields = [fields]
set_dict = dict()
for f in fields:
set_dict[f] = doc[f]
return pymongo.UpdateOne({'_id': doc['_id']}, {"$set": set_dict}, upsert=True)
def _get_insert_op(self, doc):
return pymongo.InsertOne(doc)
def update_docs(self, docs, fields):
if not isinstance(docs, list):
docs = [docs]
ops = []
for doc in docs:
op = self._get_update_op(doc, fields)
ops.append(op)
return ops
def insert_docs(self, docs, collection_name):
if collection_name not in self.collections:
raise KeyError
if not isinstance(docs, list):
docs = [docs]
ops = []
for doc in docs:
op = self._get_insert_op(doc)
ops.append(op)
# logging.info(ops[:10])
self.collections[collection_name].bulk_write(ops, ordered=False)
def update_meta_info(self, collection_name):
is_update = False
if collection_name in self.meta_info:
is_update = True
total_docs = self.collections[collection_name].count_documents({})
logging.info("[Update]: collection - %s " % collection_name)
logging.info("[Update]: total docs - %s " % total_docs)
logging.info("[Update]: meta info - %s " % is_update)
if is_update:
self.db['meta_info'].update_one({'collection_name': collection_name},
{'$set':{'num_docs': total_docs}})
else:
self.db['meta_info'].insert_one({'collection_name': collection_name,
'num_docs': total_docs})
collection_list = self.db_config['COLLECTIONS']
self.meta_info = self._load_metainfo(collection_list)
def export_to_file(self, fpath, collection_name):
logging.info("[Export]: %s" % fpath)
info = self.meta_info[collection_name]
info = dict(info)
num_docs = int(info['num_docs'])
with open(fpath, 'w') as fp:
text_lines = []
for idx in range(num_docs):
doc = self.__getitem__(idx)[0]
text = doc['data']['filt_text']
text += '\n'
text_lines.append(text)
if idx % 10000 == 0:
fp.writelines(text_lines)
text_lines = []
logging.info("[Write]: %d" % idx)
def create_single_index(self, collection_name, index_name, order=1):
self.collections[collection_name].create_index([(index_name, order)]) | libs/mongo_wrapper.py | from torch.utils.data import Dataset
import pymongo
import json
from collections import OrderedDict
import logging
logger = logging.getLogger(__name__)
class MongoWrapper:
"""
Load single turn Q,A data
"""
def __init__(self, config_path, filter_func=None):
"""
1. MongoDB collection들을 통합된 인덱스로 접근할 수 있음
2. 개별 collection의 idx는 개수, 순서, 유니크를 보장해야함
:param config_path: db config 경로
"""
with open(config_path) as fp:
db_config = json.load(fp)
self.db_config = db_config
self.filter_func = filter_func
conn_str = db_config['MONGO_CONNECTION_STRING']
con_db = db_config['MONGO_CONNECTION_DB']
collection_list = db_config['COLLECTIONS']
self.connection = pymongo.MongoClient(conn_str)
self.db = self.connection.get_database(con_db)
self.collections = self._load_collections(collection_list)
self.meta_info = self._load_metainfo(collection_list)
self.ndoc = None
logging.info("[Mongo]: Loaded %s" % self.meta_info)
def __len__(self):
if not self.ndoc:
ndoc = 0
for value in self.meta_info.values():
ndoc += value['num_docs']
self.ndoc = ndoc
return self.ndoc
def __getitem__(self, idx):
docs = []
if isinstance(idx, slice):
for nidx in range(idx.start, idx.stop):
collection_name, idx = self._convert_idx(nidx)
data = self.collections[collection_name].find({'idx': idx})[0]
if self.filter_func:
data = self.filter_func(data)
doc = {'data': data, 'collection_name': collection_name}
docs.append(doc)
return docs
else:
collection_name, idx = self._convert_idx(idx)
data = self.collections[collection_name].find({'idx': idx})[0]
if self.filter_func:
data = self.filter_func(data)
doc = {'data': data, 'collection_name': collection_name}
docs.append(doc)
return docs
def _load_collections(self, collection_list):
if not isinstance(collection_list, list):
collection_list = [collection_list]
collections = dict()
for col in collection_list:
collections[col] = self.db[col]
logger.info("[Mongo]: %s is loaded" % col)
return collections
def _load_metainfo(self, collection_list):
meta_info_conn = self.db['meta_info']
meta_info = OrderedDict()
for item in list(meta_info_conn.find({})):
if item['collection_name'] not in collection_list:
continue
collection_name = item['collection_name']
sub_dict = {'num_docs': item['num_docs']}
meta_info.update({collection_name: sub_dict})
prev = 0
for name, info in meta_info.items():
sub_info = {'sidx': prev, 'eidx': prev + info['num_docs']}
prev = prev + info['num_docs']
info.update(sub_info)
return meta_info
def _convert_idx(self, idx):
"""
collection 따라서 idx 를 변환하기
:param idx:
:return:
"""
collection_name = None
for name, info in self.meta_info.items():
if idx >= info['sidx'] and idx < info['eidx']:
idx = idx - info['sidx']
collection_name = name
break
return collection_name, idx
def _get_update_op(self, doc, fields):
if not isinstance(fields, list):
fields = [fields]
set_dict = dict()
for f in fields:
set_dict[f] = doc[f]
return pymongo.UpdateOne({'_id': doc['_id']}, {"$set": set_dict}, upsert=True)
def _get_insert_op(self, doc):
return pymongo.InsertOne(doc)
def update_docs(self, docs, fields):
if not isinstance(docs, list):
docs = [docs]
ops = []
for doc in docs:
op = self._get_update_op(doc, fields)
ops.append(op)
return ops
def insert_docs(self, docs, collection_name):
if collection_name not in self.collections:
raise KeyError
if not isinstance(docs, list):
docs = [docs]
ops = []
for doc in docs:
op = self._get_insert_op(doc)
ops.append(op)
# logging.info(ops[:10])
self.collections[collection_name].bulk_write(ops, ordered=False)
def update_meta_info(self, collection_name):
is_update = False
if collection_name in self.meta_info:
is_update = True
total_docs = self.collections[collection_name].count_documents({})
logging.info("[Update]: collection - %s " % collection_name)
logging.info("[Update]: total docs - %s " % total_docs)
logging.info("[Update]: meta info - %s " % is_update)
if is_update:
self.db['meta_info'].update_one({'collection_name': collection_name},
{'$set':{'num_docs': total_docs}})
else:
self.db['meta_info'].insert_one({'collection_name': collection_name,
'num_docs': total_docs})
collection_list = self.db_config['COLLECTIONS']
self.meta_info = self._load_metainfo(collection_list)
def export_to_file(self, fpath, collection_name):
logging.info("[Export]: %s" % fpath)
info = self.meta_info[collection_name]
info = dict(info)
num_docs = int(info['num_docs'])
with open(fpath, 'w') as fp:
text_lines = []
for idx in range(num_docs):
doc = self.__getitem__(idx)[0]
text = doc['data']['filt_text']
text += '\n'
text_lines.append(text)
if idx % 10000 == 0:
fp.writelines(text_lines)
text_lines = []
logging.info("[Write]: %d" % idx)
def create_single_index(self, collection_name, index_name, order=1):
self.collections[collection_name].create_index([(index_name, order)]) | 0.546617 | 0.171408 |
from __future__ import print_function
import sys
import argparse
DEFAULT = 8
#Argv voodoo so Kivy does not take over the world of arguments
argv = sys.argv[1:]
sys.argv = sys.argv[0]
parser = argparse.ArgumentParser(description='Read a QRcode as binary data')
#Converting arguments
parser.add_argument('filename', help="The image to interpret")
parser.add_argument('-xblocks', type=int, help="The amount of squares in width. Default is 8")
parser.add_argument('-yblocks', type=int, help="The amount of squares in height. Default is 8")
parser.add_argument('-offsetx', type=int, help="The x-offset in pixels")
parser.add_argument('-offsety', type=int, help="The y-offset in pixels")
parser.add_argument('-markx', type=int, help="The amount of squares of the markers in width. Default is 8.")
parser.add_argument('-marky', type=int, help="The amount of squares of the markers in height. Default is 8.")
parser.add_argument('-marginx', type=int, help="The margin at the right in pixels")
parser.add_argument('-marginy', type=int, help="The margin at the bottom in pixels")
parser.add_argument('--inverse', action='store_true', default=False, help="Inverse the binary data")
#Flag arguments
parser.add_argument('--ascii', action='store_true', default=False, help="Print the binary data as ascii")
parser.add_argument('--binary', action='store_true', default=False, help="Print the binary data as binary")
parser.add_argument('--gui', action='store_true', default=False, help="Experimental GUI mode")
args = parser.parse_args(argv)
if args.gui:
#importing binterpretapp later, so we can use argparse correctly
from binterpret.gui import BinterpretApp
gui = BinterpretApp()
gui.run()
exit(1)
xblocks = args.xblocks if args.xblocks != None else DEFAULT
yblocks = args.yblocks if args.yblocks != None else DEFAULT
markx = args.markx if args.markx != None else DEFAULT
marky = args.marky if args.marky != None else DEFAULT
offsetx = args.offsetx if args.offsetx != None else 0
offsety = args.offsety if args.offsety != None else 0
marginx = args.marginx if args.marginx != None else 0
marginy = args.marginy if args.marginy != None else 0
from binterpret.functions import process_qr
data = process_qr(
args.filename,
xblocks, yblocks,
offsetx, offsety,
marginx, marginy,
markx, marky,
args.inverse
)
if args.binary:
print(data)
if args.ascii:
d = [data[8*i:8*(i+1)] for i in range(len(data)/8)]
d = [int(i, 2) for i in d]
print("".join(chr(i) for i in d)) | binterpret.py | from __future__ import print_function
import sys
import argparse
DEFAULT = 8
#Argv voodoo so Kivy does not take over the world of arguments
argv = sys.argv[1:]
sys.argv = sys.argv[0]
parser = argparse.ArgumentParser(description='Read a QRcode as binary data')
#Converting arguments
parser.add_argument('filename', help="The image to interpret")
parser.add_argument('-xblocks', type=int, help="The amount of squares in width. Default is 8")
parser.add_argument('-yblocks', type=int, help="The amount of squares in height. Default is 8")
parser.add_argument('-offsetx', type=int, help="The x-offset in pixels")
parser.add_argument('-offsety', type=int, help="The y-offset in pixels")
parser.add_argument('-markx', type=int, help="The amount of squares of the markers in width. Default is 8.")
parser.add_argument('-marky', type=int, help="The amount of squares of the markers in height. Default is 8.")
parser.add_argument('-marginx', type=int, help="The margin at the right in pixels")
parser.add_argument('-marginy', type=int, help="The margin at the bottom in pixels")
parser.add_argument('--inverse', action='store_true', default=False, help="Inverse the binary data")
#Flag arguments
parser.add_argument('--ascii', action='store_true', default=False, help="Print the binary data as ascii")
parser.add_argument('--binary', action='store_true', default=False, help="Print the binary data as binary")
parser.add_argument('--gui', action='store_true', default=False, help="Experimental GUI mode")
args = parser.parse_args(argv)
if args.gui:
#importing binterpretapp later, so we can use argparse correctly
from binterpret.gui import BinterpretApp
gui = BinterpretApp()
gui.run()
exit(1)
xblocks = args.xblocks if args.xblocks != None else DEFAULT
yblocks = args.yblocks if args.yblocks != None else DEFAULT
markx = args.markx if args.markx != None else DEFAULT
marky = args.marky if args.marky != None else DEFAULT
offsetx = args.offsetx if args.offsetx != None else 0
offsety = args.offsety if args.offsety != None else 0
marginx = args.marginx if args.marginx != None else 0
marginy = args.marginy if args.marginy != None else 0
from binterpret.functions import process_qr
data = process_qr(
args.filename,
xblocks, yblocks,
offsetx, offsety,
marginx, marginy,
markx, marky,
args.inverse
)
if args.binary:
print(data)
if args.ascii:
d = [data[8*i:8*(i+1)] for i in range(len(data)/8)]
d = [int(i, 2) for i in d]
print("".join(chr(i) for i in d)) | 0.366703 | 0.089216 |
import tensorflow as tf
import numpy as np
from .net import Net
class VAE(Net):
def __init__(self, dil=1, latent_dim=128):
self.weights = {}
self.trainable = {}
self.dil = dil
self.latent_dim = latent_dim
def conv(self, name, inp, ksz, stride=1, bias=True, relu='relu', dil=1):
out = super().conv(
name, inp, ksz, stride=stride, dil=dil,
bias=bias, relu=relu, pad='VALID', trainable=True)
return out
def conv_transpose(
self, name, inp, ksz, outsp,
stride=1, bias=True, pad='VALID', relu='relu'):
out = super().conv_transpose(
name, inp, ksz, outsp, stride=stride,
bias=bias, relu=relu, pad=pad, trainable=True)
return out
def maxpool(inp, ksz, pad='VALID', stride=1):
return tf.nn.pool(inp, [ksz, ksz], 'MAX', pad, [1, 1], [stride, stride])
def prior_net(self, feat):
bsz, hnps, wnps = feat.get_shape().as_list()[:3]
out = self.conv('Prior_1', feat, [1, 1024], bias=True)
out = self.conv('Prior_2', out, [1, 512], bias=True)
out = self.conv('Prior_3', out, [3, 512], bias=True, dil=self.dil)
out = self.conv('Prior_4', out, [3, 256], bias=True, dil=self.dil)
out = self.conv('Prior_5', out, [1, 256], bias=True)
out = self.conv('Prior_6', out, [1, 256], bias=True)
out = self.conv(
'Prior_7', out, [1, self.latent_dim * 2], bias=True, relu=False)
return out[..., :self.latent_dim], out[..., self.latent_dim:]
def posterior_net(self, feat, patch):
bsz, hnps, wnps, psz = patch.get_shape().as_list()
psz = int(np.sqrt(psz))
out = tf.reshape(patch, [-1, psz, psz, 1])
feat = self.conv(
'Posterior_0', feat, [3, 1024], bias=True, dil=self.dil)
feat = self.conv(
'Posterior_1', feat, [3, 256], bias=True, dil=self.dil)
out = self.conv(
'Posterior_2', out, [3, 8], stride=2, bias=True) # 16x16
out = self.conv(
'Posterior_3', out, [2, 16], stride=2, bias=True) # 8x8
out = self.conv(
'Posterior_4', out, [2, 32], stride=2, bias=True) # 4x4
out = self.conv(
'Posterior_5', out, [2, 64], stride=2, bias=True) # 2x2
out = tf.reshape(out, [bsz, hnps, wnps, -1])
out = tf.concat([out, feat], axis=-1)
out = self.conv('Posterior_6', out, [1, 1024], bias=True)
out = self.conv('Posterior_7', out, [1, 512], bias=True)
out = self.conv('Posterior_8', out, [1, 256], bias=True)
out = self.conv(
'Posterior_9', out, [1, self.latent_dim * 2], bias=True, relu=False)
return out[..., :self.latent_dim], out[..., self.latent_dim:]
def generate(self, feat, latent):
bsz, hnps, wnps = feat.get_shape().as_list()[:3]
out = self.conv('Gen_1', feat, [1, 1024], bias=True)
out = self.conv('Gen_2', out, [1, 512], bias=True)
out = self.conv('Gen_3', out, [3, 512], bias=True, dil=self.dil)
out = self.conv('Gen_4', out, [3, 256], bias=True, dil=self.dil)
out = tf.concat([out, latent], axis=-1)
out = tf.reshape(out, [-1, 1, 1, out.get_shape().as_list()[-1]])
hnps, wnps = hnps - 4 * self.dil, wnps - 4 * self.dil
out = self.conv_transpose('Gen_8', out, [3, 256], 3, bias=True)
out = self.conv_transpose('Gen_9', out, [3, 128], 5, bias=True)
out = self.conv_transpose('Gen_10', out, [3, 64], 7, bias=True)
out = tf.image.resize_images(out, [13, 13], align_corners=True)
out = self.conv_transpose('Gen_11', out, [3, 32], 15, bias=True)
out = self.conv_transpose('Gen_12', out, [3, 16], 17, bias=True)
out = tf.image.resize_images(out, [33, 33], align_corners=True)
out = self.conv('Gen_13', out, [1, 1], bias=True, relu='tanh')
out = tf.reshape(out, [bsz, hnps, wnps, -1])
return out | prdepth/net/VAE.py | import tensorflow as tf
import numpy as np
from .net import Net
class VAE(Net):
def __init__(self, dil=1, latent_dim=128):
self.weights = {}
self.trainable = {}
self.dil = dil
self.latent_dim = latent_dim
def conv(self, name, inp, ksz, stride=1, bias=True, relu='relu', dil=1):
out = super().conv(
name, inp, ksz, stride=stride, dil=dil,
bias=bias, relu=relu, pad='VALID', trainable=True)
return out
def conv_transpose(
self, name, inp, ksz, outsp,
stride=1, bias=True, pad='VALID', relu='relu'):
out = super().conv_transpose(
name, inp, ksz, outsp, stride=stride,
bias=bias, relu=relu, pad=pad, trainable=True)
return out
def maxpool(inp, ksz, pad='VALID', stride=1):
return tf.nn.pool(inp, [ksz, ksz], 'MAX', pad, [1, 1], [stride, stride])
def prior_net(self, feat):
bsz, hnps, wnps = feat.get_shape().as_list()[:3]
out = self.conv('Prior_1', feat, [1, 1024], bias=True)
out = self.conv('Prior_2', out, [1, 512], bias=True)
out = self.conv('Prior_3', out, [3, 512], bias=True, dil=self.dil)
out = self.conv('Prior_4', out, [3, 256], bias=True, dil=self.dil)
out = self.conv('Prior_5', out, [1, 256], bias=True)
out = self.conv('Prior_6', out, [1, 256], bias=True)
out = self.conv(
'Prior_7', out, [1, self.latent_dim * 2], bias=True, relu=False)
return out[..., :self.latent_dim], out[..., self.latent_dim:]
def posterior_net(self, feat, patch):
bsz, hnps, wnps, psz = patch.get_shape().as_list()
psz = int(np.sqrt(psz))
out = tf.reshape(patch, [-1, psz, psz, 1])
feat = self.conv(
'Posterior_0', feat, [3, 1024], bias=True, dil=self.dil)
feat = self.conv(
'Posterior_1', feat, [3, 256], bias=True, dil=self.dil)
out = self.conv(
'Posterior_2', out, [3, 8], stride=2, bias=True) # 16x16
out = self.conv(
'Posterior_3', out, [2, 16], stride=2, bias=True) # 8x8
out = self.conv(
'Posterior_4', out, [2, 32], stride=2, bias=True) # 4x4
out = self.conv(
'Posterior_5', out, [2, 64], stride=2, bias=True) # 2x2
out = tf.reshape(out, [bsz, hnps, wnps, -1])
out = tf.concat([out, feat], axis=-1)
out = self.conv('Posterior_6', out, [1, 1024], bias=True)
out = self.conv('Posterior_7', out, [1, 512], bias=True)
out = self.conv('Posterior_8', out, [1, 256], bias=True)
out = self.conv(
'Posterior_9', out, [1, self.latent_dim * 2], bias=True, relu=False)
return out[..., :self.latent_dim], out[..., self.latent_dim:]
def generate(self, feat, latent):
bsz, hnps, wnps = feat.get_shape().as_list()[:3]
out = self.conv('Gen_1', feat, [1, 1024], bias=True)
out = self.conv('Gen_2', out, [1, 512], bias=True)
out = self.conv('Gen_3', out, [3, 512], bias=True, dil=self.dil)
out = self.conv('Gen_4', out, [3, 256], bias=True, dil=self.dil)
out = tf.concat([out, latent], axis=-1)
out = tf.reshape(out, [-1, 1, 1, out.get_shape().as_list()[-1]])
hnps, wnps = hnps - 4 * self.dil, wnps - 4 * self.dil
out = self.conv_transpose('Gen_8', out, [3, 256], 3, bias=True)
out = self.conv_transpose('Gen_9', out, [3, 128], 5, bias=True)
out = self.conv_transpose('Gen_10', out, [3, 64], 7, bias=True)
out = tf.image.resize_images(out, [13, 13], align_corners=True)
out = self.conv_transpose('Gen_11', out, [3, 32], 15, bias=True)
out = self.conv_transpose('Gen_12', out, [3, 16], 17, bias=True)
out = tf.image.resize_images(out, [33, 33], align_corners=True)
out = self.conv('Gen_13', out, [1, 1], bias=True, relu='tanh')
out = tf.reshape(out, [bsz, hnps, wnps, -1])
return out | 0.868325 | 0.509459 |
import numpy as np
from sklearn.base import clone
from ._utils_boot import boot_manual, draw_weights
from ._utils import fit_predict, fit_predict_proba, tune_grid_search
def fit_iivm(y, x, d, z,
             learner_g, learner_m, learner_r, all_smpls, dml_procedure, score,
             n_rep=1, g0_params=None, g1_params=None, m_params=None, r0_params=None, r1_params=None,
             trimming_threshold=1e-12, always_takers=True, never_takers=True):
    """Manual reference implementation of the IIVM (LATE) DML estimator.

    Fits the nuisance models and aggregates the per-repetition estimates
    with the median rule.

    :returns: dict with 'theta', 'se', the per-repetition 'thetas'/'ses'
        and all nuisance predictions.
    """
    n_obs = len(y)
    thetas = np.zeros(n_rep)
    ses = np.zeros(n_rep)
    all_g_hat0, all_g_hat1, all_m_hat, all_r_hat0, all_r_hat1 = [], [], [], [], []
    for rep in range(n_rep):
        smpls = all_smpls[rep]
        nuisances = fit_nuisance_iivm(
            y, x, d, z,
            learner_g, learner_m, learner_r, smpls,
            g0_params=g0_params, g1_params=g1_params, m_params=m_params,
            r0_params=r0_params, r1_params=r1_params,
            trimming_threshold=trimming_threshold,
            always_takers=always_takers, never_takers=never_takers)
        g_hat0, g_hat1, m_hat, r_hat0, r_hat1 = nuisances
        for store, predictions in zip(
                (all_g_hat0, all_g_hat1, all_m_hat, all_r_hat0, all_r_hat1),
                nuisances):
            store.append(predictions)
        # select the aggregation scheme for this repetition
        if dml_procedure == 'dml1':
            estimate = iivm_dml1
        else:
            assert dml_procedure == 'dml2'
            estimate = iivm_dml2
        thetas[rep], ses[rep] = estimate(y, x, d, z,
                                         g_hat0, g_hat1, m_hat, r_hat0, r_hat1,
                                         smpls, score)
    # median aggregation over repetitions (robust to unlucky splits)
    theta = np.median(thetas)
    se = np.sqrt(np.median(np.power(ses, 2) * n_obs + np.power(thetas - theta, 2)) / n_obs)
    return {'theta': theta, 'se': se,
            'thetas': thetas, 'ses': ses,
            'all_g_hat0': all_g_hat0, 'all_g_hat1': all_g_hat1,
            'all_m_hat': all_m_hat, 'all_r_hat0': all_r_hat0, 'all_r_hat1': all_r_hat1}
def fit_nuisance_iivm(y, x, d, z, learner_g, learner_m, learner_r, smpls,
                      g0_params=None, g1_params=None, m_params=None, r0_params=None, r1_params=None,
                      trimming_threshold=1e-12, always_takers=True, never_takers=True):
    """Cross-fit the five IIVM nuisance models.

    :returns: per-fold prediction lists
        ``(g_hat0, g_hat1, m_hat, r_hat0, r_hat1)`` where g* are outcome
        regressions conditional on the instrument, m is the instrument
        propensity and r* the treatment propensities.
    """
    idx_z0 = np.where(z == 0)[0]
    idx_z1 = np.where(z == 1)[0]
    # outcome regressions, fitted on the z == 0 / z == 1 subsamples
    g_hat0_list = fit_predict(y, x, clone(learner_g), g0_params, smpls,
                              train_cond=idx_z0)
    g_hat1_list = fit_predict(y, x, clone(learner_g), g1_params, smpls,
                              train_cond=idx_z1)
    # instrument propensity P(Z=1|X), trimmed away from 0/1
    m_hat_list = fit_predict_proba(z, x, clone(learner_m), m_params, smpls,
                                   trimming_threshold=trimming_threshold)
    # treatment propensities; degenerate cases are hard-coded when
    # always-/never-takers are ruled out a priori
    if always_takers:
        r_hat0_list = fit_predict_proba(d, x, clone(learner_r), r0_params, smpls,
                                        train_cond=idx_z0)
    else:
        r_hat0_list = [np.zeros_like(d[test_index]) for (_, test_index) in smpls]
    if never_takers:
        r_hat1_list = fit_predict_proba(d, x, clone(learner_r), r1_params, smpls,
                                        train_cond=idx_z1)
    else:
        r_hat1_list = [np.ones_like(d[test_index]) for (_, test_index) in smpls]
    return g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list
def tune_nuisance_iivm(y, x, d, z, ml_g, ml_m, ml_r, smpls, n_folds_tune,
                       param_grid_g, param_grid_m, param_grid_r,
                       always_takers=True, never_takers=True):
    """Grid-search tune each IIVM nuisance learner per fold.

    :returns: best-parameter lists
        ``(g0, g1, m, r0, r1)``; the r-lists are ``None`` when the
        corresponding taker group is ruled out (no model is fitted then).
    """
    def _best(tune_res):
        # one best-params dict per cross-fitting fold
        return [res.best_params_ for res in tune_res]

    idx_z0 = np.where(z == 0)[0]
    idx_z1 = np.where(z == 1)[0]
    g0_best_params = _best(tune_grid_search(y, x, ml_g, smpls, param_grid_g, n_folds_tune,
                                            train_cond=idx_z0))
    g1_best_params = _best(tune_grid_search(y, x, ml_g, smpls, param_grid_g, n_folds_tune,
                                            train_cond=idx_z1))
    m_best_params = _best(tune_grid_search(z, x, ml_m, smpls, param_grid_m, n_folds_tune))
    r0_best_params = None
    if always_takers:
        r0_best_params = _best(tune_grid_search(d, x, ml_r, smpls, param_grid_r, n_folds_tune,
                                                train_cond=idx_z0))
    r1_best_params = None
    if never_takers:
        r1_best_params = _best(tune_grid_search(d, x, ml_r, smpls, param_grid_r, n_folds_tune,
                                                train_cond=idx_z1))
    return g0_best_params, g1_best_params, m_best_params, r0_best_params, r1_best_params
def compute_iivm_residuals(y, d, g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list, smpls):
    """Scatter per-fold nuisance predictions into full-length arrays.

    Positions never covered by a test fold remain NaN.

    :returns: ``(u_hat0, u_hat1, w_hat0, w_hat1, g_hat0, g_hat1, m_hat,
        r_hat0, r_hat1)`` with residuals ``u_hat* = y - g_hat*`` and
        ``w_hat* = d - r_hat*``.
    """
    def _nan_like_y():
        # float64 irrespective of y's dtype, NaN-initialised
        return np.full_like(y, np.nan, dtype='float64')

    u_hat0, u_hat1, w_hat0, w_hat1 = (_nan_like_y() for _ in range(4))
    g_hat0, g_hat1, m_hat, r_hat0, r_hat1 = (_nan_like_y() for _ in range(5))
    for idx, (_, test_index) in enumerate(smpls):
        g_hat0[test_index] = g_hat0_list[idx]
        g_hat1[test_index] = g_hat1_list[idx]
        m_hat[test_index] = m_hat_list[idx]
        r_hat0[test_index] = r_hat0_list[idx]
        r_hat1[test_index] = r_hat1_list[idx]
        u_hat0[test_index] = y[test_index] - g_hat0_list[idx]
        u_hat1[test_index] = y[test_index] - g_hat1_list[idx]
        w_hat0[test_index] = d[test_index] - r_hat0_list[idx]
        w_hat1[test_index] = d[test_index] - r_hat1_list[idx]
    return u_hat0, u_hat1, w_hat0, w_hat1, g_hat0, g_hat1, m_hat, r_hat0, r_hat1
def iivm_dml1(y, x, d, z, g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list, smpls, score):
    """DML1 aggregation: solve the LATE score fold-by-fold and average.

    ``x`` is unused; it is accepted for signature symmetry with
    :func:`iivm_dml2`.  Returns ``(theta_hat, se)``.
    """
    thetas = np.zeros(len(smpls))
    n_obs = len(y)
    u_hat0, u_hat1, w_hat0, w_hat1, g_hat0, g_hat1, m_hat, r_hat0, r_hat1 = compute_iivm_residuals(
        y, d, g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list, smpls)
    for idx, (_, test_index) in enumerate(smpls):
        # per-fold estimate computed only on this fold's test observations
        thetas[idx] = iivm_orth(g_hat0[test_index], g_hat1[test_index],
                                m_hat[test_index],
                                r_hat0[test_index], r_hat1[test_index],
                                u_hat0[test_index], u_hat1[test_index],
                                w_hat0[test_index], w_hat1[test_index],
                                z[test_index], score)
    theta_hat = np.mean(thetas)
    if len(smpls) > 1:
        # cross-fitting: variance over all observations
        se = np.sqrt(var_iivm(theta_hat, g_hat0, g_hat1,
                              m_hat, r_hat0, r_hat1,
                              u_hat0, u_hat1, w_hat0, w_hat1,
                              z, score, n_obs))
    else:
        assert len(smpls) == 1
        # no cross-fitting: restrict variance to the single test set
        # (n_obs is deliberately re-bound to the test-set size here)
        test_index = smpls[0][1]
        n_obs = len(test_index)
        se = np.sqrt(var_iivm(theta_hat, g_hat0[test_index], g_hat1[test_index],
                              m_hat[test_index], r_hat0[test_index], r_hat1[test_index],
                              u_hat0[test_index], u_hat1[test_index], w_hat0[test_index], w_hat1[test_index],
                              z[test_index], score, n_obs))
    return theta_hat, se
def iivm_dml2(y, x, d, z, g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list, smpls, score):
    """DML2 aggregation: solve the LATE score jointly over all folds.

    ``x`` is unused; it is accepted for signature symmetry with
    :func:`iivm_dml1`.  Returns ``(theta_hat, se)``.
    """
    residuals = compute_iivm_residuals(
        y, d, g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list, smpls)
    u_hat0, u_hat1, w_hat0, w_hat1, g_hat0, g_hat1, m_hat, r_hat0, r_hat1 = residuals
    theta_hat = iivm_orth(g_hat0, g_hat1, m_hat, r_hat0, r_hat1,
                          u_hat0, u_hat1, w_hat0, w_hat1, z, score)
    se = np.sqrt(var_iivm(theta_hat, g_hat0, g_hat1,
                          m_hat, r_hat0, r_hat1,
                          u_hat0, u_hat1, w_hat0, w_hat1,
                          z, score, len(y)))
    return theta_hat, se
def var_iivm(theta, g_hat0, g_hat1, m_hat, r_hat0, r_hat1, u_hat0, u_hat1, w_hat0, w_hat1, z, score, n_obs):
    """Asymptotic variance of the LATE estimate.

    Sandwich form: E[psi^2] / (n_obs * E[psi_a]^2) where psi_a is the
    derivative part of the orthogonal score.
    """
    assert score == 'LATE'
    # numerator part of the orthogonal score
    score_b = (g_hat1 - g_hat0
               + z * u_hat1 / m_hat
               - (1. - z) * u_hat0 / (1. - m_hat))
    # (negative) derivative part of the orthogonal score
    score_a = (r_hat1 - r_hat0
               + z * w_hat1 / m_hat
               - (1. - z) * w_hat0 / (1. - m_hat))
    psi = score_b - theta * score_a
    return np.mean(np.power(psi, 2)) / (n_obs * np.power(np.mean(score_a), 2))
def iivm_orth(g_hat0, g_hat1, m_hat, r_hat0, r_hat1, u_hat0, u_hat1, w_hat0, w_hat1, z, score):
    """Closed-form solution of the orthogonal LATE score.

    Ratio of the mean outcome score to the mean first-stage score.
    """
    assert score == 'LATE'
    numerator = np.mean(g_hat1 - g_hat0
                        + z * u_hat1 / m_hat
                        - (1. - z) * u_hat0 / (1. - m_hat))
    denominator = np.mean(r_hat1 - r_hat0
                          + z * w_hat1 / m_hat
                          - (1. - z) * w_hat0 / (1. - m_hat))
    return numerator / denominator
def boot_iivm(y, d, z, thetas, ses, all_g_hat0, all_g_hat1, all_m_hat, all_r_hat0, all_r_hat1,
              all_smpls, score, bootstrap, n_rep_boot,
              n_rep=1, apply_cross_fitting=True):
    """Multiplier bootstrap over all repetitions of the IIVM estimator.

    :returns: ``(boot_theta, boot_t_stat)`` with the draws of all
        repetitions stacked horizontally.
    """
    collected_thetas = []
    collected_t_stats = []
    for rep in range(n_rep):
        smpls = all_smpls[rep]
        # without cross-fitting only the single test set enters the bootstrap
        n_obs = len(y) if apply_cross_fitting else len(smpls[0][1])
        weights = draw_weights(bootstrap, n_rep_boot, n_obs)
        rep_theta, rep_t_stat = boot_iivm_single_split(
            thetas[rep], y, d, z,
            all_g_hat0[rep], all_g_hat1[rep], all_m_hat[rep], all_r_hat0[rep], all_r_hat1[rep],
            smpls, score, ses[rep], weights, n_rep_boot, apply_cross_fitting)
        collected_thetas.append(rep_theta)
        collected_t_stats.append(rep_t_stat)
    return np.hstack(collected_thetas), np.hstack(collected_t_stats)
def boot_iivm_single_split(theta, y, d, z, g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list,
                           smpls, score, se, weights, n_rep, apply_cross_fitting):
    """Multiplier bootstrap for a single sample split of the LATE score.

    :returns: ``(boot_theta, boot_t_stat)`` from :func:`boot_manual`.

    Fix: removed the dataset-extraction artifact
    ``| doubleml/tests/_utils_iivm_manual.py | import numpy as np`` that
    was fused onto the final ``return`` statement.
    """
    assert score == 'LATE'
    u_hat0, u_hat1, w_hat0, w_hat1, g_hat0, g_hat1, m_hat, r_hat0, r_hat1 = compute_iivm_residuals(
        y, d, g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list, smpls)
    # J: mean derivative of the score w.r.t. theta
    if apply_cross_fitting:
        J = np.mean(-(r_hat1 - r_hat0
                      + np.divide(np.multiply(z, w_hat1), m_hat)
                      - np.divide(np.multiply(1. - z, w_hat0), 1. - m_hat)))
    else:
        # restrict to the single test set
        test_index = smpls[0][1]
        J = np.mean(-(r_hat1[test_index] - r_hat0[test_index]
                      + np.divide(np.multiply(z[test_index], w_hat1[test_index]), m_hat[test_index])
                      - np.divide(np.multiply(1. - z[test_index], w_hat0[test_index]),
                                  1. - m_hat[test_index])))
    # orthogonal score evaluated at theta
    psi = g_hat1 - g_hat0 \
        + np.divide(np.multiply(z, u_hat1), m_hat) \
        - np.divide(np.multiply(1.-z, u_hat0), 1.-m_hat) \
        - theta*(r_hat1 - r_hat0
                 + np.divide(np.multiply(z, w_hat1), m_hat)
                 - np.divide(np.multiply(1.-z, w_hat0), 1.-m_hat))
    boot_theta, boot_t_stat = boot_manual(psi, J, smpls, se, weights, n_rep, apply_cross_fitting)
    return boot_theta, boot_t_stat
from sklearn.base import clone
from ._utils_boot import boot_manual, draw_weights
from ._utils import fit_predict, fit_predict_proba, tune_grid_search
def fit_iivm(y, x, d, z,
learner_g, learner_m, learner_r, all_smpls, dml_procedure, score,
n_rep=1, g0_params=None, g1_params=None, m_params=None, r0_params=None, r1_params=None,
trimming_threshold=1e-12, always_takers=True, never_takers=True):
n_obs = len(y)
thetas = np.zeros(n_rep)
ses = np.zeros(n_rep)
all_g_hat0 = list()
all_g_hat1 = list()
all_m_hat = list()
all_r_hat0 = list()
all_r_hat1 = list()
for i_rep in range(n_rep):
smpls = all_smpls[i_rep]
g_hat0, g_hat1, m_hat, r_hat0, r_hat1 = fit_nuisance_iivm(
y, x, d, z,
learner_g, learner_m, learner_r, smpls,
g0_params=g0_params, g1_params=g1_params, m_params=m_params, r0_params=r0_params, r1_params=r1_params,
trimming_threshold=trimming_threshold, always_takers=always_takers, never_takers=never_takers)
all_g_hat0.append(g_hat0)
all_g_hat1.append(g_hat1)
all_m_hat.append(m_hat)
all_r_hat0.append(r_hat0)
all_r_hat1.append(r_hat1)
if dml_procedure == 'dml1':
thetas[i_rep], ses[i_rep] = iivm_dml1(y, x, d, z,
g_hat0, g_hat1, m_hat, r_hat0, r_hat1,
smpls, score)
else:
assert dml_procedure == 'dml2'
thetas[i_rep], ses[i_rep] = iivm_dml2(y, x, d, z,
g_hat0, g_hat1, m_hat, r_hat0, r_hat1,
smpls, score)
theta = np.median(thetas)
se = np.sqrt(np.median(np.power(ses, 2) * n_obs + np.power(thetas - theta, 2)) / n_obs)
res = {'theta': theta, 'se': se,
'thetas': thetas, 'ses': ses,
'all_g_hat0': all_g_hat0, 'all_g_hat1': all_g_hat1,
'all_m_hat': all_m_hat, 'all_r_hat0': all_r_hat0, 'all_r_hat1': all_r_hat1}
return res
def fit_nuisance_iivm(y, x, d, z, learner_g, learner_m, learner_r, smpls,
g0_params=None, g1_params=None, m_params=None, r0_params=None, r1_params=None,
trimming_threshold=1e-12, always_takers=True, never_takers=True):
ml_g0 = clone(learner_g)
train_cond0 = np.where(z == 0)[0]
g_hat0_list = fit_predict(y, x, ml_g0, g0_params, smpls,
train_cond=train_cond0)
ml_g1 = clone(learner_g)
train_cond1 = np.where(z == 1)[0]
g_hat1_list = fit_predict(y, x, ml_g1, g1_params, smpls,
train_cond=train_cond1)
ml_m = clone(learner_m)
m_hat_list = fit_predict_proba(z, x, ml_m, m_params, smpls,
trimming_threshold=trimming_threshold)
ml_r0 = clone(learner_r)
if always_takers:
r_hat0_list = fit_predict_proba(d, x, ml_r0, r0_params, smpls,
train_cond=train_cond0)
else:
r_hat0_list = []
for (_, test_index) in smpls:
r_hat0_list.append(np.zeros_like(d[test_index]))
ml_r1 = clone(learner_r)
if never_takers:
r_hat1_list = fit_predict_proba(d, x, ml_r1, r1_params, smpls,
train_cond=train_cond1)
else:
r_hat1_list = []
for (_, test_index) in smpls:
r_hat1_list.append(np.ones_like(d[test_index]))
return g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list
def tune_nuisance_iivm(y, x, d, z, ml_g, ml_m, ml_r, smpls, n_folds_tune,
param_grid_g, param_grid_m, param_grid_r,
always_takers=True, never_takers=True):
train_cond0 = np.where(z == 0)[0]
g0_tune_res = tune_grid_search(y, x, ml_g, smpls, param_grid_g, n_folds_tune,
train_cond=train_cond0)
train_cond1 = np.where(z == 1)[0]
g1_tune_res = tune_grid_search(y, x, ml_g, smpls, param_grid_g, n_folds_tune,
train_cond=train_cond1)
m_tune_res = tune_grid_search(z, x, ml_m, smpls, param_grid_m, n_folds_tune)
if always_takers:
r0_tune_res = tune_grid_search(d, x, ml_r, smpls, param_grid_r, n_folds_tune,
train_cond=train_cond0)
r0_best_params = [xx.best_params_ for xx in r0_tune_res]
else:
r0_best_params = None
if never_takers:
r1_tune_res = tune_grid_search(d, x, ml_r, smpls, param_grid_r, n_folds_tune,
train_cond=train_cond1)
r1_best_params = [xx.best_params_ for xx in r1_tune_res]
else:
r1_best_params = None
g0_best_params = [xx.best_params_ for xx in g0_tune_res]
g1_best_params = [xx.best_params_ for xx in g1_tune_res]
m_best_params = [xx.best_params_ for xx in m_tune_res]
return g0_best_params, g1_best_params, m_best_params, r0_best_params, r1_best_params
def compute_iivm_residuals(y, d, g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list, smpls):
u_hat0 = np.full_like(y, np.nan, dtype='float64')
u_hat1 = np.full_like(y, np.nan, dtype='float64')
w_hat0 = np.full_like(y, np.nan, dtype='float64')
w_hat1 = np.full_like(y, np.nan, dtype='float64')
g_hat0 = np.full_like(y, np.nan, dtype='float64')
g_hat1 = np.full_like(y, np.nan, dtype='float64')
r_hat0 = np.full_like(y, np.nan, dtype='float64')
r_hat1 = np.full_like(y, np.nan, dtype='float64')
m_hat = np.full_like(y, np.nan, dtype='float64')
for idx, (_, test_index) in enumerate(smpls):
u_hat0[test_index] = y[test_index] - g_hat0_list[idx]
u_hat1[test_index] = y[test_index] - g_hat1_list[idx]
w_hat0[test_index] = d[test_index] - r_hat0_list[idx]
w_hat1[test_index] = d[test_index] - r_hat1_list[idx]
g_hat0[test_index] = g_hat0_list[idx]
g_hat1[test_index] = g_hat1_list[idx]
m_hat[test_index] = m_hat_list[idx]
r_hat0[test_index] = r_hat0_list[idx]
r_hat1[test_index] = r_hat1_list[idx]
return u_hat0, u_hat1, w_hat0, w_hat1, g_hat0, g_hat1, m_hat, r_hat0, r_hat1
def iivm_dml1(y, x, d, z, g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list, smpls, score):
thetas = np.zeros(len(smpls))
n_obs = len(y)
u_hat0, u_hat1, w_hat0, w_hat1, g_hat0, g_hat1, m_hat, r_hat0, r_hat1 = compute_iivm_residuals(
y, d, g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list, smpls)
for idx, (_, test_index) in enumerate(smpls):
thetas[idx] = iivm_orth(g_hat0[test_index], g_hat1[test_index],
m_hat[test_index],
r_hat0[test_index], r_hat1[test_index],
u_hat0[test_index], u_hat1[test_index],
w_hat0[test_index], w_hat1[test_index],
z[test_index], score)
theta_hat = np.mean(thetas)
if len(smpls) > 1:
se = np.sqrt(var_iivm(theta_hat, g_hat0, g_hat1,
m_hat, r_hat0, r_hat1,
u_hat0, u_hat1, w_hat0, w_hat1,
z, score, n_obs))
else:
assert len(smpls) == 1
test_index = smpls[0][1]
n_obs = len(test_index)
se = np.sqrt(var_iivm(theta_hat, g_hat0[test_index], g_hat1[test_index],
m_hat[test_index], r_hat0[test_index], r_hat1[test_index],
u_hat0[test_index], u_hat1[test_index], w_hat0[test_index], w_hat1[test_index],
z[test_index], score, n_obs))
return theta_hat, se
def iivm_dml2(y, x, d, z, g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list, smpls, score):
n_obs = len(y)
u_hat0, u_hat1, w_hat0, w_hat1, g_hat0, g_hat1, m_hat, r_hat0, r_hat1 = compute_iivm_residuals(
y, d, g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list, smpls)
theta_hat = iivm_orth(g_hat0, g_hat1, m_hat, r_hat0, r_hat1,
u_hat0, u_hat1, w_hat0, w_hat1, z, score)
se = np.sqrt(var_iivm(theta_hat, g_hat0, g_hat1,
m_hat, r_hat0, r_hat1,
u_hat0, u_hat1, w_hat0, w_hat1,
z, score, n_obs))
return theta_hat, se
def var_iivm(theta, g_hat0, g_hat1, m_hat, r_hat0, r_hat1, u_hat0, u_hat1, w_hat0, w_hat1, z, score, n_obs):
assert score == 'LATE'
var = 1/n_obs * np.mean(np.power(g_hat1 - g_hat0
+ np.divide(np.multiply(z, u_hat1), m_hat)
- np.divide(np.multiply(1.-z, u_hat0), 1.-m_hat)
- theta*(r_hat1 - r_hat0
+ np.divide(np.multiply(z, w_hat1), m_hat)
- np.divide(np.multiply(1.-z, w_hat0), 1.-m_hat)), 2)) \
/ np.power(np.mean(r_hat1 - r_hat0
+ np.divide(np.multiply(z, w_hat1), m_hat)
- np.divide(np.multiply(1.-z, w_hat0), 1.-m_hat)), 2)
return var
def iivm_orth(g_hat0, g_hat1, m_hat, r_hat0, r_hat1, u_hat0, u_hat1, w_hat0, w_hat1, z, score):
assert score == 'LATE'
res = np.mean(g_hat1 - g_hat0
+ np.divide(np.multiply(z, u_hat1), m_hat)
- np.divide(np.multiply(1.-z, u_hat0), 1.-m_hat)) \
/ np.mean(r_hat1 - r_hat0
+ np.divide(np.multiply(z, w_hat1), m_hat)
- np.divide(np.multiply(1.-z, w_hat0), 1.-m_hat))
return res
def boot_iivm(y, d, z, thetas, ses, all_g_hat0, all_g_hat1, all_m_hat, all_r_hat0, all_r_hat1,
all_smpls, score, bootstrap, n_rep_boot,
n_rep=1, apply_cross_fitting=True):
all_boot_theta = list()
all_boot_t_stat = list()
for i_rep in range(n_rep):
smpls = all_smpls[i_rep]
if apply_cross_fitting:
n_obs = len(y)
else:
test_index = smpls[0][1]
n_obs = len(test_index)
weights = draw_weights(bootstrap, n_rep_boot, n_obs)
boot_theta, boot_t_stat = boot_iivm_single_split(
thetas[i_rep], y, d, z,
all_g_hat0[i_rep], all_g_hat1[i_rep], all_m_hat[i_rep], all_r_hat0[i_rep], all_r_hat1[i_rep],
smpls, score, ses[i_rep], weights, n_rep_boot, apply_cross_fitting)
all_boot_theta.append(boot_theta)
all_boot_t_stat.append(boot_t_stat)
boot_theta = np.hstack(all_boot_theta)
boot_t_stat = np.hstack(all_boot_t_stat)
return boot_theta, boot_t_stat
def boot_iivm_single_split(theta, y, d, z, g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list,
smpls, score, se, weights, n_rep, apply_cross_fitting):
assert score == 'LATE'
u_hat0, u_hat1, w_hat0, w_hat1, g_hat0, g_hat1, m_hat, r_hat0, r_hat1 = compute_iivm_residuals(
y, d, g_hat0_list, g_hat1_list, m_hat_list, r_hat0_list, r_hat1_list, smpls)
if apply_cross_fitting:
J = np.mean(-(r_hat1 - r_hat0
+ np.divide(np.multiply(z, w_hat1), m_hat)
- np.divide(np.multiply(1. - z, w_hat0), 1. - m_hat)))
else:
test_index = smpls[0][1]
J = np.mean(-(r_hat1[test_index] - r_hat0[test_index]
+ np.divide(np.multiply(z[test_index], w_hat1[test_index]), m_hat[test_index])
- np.divide(np.multiply(1. - z[test_index], w_hat0[test_index]),
1. - m_hat[test_index])))
psi = g_hat1 - g_hat0 \
+ np.divide(np.multiply(z, u_hat1), m_hat) \
- np.divide(np.multiply(1.-z, u_hat0), 1.-m_hat) \
- theta*(r_hat1 - r_hat0
+ np.divide(np.multiply(z, w_hat1), m_hat)
- np.divide(np.multiply(1.-z, w_hat0), 1.-m_hat))
boot_theta, boot_t_stat = boot_manual(psi, J, smpls, se, weights, n_rep, apply_cross_fitting)
return boot_theta, boot_t_stat | 0.422028 | 0.292725 |
from classtime.logging import logging
logging = logging.getLogger(__name__) # pylint: disable=C0103
import re
class Schedule(object):
    """Represents a 5-day week of 24-hour days

    Each day is split into 48 thirty-minute blocks
    """
    NUM_BLOCKS = 24*2
    """Number of blocks in one day"""
    NUM_DAYS = 5
    DAYS = 'MTWRF'
    """Number of days in a week, and the letters representing each"""
    OPEN = -1
    """Free time"""
    BUSY = -2
    """Busy with a non-school activity"""
    SYMBOLS = 'ABCDEFGHIJKLMNOPQRSTUVWx '
    """Symbols used for drawing a Schedule to the console"""
    # Negative timetable values index SYMBOLS from the end:
    # OPEN (-1) -> ' ' and BUSY (-2) -> 'x'; section numbers 0.. map A..
    SELF_IS_WORSE = True
    SELF_IS_BETTER = False
    """Semantic sorting constants"""
    SIMILARITY_THRESHOLD = 1.00
    """Fraction which must be identical to be similar"""
    DIFFERENCE_THRESHOLD = 1 - SIMILARITY_THRESHOLD

    def __init__(self, sections=None, busy_times=None, preferences=None):
        """Creates a schedule with the given initial conditions

        :param sections: one or more sections to include in the
            class-schedule and the timetable
        :type sections: section dict or list of section dicts
        :param busy_times: one or more sections to include only
            in the timetable
        :type busy_times: section dict or list of section dicts
        """
        # timetable[day][block] holds a section index, OPEN or BUSY
        self.timetable = [[Schedule.OPEN]*Schedule.NUM_BLOCKS
                          for _ in range(Schedule.NUM_DAYS)]
        # one NUM_BLOCKS-wide bitmask per day; a set bit means "occupied"
        # (bit for block b is at position NUM_BLOCKS-b-1, see _add_to_timetable)
        self.timetable_bitmap = [0 for _ in range(Schedule.NUM_DAYS)]
        self.scorer = ScheduleScorer(self, preferences)
        self.preferences = preferences
        # schedules considered near-duplicates of this one (see is_similar)
        self.more_like_this = list()
        self.sections = list()
        self._add_initial_sections(sections)
        self.busy_times = list()
        self._add_initial_busy_times(busy_times)

    def __repr__(self):
        # ASCII rendering of the timetable, one row per weekday
        def timetable_repr(sched, indent):
            # header shows only the hours from 7 AM onward; the dropped
            # prefix length becomes the block offset into each day row
            all_time_columns = ' 0 1 2 3 4 5 6 7 8 9 A B C 1 2 3 4 5 6 7 8 9 A B '
            time_columns = all_time_columns.replace('0 1 2 3 4 5 6 ', '')
            block_offset = len(all_time_columns) - len(time_columns)
            timetable = str()
            timetable += ' '*indent
            timetable += 'scores: ' + str(sched.scorer.read()) + '\n'
            timetable += time_columns
            for daynum, blocks in enumerate(sched.timetable):
                timetable += '\n'
                timetable += ' '*indent
                timetable += '{}: '.format(Schedule.DAYS[daynum])
                for block in blocks[block_offset:]:
                    timetable += Schedule.SYMBOLS[block]
            return timetable + '\n'
        retstr = '\n\n' + \
                 '==============\n' + \
                 ' Schedule\n' + \
                 '==============\n' + \
                 timetable_repr(self, 0)
        if self.more_like_this:
            retstr += 'and {} more like this (similarity >= {})'.format(
                len(self.more_like_this),
                Schedule.SIMILARITY_THRESHOLD)
        return retstr

    def _add_initial_sections(self, sections):
        """Add sections when building a new :py:class:`Schedule`

        :param sections: one or more sections to add
        :type sections: section dict or list of section dicts
        """
        if sections is not None:
            # accept a single dict or a list of dicts
            if not isinstance(sections, list):
                sections = [sections]
            for section in sections:
                self.add_section(section)

    def _add_initial_busy_times(self, busy_times):
        """Add busy_times when building a new :py:class:`Schedule`

        :param busy_times: one or more busy_times to add
        :type busy_times: section dict or list of section dicts
        """
        if busy_times is not None:
            # accept a single dict or a list of dicts
            if not isinstance(busy_times, list):
                busy_times = [busy_times]
            for busy_time in busy_times:
                self.add_busy_time(busy_time)

    def add_section(self, section):
        """Attempts to add a section to the timetable.
        On success, adds it to the section list.

        :param section: the section to add
        :type section: section dict

        If a section has null timetable info (day, startTime, endTime),
        it will not be added.
        """
        try:
            self.attempt_add_to_timetable(section, len(self.sections))
        except ValueError:
            # deliberately swallowed: sections with null timetable info are
            # kept in the section list but never drawn on the timetable
            pass
        self.sections.append(section)
        return self

    def add_busy_time(self, busy_time):
        """Attempts to add a busy_time to the timetable.
        On success, adds it to the busy_time list.

        :param busy_time: the busy_time to add
        :type busy_time: section dict

        If a busy_time has null timetable info (day, startTime, endTime),
        it will not be added.
        """
        try:
            self.attempt_add_to_timetable(busy_time, Schedule.BUSY)
        except ValueError:
            logging.error('Failed to schedule busy time {}'.format(
                busy_time))
        else:
            # unlike add_section, the list is only updated on success
            self.busy_times.append(busy_time)
        return self

    def conflicts(self, section):
        """Checks for a conflict between this :py:class:`Schedule`
        and a section

        :param section: the section to check for conflicts with
        :type sections: section dict
        :returns: whether it conflicts or not
        :rtype: boolean
        """
        if self._has_timetable_conflict(section):
            return True
        if self._has_dependency_conflict(section):
            return True
        return False

    def _has_timetable_conflict(self, section):
        # build a throwaway one-section schedule and intersect its
        # day bitmaps with ours; any shared set bit is an overlap
        other = Schedule(section)
        for day in range(Schedule.NUM_DAYS):
            if other.timetable_bitmap[day] & self.timetable_bitmap[day] != 0:
                return True
        return False

    def _has_dependency_conflict(self, section):
        # other components of the same course (e.g. lecture vs lab) may
        # force a specific companion section via autoEnroll
        potential_dependencies = [other
                                  for other in self.sections
                                  if other.get('course') == section.get('course')
                                  and other.get('component') != section.get('component')]
        for other in potential_dependencies:
            if section.get('autoEnroll') is None \
                    and other.get('autoEnroll') is None:
                continue
            if section.get('component') != other.get('autoEnrollComponent') \
                    and section.get('autoEnrollComponent') != other.get('component'):
                continue
            if section.get('autoEnroll') == other.get('section') \
                    or section.get('section') == other.get('autoEnroll'):
                continue
            # auto-enroll constraint exists but is not satisfied
            return True
        return False

    def is_similar(self, other):
        # True when the schedules share at least SIMILARITY_THRESHOLD
        # of their occupied blocks
        return self._similarity(other) >= Schedule.SIMILARITY_THRESHOLD

    def _similarity(self, other):
        return 1 - self._difference(other)

    def _difference(self, other):
        # fraction of this schedule's occupied blocks that differ from other
        _difference = 0.0
        _scheduled_blocks = sum([bin(day).count('1')
                                 for day in self.timetable_bitmap])
        for day in range(Schedule.NUM_DAYS):
            xordiff = other.timetable_bitmap[day] ^ self.timetable_bitmap[day]
            # each real block difference produces two 1's in the xordiff
            _difference += bin(xordiff).count('1') / 2.0
        if not _scheduled_blocks:
            _other_scheduled_blocks = sum([bin(day).count('1')
                                           for day in other.timetable_bitmap])
            return _other_scheduled_blocks # guard against div by zero
        return 1.0 * _difference / _scheduled_blocks

    def num_similar_schedules(self):
        return len(self.more_like_this)

    def attempt_add_to_timetable(self, section, section_num):
        """Attempts to add a section to the timetable

        :param section: the section to add
        :type section: section dict
        :param int section_num: the index of :py:attr:`Schedule.SYMBOLS` to
            represent this section with
        :raises ValueError: if one or more of:

        * day
        * startTime
        * endTime

        is null
        """
        days = section.get('day')
        start = section.get('startTime')
        end = section.get('endTime')
        if None in [days, start, end]:
            raise ValueError(section.get('class_', '??'))
        start = Schedule._timestr_to_blocknum(start)
        end = Schedule._timestr_to_blocknum(end)
        # `days` is iterated per character, e.g. 'MWF'
        for day in days:
            self._add_to_timetable(day, start, end, section_num)

    def _add_to_timetable(self, day, start, end, section_num):
        """Adds one or more blocks to the timetable

        :param day: the timetable day to add to
        :type day: str of length one
        :param int start: the first block
        :param int end: the last block (inclusive)
        :param int section_num: the index of Schedule.SYMBOLS to
            represent these blocks with
        """
        daynum = Schedule._daystr_to_daynum(day)
        for block in range(start, end+1):
            # block 0 occupies the most-significant bit of the day's bitmap
            self.timetable_bitmap[daynum] |= 1 << (Schedule.NUM_BLOCKS-block-1)
            self.timetable[daynum][block] = section_num

    def clone(self):
        """Clones this schedule

        :returns: a new schedule with identical

        * section list
        * busy_time list
        * timetable
        * preferences

        :rtype: Schedule

        The clone shares the same section/busy-time dict objects
        (shallow copy through the constructor).
        """
        return Schedule(sections=self.sections,
                        busy_times=self.busy_times,
                        preferences=self.preferences)

    def __lt__(self, other):
        # primary key: more scheduled sections is strictly better;
        # tie-break: higher overall score wins
        if len(self.sections) > len(other.sections):
            return Schedule.SELF_IS_BETTER
        elif len(self.sections) < len(other.sections):
            return Schedule.SELF_IS_WORSE
        if self.overall_score() < other.overall_score():
            return Schedule.SELF_IS_WORSE
        else:
            return Schedule.SELF_IS_BETTER

    def overall_score(self):
        return self.scorer.read('overall')

    @staticmethod
    def _timestr_to_blocknum(time):
        """Converts a time string to a block number

        :param str time: string in :ref:`time format <time-format>`
        :returns: block number this time is inside of
        :rtype: int
        :raises ValueError: if time does not match
            :ref:`time format <time-format>`
        """
        if not isinstance(time, str):
            time = str(time)
        match = re.search(r'(\d\d):(\d\d) (\w\w)', time)
        if match is None:
            raise ValueError(r'time must match "\d\d:\d\d [AP]M')
        hour = int(match.group(1))
        minute = int(match.group(2))
        ampm_offset = 0
        # 12 PM stays 12; 1-11 PM shift by 12 (12 AM is NOT mapped to 0,
        # matching the original behavior)
        if hour != 12 and match.group(3) == 'PM':
            ampm_offset = 12
        # NOTE(review): `minute/30` relies on Python 2 integer division
        # (this module also uses copy_reg/im_func); under Python 3 this
        # would produce a float block index -- confirm before porting.
        block = (hour+ampm_offset)*2 + minute/30
        return block

    @staticmethod
    def _daystr_to_daynum(day):
        """Converts a day string to a day number

        :param day: day in Schedule.DAYS
        :type day: str of length one
        :returns: day number this day string represents
        :raises ValueError: if day is not in Schedule.DAYS
        """
        if day not in Schedule.DAYS:
            raise ValueError('day must be in "{}"'.format(Schedule.DAYS))
        return Schedule.DAYS.index(day)
class ScheduleScorer(object):
"""Scores a schedule using a suite of scoring functions
"""
def __init__(self, schedule, preferences=None):
"""Creates a new ScheduleScorer to score the given schedule
:param Schedule schedule: the schedule to be scored
"""
self.schedule = schedule
self.score_values = dict()
if preferences is None:
preferences = dict()
for preference in ['no-marathons', 'day-classes', 'start-early']:
if preference not in preferences or preferences[preference] is None:
preferences[preference] = 1
self.score_info = {
'no-marathons': {
'weight': preferences.get('no-marathons', 1),
'function': self._no_marathons
},
'day-classes': {
'weight': preferences.get('day-classes', 1),
'function': self._day_classes
},
'start-early': {
'weight': preferences.get('start-early', 1),
'function': self._start_early
}
}
def read(self, name='all'):
"""Returns a particular score, or all scores
:param str name: the name of a particular scoring function.
Defaults to 'all', which returns a dictionary
of all scoring functions and their values.
**Special value:** 'overall', which is a
weighted sum of all scores.
"""
self._update()
if name == 'all':
return self.score_values
else:
return self.score_values.get(name)
def _update(self):
"""Update all scores by calculating them individually
Also calculates 'overall', which is a weighted sum of all
scoring functions.
"""
self.score_values['overall'] = 0
if not len(self.schedule.sections):
return
for name in self.score_info.keys():
self.score_values.update({
name: self._weight(name) * self._score(name)
})
self.score_values['overall'] = sum(self.score_values.values())
def _weight(self, name):
"""Return the weight of a particular scoring function
:param str name: the name of the scoring function
"""
info = self.score_info.get(name)
if info is not None:
return info.get('weight', 1)
return None
def _score(self, name):
"""Run a particular scoring function, and return its result
:param str name: the name of the scoring function
"""
info = self.score_info.get(name)
if info is not None:
if info.get('weight', 0) == 0:
return 0
return info.get('function', lambda: 0)()
else:
return 0
def _no_marathons(self):
"""Scores based on the class spread throughout the day
* + weight: spread out. More breaks in between classes
* 0 weight: -no effect-
* - weight: clumped up. Less breaks in between classes
"""
_decent_average_length = 4 # 2 blocks per hour
def average_session(day_timetable):
session_length = 0
session_lengths = 0
num_sessions = 0
for block in day_timetable:
if block != Schedule.OPEN:
session_length += 1
else:
session_lengths += session_length
num_sessions += 1
session_length = 0
if not num_sessions:
num_session = 1 # guard against div by zero
return (1.0 * session_lengths) / num_sessions
_decent_sum_of_longest = 2 * 3 * 5 # 2block/hr, 3 hours, 5 days
def longest_session(day_bitmap):
longest_marathon = 0
while day_bitmap:
day_bitmap &= (day_bitmap << 1)
longest_marathon += 1
return longest_marathon
maxes = [longest_session(day_bitmap)
for day_bitmap in self.schedule.timetable_bitmap]
avges = [average_session(day_timetable)
for day_timetable in self.schedule.timetable]
sum_of_longest = sum(maxes)
average_length = sum(avges) / len(avges)
# decent - actual, since smaller values are better
score = 0
score += _decent_sum_of_longest - sum_of_longest
score += _decent_average_length - average_length
return 0.5 * score
def _day_classes(self):
"""Scores based on having day classes versus night classes
* + weight: classes end before 5pm
* 0 weight: -no effect-
* - weight: classes start at or after 5pm
"""
# 0 1 2 3 4 5 6 7 8 9 A B C 1 2 3 4 5 6 7 8 9 A B
night_zone = int('111111111111111100000000000000000011111111111111', 2)
_decent_avg_night_blocks = 0
def num_night_blocks(day_bitmap):
return bin(day_bitmap & night_zone).count('1')
night_blocks = [num_night_blocks(day_bitmap)
for day_bitmap in self.schedule.timetable_bitmap]
avg_night_blocks = 1.0 * sum(night_blocks) / len(night_blocks)
# decent - actual, because smaller values are better
score = 0
score += _decent_avg_night_blocks - avg_night_blocks
return 1.5 * score
def _start_early(self):
    """Score starting early versus starting late.

    * + weight: start early
    * 0 weight: -no effect-
    * - weight: start late
    """
    _decent_early_start_block = 9*2  # block index of 9am (2 blocks/hr)
    non_class = (Schedule.OPEN, Schedule.BUSY)
    def first_class_block(day_timetable):
        # Index of the first block occupied by an actual class, else None
        for index, block in enumerate(day_timetable):
            if block not in non_class:
                return index
        return None
    starts = [first_class_block(day) for day in self.schedule.timetable]
    starts = [blk for blk in starts if blk is not None]
    if not starts:
        return 0  # no classes at all; guard against div by zero
    avg_start_block = 1.0 * sum(starts) / len(starts)
    # decent - actual
    return _decent_early_start_block - avg_start_block
# http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods#edit2155350
def _pickle_method(method):
    """Allow pickling of Schedule object
    This is necessary for multiprocessing.Queue.put() and
    multiprocessing.Queue.get()

    Returns the ``(callable, args)`` pair expected by ``copy_reg.pickle``
    so bound methods can be reconstructed by :func:`_unpickle_method`.
    NOTE: ``im_func``/``im_self``/``im_class`` exist on Python 2 bound
    methods only; this helper is Python-2 specific.
    """
    func_name = method.im_func.__name__
    obj = method.im_self
    cls = method.im_class
    return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
"""Allow pickling of Schedule object
This is necessary for multiprocessing.Queue.put() and
multiprocessing.Queue.get()
"""
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
import copy_reg
import types
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method) | classtime/brain/scheduling/schedule.py | from classtime.logging import logging
logging = logging.getLogger(__name__) # pylint: disable=C0103
import re
class Schedule(object):
"""Represents a 5-day week of 24-hour days
Each day is split into 48 thirty-minute blocks
"""
NUM_BLOCKS = 24*2
"""Number of blocks in one day"""
NUM_DAYS = 5
DAYS = 'MTWRF'
"""Number of days in a week, and the letters representing each"""
OPEN = -1
"""Free time"""
BUSY = -2
"""Busy with a non-school activity"""
SYMBOLS = 'ABCDEFGHIJKLMNOPQRSTUVWx '
"""Symbols used for drawing a Schedule to the console"""
SELF_IS_WORSE = True
SELF_IS_BETTER = False
"""Semantic sorting constants"""
SIMILARITY_THRESHOLD = 1.00
"""Fraction which must be identical to be similar"""
DIFFERENCE_THRESHOLD = 1 - SIMILARITY_THRESHOLD
def __init__(self, sections=None, busy_times=None, preferences=None):
"""Creates a schedule with the given initial conditions
:param sections: one or more sections to include in the
class-schedule and the timetable
:type sections: section dict or list of section dicts
:param busy_times: one or more sections to include only
in the timetable
:type busy_times: section dict or list of section dicts
"""
self.timetable = [[Schedule.OPEN]*Schedule.NUM_BLOCKS
for _ in range(Schedule.NUM_DAYS)]
self.timetable_bitmap = [0 for _ in range(Schedule.NUM_DAYS)]
self.scorer = ScheduleScorer(self, preferences)
self.preferences = preferences
self.more_like_this = list()
self.sections = list()
self._add_initial_sections(sections)
self.busy_times = list()
self._add_initial_busy_times(busy_times)
def __repr__(self):
def timetable_repr(sched, indent):
all_time_columns = ' 0 1 2 3 4 5 6 7 8 9 A B C 1 2 3 4 5 6 7 8 9 A B '
time_columns = all_time_columns.replace('0 1 2 3 4 5 6 ', '')
block_offset = len(all_time_columns) - len(time_columns)
timetable = str()
timetable += ' '*indent
timetable += 'scores: ' + str(sched.scorer.read()) + '\n'
timetable += time_columns
for daynum, blocks in enumerate(sched.timetable):
timetable += '\n'
timetable += ' '*indent
timetable += '{}: '.format(Schedule.DAYS[daynum])
for block in blocks[block_offset:]:
timetable += Schedule.SYMBOLS[block]
return timetable + '\n'
retstr = '\n\n' + \
'==============\n' + \
' Schedule\n' + \
'==============\n' + \
timetable_repr(self, 0)
if self.more_like_this:
retstr += 'and {} more like this (similarity >= {})'.format(
len(self.more_like_this),
Schedule.SIMILARITY_THRESHOLD)
return retstr
def _add_initial_sections(self, sections):
"""Add sections when building a new :py:class:`Schedule`
:param sections: one or more sections to add
:type sections: section dict or list of section dicts
"""
if sections is not None:
if not isinstance(sections, list):
sections = [sections]
for section in sections:
self.add_section(section)
def _add_initial_busy_times(self, busy_times):
"""Add busy_times when building a new :py:class:`Schedule`
:param busy_times: one or more busy_times to add
:type busy_times: section dict or list of section dicts
"""
if busy_times is not None:
if not isinstance(busy_times, list):
busy_times = [busy_times]
for busy_time in busy_times:
self.add_busy_time(busy_time)
def add_section(self, section):
"""Attempts to add a section to the timetable.
On success, adds it to the section list.
:param section: the section to add
:type section: section dict
If a section has null timetable info (day, startTime, endTime),
it will not be added.
"""
try:
self.attempt_add_to_timetable(section, len(self.sections))
except ValueError:
pass
self.sections.append(section)
return self
def add_busy_time(self, busy_time):
"""Attempts to add a busy_time to the timetable.
On success, adds it to the busy_time list.
:param busy_time: the busy_time to add
:type busy_time: section dict
If a busy_time has null timetable info (day, startTime, endTime),
it will not be added.
"""
try:
self.attempt_add_to_timetable(busy_time, Schedule.BUSY)
except ValueError:
logging.error('Failed to schedule busy time {}'.format(
busy_time))
else:
self.busy_times.append(busy_time)
return self
def conflicts(self, section):
"""Checks for a conflict between this :py:class:`Schedule`
and a section
:param section: the section to check for conflicts with
:type sections: section dict
:returns: whether it conflicts or not
:rtype: boolean
"""
if self._has_timetable_conflict(section):
return True
if self._has_dependency_conflict(section):
return True
return False
def _has_timetable_conflict(self, section):
other = Schedule(section)
for day in range(Schedule.NUM_DAYS):
if other.timetable_bitmap[day] & self.timetable_bitmap[day] != 0:
return True
return False
def _has_dependency_conflict(self, section):
potential_dependencies = [other
for other in self.sections
if other.get('course') == section.get('course')
and other.get('component') != section.get('component')]
for other in potential_dependencies:
if section.get('autoEnroll') is None \
and other.get('autoEnroll') is None:
continue
if section.get('component') != other.get('autoEnrollComponent') \
and section.get('autoEnrollComponent') != other.get('component'):
continue
if section.get('autoEnroll') == other.get('section') \
or section.get('section') == other.get('autoEnroll'):
continue
return True
return False
def is_similar(self, other):
return self._similarity(other) >= Schedule.SIMILARITY_THRESHOLD
def _similarity(self, other):
return 1 - self._difference(other)
def _difference(self, other):
_difference = 0.0
_scheduled_blocks = sum([bin(day).count('1')
for day in self.timetable_bitmap])
for day in range(Schedule.NUM_DAYS):
xordiff = other.timetable_bitmap[day] ^ self.timetable_bitmap[day]
# each real block difference produces two 1's in the xordiff
_difference += bin(xordiff).count('1') / 2.0
if not _scheduled_blocks:
_other_scheduled_blocks = sum([bin(day).count('1')
for day in other.timetable_bitmap])
return _other_scheduled_blocks # guard against div by zero
return 1.0 * _difference / _scheduled_blocks
def num_similar_schedules(self):
return len(self.more_like_this)
def attempt_add_to_timetable(self, section, section_num):
"""Attempts to add a section to the timetable
:param section: the section to add
:type section: section dict
:param int section_num: the index of :py:attr:`Schedule.SYMBOLS` to
represent this section with
:raises ValueError: if one or more of:
* day
* startTime
* endTime
is null
"""
days = section.get('day')
start = section.get('startTime')
end = section.get('endTime')
if None in [days, start, end]:
raise ValueError(section.get('class_', '??'))
start = Schedule._timestr_to_blocknum(start)
end = Schedule._timestr_to_blocknum(end)
for day in days:
self._add_to_timetable(day, start, end, section_num)
def _add_to_timetable(self, day, start, end, section_num):
"""Adds one or more blocks to the timetable
:param day: the timetable day to add to
:type day: str of length one
:param int start: the first block
:param int end: the last block (inclusive)
:param int section_num: the index of Schedule.SYMBOLS to
represent these blocks with
"""
daynum = Schedule._daystr_to_daynum(day)
for block in range(start, end+1):
self.timetable_bitmap[daynum] |= 1 << (Schedule.NUM_BLOCKS-block-1)
self.timetable[daynum][block] = section_num
def clone(self):
"""Clones this schedule
:returns: a new schedule with identical
* section list
* busy_time list
* timetable
* preferences
:rtype: Schedule
"""
return Schedule(sections=self.sections,
busy_times=self.busy_times,
preferences=self.preferences)
def __lt__(self, other):
if len(self.sections) > len(other.sections):
return Schedule.SELF_IS_BETTER
elif len(self.sections) < len(other.sections):
return Schedule.SELF_IS_WORSE
if self.overall_score() < other.overall_score():
return Schedule.SELF_IS_WORSE
else:
return Schedule.SELF_IS_BETTER
def overall_score(self):
return self.scorer.read('overall')
@staticmethod
def _timestr_to_blocknum(time):
"""Converts a time string to a block number
:param str time: string in :ref:`time format <time-format>`
:returns: block number this time is inside of
:rtype: int
:raises ValueError: if time does not match
:ref:`time format <time-format>`
"""
if not isinstance(time, str):
time = str(time)
match = re.search(r'(\d\d):(\d\d) (\w\w)', time)
if match is None:
raise ValueError(r'time must match "\d\d:\d\d [AP]M')
hour = int(match.group(1))
minute = int(match.group(2))
ampm_offset = 0
if hour != 12 and match.group(3) == 'PM':
ampm_offset = 12
block = (hour+ampm_offset)*2 + minute/30
return block
@staticmethod
def _daystr_to_daynum(day):
"""Converts a day string to a day number
:param day: day in Schedule.DAYS
:type day: str of length one
:returns: day number this day string represents
:raises ValueError: if day is not in Schedule.DAYS
"""
if day not in Schedule.DAYS:
raise ValueError('day must be in "{}"'.format(Schedule.DAYS))
return Schedule.DAYS.index(day)
class ScheduleScorer(object):
"""Scores a schedule using a suite of scoring functions
"""
def __init__(self, schedule, preferences=None):
"""Creates a new ScheduleScorer to score the given schedule
:param Schedule schedule: the schedule to be scored
"""
self.schedule = schedule
self.score_values = dict()
if preferences is None:
preferences = dict()
for preference in ['no-marathons', 'day-classes', 'start-early']:
if preference not in preferences or preferences[preference] is None:
preferences[preference] = 1
self.score_info = {
'no-marathons': {
'weight': preferences.get('no-marathons', 1),
'function': self._no_marathons
},
'day-classes': {
'weight': preferences.get('day-classes', 1),
'function': self._day_classes
},
'start-early': {
'weight': preferences.get('start-early', 1),
'function': self._start_early
}
}
def read(self, name='all'):
"""Returns a particular score, or all scores
:param str name: the name of a particular scoring function.
Defaults to 'all', which returns a dictionary
of all scoring functions and their values.
**Special value:** 'overall', which is a
weighted sum of all scores.
"""
self._update()
if name == 'all':
return self.score_values
else:
return self.score_values.get(name)
def _update(self):
"""Update all scores by calculating them individually
Also calculates 'overall', which is a weighted sum of all
scoring functions.
"""
self.score_values['overall'] = 0
if not len(self.schedule.sections):
return
for name in self.score_info.keys():
self.score_values.update({
name: self._weight(name) * self._score(name)
})
self.score_values['overall'] = sum(self.score_values.values())
def _weight(self, name):
"""Return the weight of a particular scoring function
:param str name: the name of the scoring function
"""
info = self.score_info.get(name)
if info is not None:
return info.get('weight', 1)
return None
def _score(self, name):
"""Run a particular scoring function, and return its result
:param str name: the name of the scoring function
"""
info = self.score_info.get(name)
if info is not None:
if info.get('weight', 0) == 0:
return 0
return info.get('function', lambda: 0)()
else:
return 0
def _no_marathons(self):
    """Score the spread of classes throughout the day.

    Rewards schedules with breaks between classes and penalizes long
    uninterrupted "marathon" sessions.

    * + weight: spread out. More breaks in between classes
    * 0 weight: -no effect-
    * - weight: clumped up. Less breaks in between classes
    """
    _decent_average_length = 4  # 2 blocks per hour -> a 2-hour session
    def average_session(day_timetable):
        # Mean length (in blocks) of contiguous non-OPEN runs in one day
        session_length = 0
        session_lengths = 0
        num_sessions = 0
        for block in day_timetable:
            if block != Schedule.OPEN:
                session_length += 1
            else:
                session_lengths += session_length
                num_sessions += 1
                session_length = 0
        if not num_sessions:
            # Bugfix: was 'num_session = 1' (typo), which left num_sessions
            # at 0 and raised ZeroDivisionError for a day with no OPEN blocks
            num_sessions = 1  # guard against div by zero
        return (1.0 * session_lengths) / num_sessions
    _decent_sum_of_longest = 2 * 3 * 5  # 2block/hr, 3 hours, 5 days
    def longest_session(day_bitmap):
        # Longest run of consecutive 1-bits == longest marathon, in blocks
        longest_marathon = 0
        while day_bitmap:
            day_bitmap &= (day_bitmap << 1)
            longest_marathon += 1
        return longest_marathon
    maxes = [longest_session(day_bitmap)
             for day_bitmap in self.schedule.timetable_bitmap]
    avges = [average_session(day_timetable)
             for day_timetable in self.schedule.timetable]
    sum_of_longest = sum(maxes)
    average_length = sum(avges) / len(avges)
    # decent - actual, since smaller values are better
    score = 0
    score += _decent_sum_of_longest - sum_of_longest
    score += _decent_average_length - average_length
    return 0.5 * score
def _day_classes(self):
"""Scores based on having day classes versus night classes
* + weight: classes end before 5pm
* 0 weight: -no effect-
* - weight: classes start at or after 5pm
"""
# 0 1 2 3 4 5 6 7 8 9 A B C 1 2 3 4 5 6 7 8 9 A B
night_zone = int('111111111111111100000000000000000011111111111111', 2)
_decent_avg_night_blocks = 0
def num_night_blocks(day_bitmap):
return bin(day_bitmap & night_zone).count('1')
night_blocks = [num_night_blocks(day_bitmap)
for day_bitmap in self.schedule.timetable_bitmap]
avg_night_blocks = 1.0 * sum(night_blocks) / len(night_blocks)
# decent - actual, because smaller values are better
score = 0
score += _decent_avg_night_blocks - avg_night_blocks
return 1.5 * score
def _start_early(self):
"""Scores based on starting early or late
* + weight: start early
* 0 weight: -no effect-
* - weight: start late
"""
_decent_early_start_block = 9*2 # 2 blocks per hour
def start_block(day_timetable):
for i, block in enumerate(day_timetable):
if block not in [Schedule.OPEN, Schedule.BUSY]:
return i
return None
start_blocks = [start_block(day_timetable)
for day_timetable in self.schedule.timetable]
start_blocks = [start_block for start_block in start_blocks
if start_block is not None]
if not len(start_blocks):
return 0 # guard against div by zero
avg_start_block = 1.0 * sum(start_blocks) / len(start_blocks)
# decent - actual
score = 0
score += _decent_early_start_block - avg_start_block
return score
# http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods#edit2155350
def _pickle_method(method):
"""Allow pickling of Schedule object
This is necessary for multiprocessing.Queue.put() and
multiprocessing.Queue.get()
"""
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
"""Allow pickling of Schedule object
This is necessary for multiprocessing.Queue.put() and
multiprocessing.Queue.get()
"""
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
import copy_reg
import types
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method) | 0.66061 | 0.261549 |
from torch import optim
from torch.nn import functional as F
import torch
from dataset.factory import DatasetModule
from domain.base import Module, Hyperparameters
from domain.metadata import Metadata
from model.factory import ModelModule
from logger import logger
from trainer.base import TrainerBase
from trainer.cnn_custom_trainer import CNNCustomTrainer
class TrainerModule(Module):
    """Factory module that selects and drives a concrete trainer.

    Maps ``metadata.model_name`` to a :class:`TrainerBase` subclass,
    builds it with the model/dataset modules plus default
    hyperparameters, and exposes :meth:`do` to run training or
    inference.
    """
    def __init__(self, metadata: Metadata, model_module: ModelModule, dataset_module: DatasetModule, *args, **kwargs):
        super(TrainerModule, self).__init__(*args, **kwargs)
        self.metadata = metadata
        self.model_module = model_module
        self.dataset_module = dataset_module
        # Concrete trainer chosen by create(); stays None for unknown models
        self.trainer: TrainerBase = None
        # Create
        self.create()
    def create(self):
        """Instantiate the trainer matching ``metadata.model_name``.

        Known names: ``"cnn_custom"`` -> :class:`CNNCustomTrainer`;
        ``"model_1"`` is a placeholder.  Unknown names leave
        ``self.trainer`` as ``None``.

        :returns: ``self``, to allow chaining
        """
        # trainer_factory = cls(model_name=model_name)
        metadata = self.metadata
        model_name = self.metadata.model_name
        model_module = self.model_module
        dataset_module = self.dataset_module
        trainer = None
        if model_name == "cnn_custom":
            trainer = CNNCustomTrainer(
                metadata=metadata,
                model_module=model_module,
                dataset_module=dataset_module,
                hparams=TrainerModule.get_hyperparameters(model_name=model_name),
                # NOTE(review): 'self.arg' is not assigned in __init__ and is
                # presumably set by the Module base class -- confirm; if it
                # is not, this line raises AttributeError (self.args typo?)
                **self.arg
            )
        elif model_name == "model_1":
            pass
        # Set
        self.trainer = trainer
        logger.info(f"Trainer selected : '{trainer}'")
        return self
    """
    @TODO
    Move & Modify this method
    """
    @classmethod
    def get_hyperparameters(cls, model_name):
        """Return the default :class:`Hyperparameters` for *model_name*.

        Only ``"cnn_custom"`` is implemented; any other name returns
        ``None``.
        """
        hyperparameters = None
        if model_name == "cnn_custom":
            hyperparameters = Hyperparameters(
                optimizer_cls=optim.Adam,
                criterion=F.binary_cross_entropy,
                n_epoch=5,
                lr=1e-3,
                hypothesis_threshold=0.5,
                weight_decay=0,
                # device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
            )
        elif model_name == "model_1":
            pass
        return hyperparameters
    def do(self, mode):
        """Run the selected trainer.

        :param mode: ``"train"`` runs ``trainer.train()``;
            ``"inference"`` runs ``trainer.predict()``; any other value
            returns an empty dict
        :returns: result dict produced by the trainer
        """
        logger.info(f"Start to {mode}")
        result_dict = dict()
        if mode == "train":
            result_dict = self.trainer.train()
        elif mode == "inference":
            result_dict = self.trainer.predict()
        logger.info(f"Completed to {mode}")
        return result_dict
from torch.nn import functional as F
import torch
from dataset.factory import DatasetModule
from domain.base import Module, Hyperparameters
from domain.metadata import Metadata
from model.factory import ModelModule
from logger import logger
from trainer.base import TrainerBase
from trainer.cnn_custom_trainer import CNNCustomTrainer
class TrainerModule(Module):
def __init__(self, metadata: Metadata, model_module: ModelModule, dataset_module: DatasetModule, *args, **kwargs):
super(TrainerModule, self).__init__(*args, **kwargs)
self.metadata = metadata
self.model_module = model_module
self.dataset_module = dataset_module
self.trainer: TrainerBase = None
# Create
self.create()
def create(self):
# trainer_factory = cls(model_name=model_name)
metadata = self.metadata
model_name = self.metadata.model_name
model_module = self.model_module
dataset_module = self.dataset_module
trainer = None
if model_name == "cnn_custom":
trainer = CNNCustomTrainer(
metadata=metadata,
model_module=model_module,
dataset_module=dataset_module,
hparams=TrainerModule.get_hyperparameters(model_name=model_name),
**self.arg
)
elif model_name == "model_1":
pass
# Set
self.trainer = trainer
logger.info(f"Trainer selected : '{trainer}'")
return self
"""
@TODO
Move & Modify this method
"""
@classmethod
def get_hyperparameters(cls, model_name):
hyperparameters = None
if model_name == "cnn_custom":
hyperparameters = Hyperparameters(
optimizer_cls=optim.Adam,
criterion=F.binary_cross_entropy,
n_epoch=5,
lr=1e-3,
hypothesis_threshold=0.5,
weight_decay=0,
# device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
)
elif model_name == "model_1":
pass
return hyperparameters
def do(self, mode):
logger.info(f"Start to {mode}")
result_dict = dict()
if mode == "train":
result_dict = self.trainer.train()
elif mode == "inference":
result_dict = self.trainer.predict()
logger.info(f"Completed to {mode}")
return result_dict | 0.773388 | 0.288231 |
import matplotlib.pyplot as plt
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout

# Binary image classifier: two conv/pool stages over 128x128 RGB input,
# then a dense head with dropout and a single sigmoid output.
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(32, (3, 3), input_shape = (128, 128, 3), activation = 'relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(units = 128, activation = 'relu'))
# Bugfix: Keras 2 renamed Dropout's 'p' keyword to 'rate', so Dropout(p=0.5)
# raises TypeError there; passing the value positionally works on both
# Keras 1 and Keras 2.
classifier.add(Dropout(0.5))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size = (128, 128),
                                                 batch_size = 32,
                                                 class_mode = 'binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size = (128, 128),
                                            batch_size = 32,
                                            class_mode = 'binary')
# NOTE(review): steps_per_epoch/validation_steps are batch counts in Keras 2;
# 8000 steps of batch 32 = 256k images per epoch -- confirm this is intended
# (len(training_set) / len(test_set) would cover each set exactly once).
r = classifier.fit_generator(training_set,
                             steps_per_epoch = 8000,
                             epochs = 15,
                             validation_data = test_set,
                             validation_steps = 2000)
# Bugfix: savefig() must run BEFORE show() -- show() flushes the current
# figure, so the original script wrote blank image files.
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('LossVal_loss')
plt.show()
# plot the accuracy on a fresh figure so the loss curves are not mixed in
plt.figure()
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.savefig('AccVal_acc')
plt.show()
classifier.save('cnn.h5')
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(32, (3, 3), input_shape = (128, 128, 3), activation = 'relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dropout(p = 0.5))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('dataset/training_set',
target_size = (128, 128),
batch_size = 32,
class_mode = 'binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',
target_size = (128, 128),
batch_size = 32,
class_mode = 'binary')
r = classifier.fit_generator(training_set,
steps_per_epoch = 8000,
epochs = 15,
validation_data = test_set,
validation_steps = 2000)
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.show()
plt.savefig('LossVal_loss')
# plot the accuracy
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.show()
plt.savefig('AccVal_acc')
classifier.save('cnn.h5') | 0.915259 | 0.667825 |
from oslo_config import cfg
from oslo_config import types
from oslo_log import log as logging
from congress.cfg_validator import parsing
from congress.tests import base
LOG = logging.getLogger(__name__)
OPT_TEST = {
u'positional': False, u'kind': u'BoolOpt',
u'deprecated_reason': None,
u'help': u'Enables or disables inter-process locks.',
u'default': False, u'type': {u'type': u'Boolean'},
u'required': False, u'sample_default': None,
u'deprecated_opts': [{u'group': u'DEFAULT', u'name': None}],
u'deprecated_for_removal': False,
u'dest': u'disable_process_locking',
u'secret': False, u'short': None, u'mutable': False,
u'deprecated_since': None, u'metavar': None,
u'advanced': False, u'name': u'disable_process_locking'}
DICT_NS_TEST = {
u'DEFAULT': {u'object': None, u'namespaces': []},
u'oslo_concurrency': {
u'object': None,
u'namespaces': [[u'oslo.concurrency', [OPT_TEST]]]}}
class TestParsing(base.TestCase):
    """Tests for the unmarshaling of options by the driver"""
    def test_add_namespace(self):
        """Test for adding a namespace"""
        conf = cfg.ConfigOpts()
        parsing.add_namespace(conf, DICT_NS_TEST, 'abcde-12345')
        keys = conf.keys()
        # Only the non-DEFAULT group ('oslo_concurrency') surfaces as a key,
        # and its option from OPT_TEST must be reachable through it
        self.assertEqual(1, len(keys))
        self.assertIn(u'oslo_concurrency', keys)
        self.assertIsNotNone(
            conf.get(u'oslo_concurrency').get(u'disable_process_locking'))
    def test_construct_conf_manager(self):
        """Test for building a conf manager"""
        # construct_conf_manager builds a fresh ConfigOpts from a list of
        # namespace dicts (see DICT_NS_TEST above for the expected shape)
        conf = parsing.construct_conf_manager([DICT_NS_TEST])
        self.assertIsInstance(conf, cfg.ConfigOpts)
        keys = conf.keys()
        self.assertEqual(1, len(keys))
        self.assertIn(u'oslo_concurrency', keys)
    def test_make_group(self):
        """Test for parsing a group"""
        grp = parsing.make_group('group', 'group_title', 'group help')
        self.assertIsInstance(grp, cfg.OptGroup)
        self.assertEqual("group", grp.name)
        self.assertEqual("group_title", grp.title)
    def test_make_opt(self):
        """Test for parsing an option"""
        # Minimal serialized option description of kind 'Opt'/'String',
        # mirroring the machine-readable format used in OPT_TEST
        descr = {
            u'positional': False,
            u'kind': u'Opt',
            u'deprecated_reason': None,
            u'help': u'Help me',
            u'default': None,
            u'type': {u'type': u'String'},
            u'required': False, u'sample_default': None,
            u'deprecated_opts': [], u'deprecated_for_removal': False,
            u'dest': u'name',
            u'secret': False,
            u'short': None,
            u'mutable': False,
            u'deprecated_since': None,
            u'metavar': None,
            u'advanced': False,
            u'name': u'name'}
        # make_opt tags the option with its own id and its namespace id
        opt = parsing.make_opt(descr, 'abcd-1234', 'efgh-5678')
        self.assertIsInstance(opt, parsing.IdentifiedOpt)
        self.assertEqual("name", opt.name)
        self.assertEqual('abcd-1234', opt.id_)
        self.assertEqual('efgh-5678', opt.ns_id)
    def test_make_type(self):
        """Test for parsing a type"""
        typ1 = parsing.make_type({u'type': u'String'})
        self.assertIsInstance(typ1, types.String)
        typ2 = parsing.make_type({u'type': u'Integer'})
        self.assertIsInstance(typ2, types.Integer)
        # Container types carry a nested 'item_type' description
        typ3 = parsing.make_type(
            {u'item_type': {u'type': u'Boolean'}, u'type': u'List'})
        self.assertIsInstance(typ3, types.List)
        self.assertIsInstance(typ3.item_type, types.Boolean)
from oslo_config import types
from oslo_log import log as logging
from congress.cfg_validator import parsing
from congress.tests import base
LOG = logging.getLogger(__name__)
OPT_TEST = {
u'positional': False, u'kind': u'BoolOpt',
u'deprecated_reason': None,
u'help': u'Enables or disables inter-process locks.',
u'default': False, u'type': {u'type': u'Boolean'},
u'required': False, u'sample_default': None,
u'deprecated_opts': [{u'group': u'DEFAULT', u'name': None}],
u'deprecated_for_removal': False,
u'dest': u'disable_process_locking',
u'secret': False, u'short': None, u'mutable': False,
u'deprecated_since': None, u'metavar': None,
u'advanced': False, u'name': u'disable_process_locking'}
DICT_NS_TEST = {
u'DEFAULT': {u'object': None, u'namespaces': []},
u'oslo_concurrency': {
u'object': None,
u'namespaces': [[u'oslo.concurrency', [OPT_TEST]]]}}
class TestParsing(base.TestCase):
"""Tests for the unmarshaling of options by the driver"""
def test_add_namespace(self):
"""Test for adding a namespace"""
conf = cfg.ConfigOpts()
parsing.add_namespace(conf, DICT_NS_TEST, 'abcde-12345')
keys = conf.keys()
self.assertEqual(1, len(keys))
self.assertIn(u'oslo_concurrency', keys)
self.assertIsNotNone(
conf.get(u'oslo_concurrency').get(u'disable_process_locking'))
def test_construct_conf_manager(self):
"""Test for building a conf manager"""
conf = parsing.construct_conf_manager([DICT_NS_TEST])
self.assertIsInstance(conf, cfg.ConfigOpts)
keys = conf.keys()
self.assertEqual(1, len(keys))
self.assertIn(u'oslo_concurrency', keys)
def test_make_group(self):
"""Test for parsing a group"""
grp = parsing.make_group('group', 'group_title', 'group help')
self.assertIsInstance(grp, cfg.OptGroup)
self.assertEqual("group", grp.name)
self.assertEqual("group_title", grp.title)
def test_make_opt(self):
"""Test for parsing an option"""
descr = {
u'positional': False,
u'kind': u'Opt',
u'deprecated_reason': None,
u'help': u'Help me',
u'default': None,
u'type': {u'type': u'String'},
u'required': False, u'sample_default': None,
u'deprecated_opts': [], u'deprecated_for_removal': False,
u'dest': u'name',
u'secret': False,
u'short': None,
u'mutable': False,
u'deprecated_since': None,
u'metavar': None,
u'advanced': False,
u'name': u'name'}
opt = parsing.make_opt(descr, 'abcd-1234', 'efgh-5678')
self.assertIsInstance(opt, parsing.IdentifiedOpt)
self.assertEqual("name", opt.name)
self.assertEqual('abcd-1234', opt.id_)
self.assertEqual('efgh-5678', opt.ns_id)
def test_make_type(self):
"""Test for parsing a type"""
typ1 = parsing.make_type({u'type': u'String'})
self.assertIsInstance(typ1, types.String)
typ2 = parsing.make_type({u'type': u'Integer'})
self.assertIsInstance(typ2, types.Integer)
typ3 = parsing.make_type(
{u'item_type': {u'type': u'Boolean'}, u'type': u'List'})
self.assertIsInstance(typ3, types.List)
self.assertIsInstance(typ3.item_type, types.Boolean) | 0.557845 | 0.198006 |
import BaseHTTPServer, SimpleHTTPServer
import ssl
import os
import base64
import threading
import sys
import random
import gzip
import io
# Config
PORT = 8000
CERT_FILE = '../server.pem'
currCmd = ""
logFileName = '../logs/logs.txt'
log_file = ""
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# Custom headers
def _set_headers(self):
self.send_header("Cache-Control", "private, max-age=0")
self.send_header("Content-Type", "text/html; charset=utf-8")
self.send_header("Vary", "Accept-Encoding")
self.send_header("Connection", "close")
self.end_headers()
# GET events
def do_GET(self):
global currCmd
global log_file
if self.path.startswith("/search"):
self.send_response(200)
self._set_headers()
if currCmd != "":
if currCmd.startswith("FILED "):
filepath= currCmd[6:]
f = open(filepath,"rb")
contents = base64.b64encode(f.read())
f.close()
self.wfile.write(gzip_str("XXPADDINGXXPADDINGXXPADDINGXXFILED " + contents + "\r\n")[::-1])
else:
# padding, because if too short, gzip compress may contain plaintext
self.wfile.write(gzip_str("XXPADDINGXXPADDINGXXPADDINGXX" + currCmd + "\r\n")[::-1])
log_file.write("Sent cmd: " + currCmd + "\n")
log_file.flush()
currCmd = ""
currEncodedCmd = ""
else:
self.send_response(404)
self._set_headers()
self.wfile.write("Not found")
# Save logs
def do_POST(self):
global log_file
if self.path.startswith("/search"):
content_length = int(self.headers['Content-Length'])
resp = gunzip_bytes_obj(self.rfile.read(content_length)[::-1])
resp = resp.replace("XXPADDINGXXPADDINGXXPADDINGXX","")
if resp == "EXITPROC OK.":
stop_server()
elif resp.startswith("FILEU "):
filebuffer = resp[6:]
contents = base64.b64decode(filebuffer)
f = open("file.dat","wb")
f.write(contents)
f.close()
else:
print(resp)
log_file.write("Rcv resp: " + resp + "\n")
log_file.flush()
self.send_response(200)
self._set_headers()
CancelWait()
else:
self.send_response(404)
self._set_headers()
self.wfile.write("Not found")
def log_message(self, format, *args):
global log_file
log_file.write("%s - - [%s] %s\n" %(self.client_address[0],self.log_date_time_string(),format%args))
log_file.flush()
def gzip_str(string_):
    """Gzip-compress a text string and return the compressed bytes."""
    buffer_ = io.BytesIO()
    with gzip.GzipFile(fileobj=buffer_, mode='w') as gz:
        gz.write(string_.encode())
    return buffer_.getvalue()
def gunzip_bytes_obj(bytes_obj):
in_ = io.BytesIO()
in_.write(bytes_obj)
in_.seek(0)
with gzip.GzipFile(fileobj=in_, mode='rb') as fo:
gunzipped_bytes_obj = fo.read()
return gunzipped_bytes_obj.decode()
def CancelWait():
global wait
wait = False
class Colors:
BLACK = "\033[0;30m"
RED = "\033[0;31m"
GREEN = "\033[0;32m"
BROWN = "\033[0;33m"
BLUE = "\033[0;34m"
PURPLE = "\033[0;35m"
CYAN = "\033[0;36m"
LIGHT_GRAY = "\033[0;37m"
DARK_GRAY = "\033[1;30m"
LIGHT_RED = "\033[1;31m"
LIGHT_GREEN = "\033[1;32m"
YELLOW = "\033[1;33m"
LIGHT_BLUE = "\033[1;34m"
LIGHT_PURPLE = "\033[1;35m"
LIGHT_CYAN = "\033[1;36m"
LIGHT_WHITE = "\033[1;37m"
BOLD = "\033[1m"
FAINT = "\033[2m"
ITALIC = "\033[3m"
UNDERLINE = "\033[4m"
BLINK = "\033[5m"
NEGATIVE = "\033[7m"
CROSSED = "\033[9m"
END = "\033[0m"
if not __import__("sys").stdout.isatty():
for _ in dir():
if isinstance(_, str) and _[0] != "_":
locals()[_] = ""
else:
if __import__("platform").system() == "Windows":
kernel32 = __import__("ctypes").windll.kernel32
kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)
del kernel32
# Start http server
def start_server():
global httpd
print(Colors.BLUE + '[!] Server listening on port ' + str(PORT) + ', waiting connection from client...' + Colors.END)
server_class = BaseHTTPServer.HTTPServer
MyHandler.server_version = "Microsoft-IIS/8.5"
MyHandler.sys_version = ""
httpd = server_class(('0.0.0.0', PORT), MyHandler)
httpd.socket = ssl.wrap_socket (httpd.socket, certfile=CERT_FILE, server_side=True)
httpd.serve_forever()
# Exit
def stop_server():
print(Colors.YELLOW + '[!] Exit' + Colors.END)
log_file.close()
os._exit(1)
if __name__ == '__main__':
try:
log_file = open(logFileName, 'a+')
# Start http server in separate thread
daemon = threading.Thread(target=start_server)
daemon.daemon = True
daemon.start()
print ""
while True:
wait = True
currCmd = raw_input("")
# Wait for client's reply
while (wait == True):
pass
except KeyboardInterrupt:
stop_server() | HBS_Server/www/HBS_Server.py |
import BaseHTTPServer, SimpleHTTPServer
import ssl
import os
import base64
import threading
import sys
import random
import gzip
import io
# Config
PORT = 8000
CERT_FILE = '../server.pem'
currCmd = ""
logFileName = '../logs/logs.txt'
log_file = ""
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# Custom headers
def _set_headers(self):
self.send_header("Cache-Control", "private, max-age=0")
self.send_header("Content-Type", "text/html; charset=utf-8")
self.send_header("Vary", "Accept-Encoding")
self.send_header("Connection", "close")
self.end_headers()
# GET events
def do_GET(self):
global currCmd
global log_file
if self.path.startswith("/search"):
self.send_response(200)
self._set_headers()
if currCmd != "":
if currCmd.startswith("FILED "):
filepath= currCmd[6:]
f = open(filepath,"rb")
contents = base64.b64encode(f.read())
f.close()
self.wfile.write(gzip_str("XXPADDINGXXPADDINGXXPADDINGXXFILED " + contents + "\r\n")[::-1])
else:
# padding, because if too short, gzip compress may contain plaintext
self.wfile.write(gzip_str("XXPADDINGXXPADDINGXXPADDINGXX" + currCmd + "\r\n")[::-1])
log_file.write("Sent cmd: " + currCmd + "\n")
log_file.flush()
currCmd = ""
currEncodedCmd = ""
else:
self.send_response(404)
self._set_headers()
self.wfile.write("Not found")
# Save logs
def do_POST(self):
global log_file
if self.path.startswith("/search"):
content_length = int(self.headers['Content-Length'])
resp = gunzip_bytes_obj(self.rfile.read(content_length)[::-1])
resp = resp.replace("XXPADDINGXXPADDINGXXPADDINGXX","")
if resp == "EXITPROC OK.":
stop_server()
elif resp.startswith("FILEU "):
filebuffer = resp[6:]
contents = base64.b64decode(filebuffer)
f = open("file.dat","wb")
f.write(contents)
f.close()
else:
print(resp)
log_file.write("Rcv resp: " + resp + "\n")
log_file.flush()
self.send_response(200)
self._set_headers()
CancelWait()
else:
self.send_response(404)
self._set_headers()
self.wfile.write("Not found")
def log_message(self, format, *args):
global log_file
log_file.write("%s - - [%s] %s\n" %(self.client_address[0],self.log_date_time_string(),format%args))
log_file.flush()
def gzip_str(string_):
out = io.BytesIO()
with gzip.GzipFile(fileobj=out, mode='w') as fo:
fo.write(string_.encode())
bytes_obj = out.getvalue()
return bytes_obj
def gunzip_bytes_obj(bytes_obj):
in_ = io.BytesIO()
in_.write(bytes_obj)
in_.seek(0)
with gzip.GzipFile(fileobj=in_, mode='rb') as fo:
gunzipped_bytes_obj = fo.read()
return gunzipped_bytes_obj.decode()
def CancelWait():
global wait
wait = False
class Colors:
BLACK = "\033[0;30m"
RED = "\033[0;31m"
GREEN = "\033[0;32m"
BROWN = "\033[0;33m"
BLUE = "\033[0;34m"
PURPLE = "\033[0;35m"
CYAN = "\033[0;36m"
LIGHT_GRAY = "\033[0;37m"
DARK_GRAY = "\033[1;30m"
LIGHT_RED = "\033[1;31m"
LIGHT_GREEN = "\033[1;32m"
YELLOW = "\033[1;33m"
LIGHT_BLUE = "\033[1;34m"
LIGHT_PURPLE = "\033[1;35m"
LIGHT_CYAN = "\033[1;36m"
LIGHT_WHITE = "\033[1;37m"
BOLD = "\033[1m"
FAINT = "\033[2m"
ITALIC = "\033[3m"
UNDERLINE = "\033[4m"
BLINK = "\033[5m"
NEGATIVE = "\033[7m"
CROSSED = "\033[9m"
END = "\033[0m"
if not __import__("sys").stdout.isatty():
for _ in dir():
if isinstance(_, str) and _[0] != "_":
locals()[_] = ""
else:
if __import__("platform").system() == "Windows":
kernel32 = __import__("ctypes").windll.kernel32
kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)
del kernel32
# Start http server
def start_server():
global httpd
print(Colors.BLUE + '[!] Server listening on port ' + str(PORT) + ', waiting connection from client...' + Colors.END)
server_class = BaseHTTPServer.HTTPServer
MyHandler.server_version = "Microsoft-IIS/8.5"
MyHandler.sys_version = ""
httpd = server_class(('0.0.0.0', PORT), MyHandler)
httpd.socket = ssl.wrap_socket (httpd.socket, certfile=CERT_FILE, server_side=True)
httpd.serve_forever()
# Exit
def stop_server():
print(Colors.YELLOW + '[!] Exit' + Colors.END)
log_file.close()
os._exit(1)
if __name__ == '__main__':
try:
log_file = open(logFileName, 'a+')
# Start http server in separate thread
daemon = threading.Thread(target=start_server)
daemon.daemon = True
daemon.start()
print ""
while True:
wait = True
currCmd = raw_input("")
# Wait for client's reply
while (wait == True):
pass
except KeyboardInterrupt:
stop_server() | 0.131912 | 0.043855 |
import os
from spack import *
class Mvdtool(CMakePackage):
"""MVD3 neuroscience file format parser and tool"""
homepage = "https://github.com/BlueBrain/MVDTool"
url = "https://github.com/BlueBrain/MVDTool.git"
git = "https://github.com/BlueBrain/MVDTool.git"
version('develop', git=url)
version('2.2.0', tag='v2.2.0', clean=False)
version('2.1.0', tag='v2.1.0', clean=False)
version('2.0.0', tag='v2.0.0', clean=False)
version('1.5.1', tag='v1.5.1')
version('1.5', tag='v1.5')
version('1.4', tag='v1.4')
variant('mpi', default=True, description="Enable MPI backend")
variant('python', default=False, description="Enable Python bindings")
depends_on('boost')
depends_on('cmake', type='build')
depends_on('py-setuptools-scm', type='build', when='@2:')
depends_on('py-setuptools', type='build', when='@2:')
depends_on('hdf5+mpi', when='+mpi')
depends_on('hdf5~mpi', when='~mpi')
depends_on('highfive+mpi', when='+mpi')
depends_on('highfive~mpi', when='~mpi')
depends_on('mpi', when='+mpi')
depends_on('libsonata+mpi', when='@2.1: +mpi')
depends_on('libsonata~mpi', when='@2.1: ~mpi')
depends_on('python', when='+python')
depends_on('py-cython', when='+python')
depends_on('py-numpy', when='+python')
def cmake_args(self):
args = []
if self.spec.satisfies('+mpi'):
args.extend([
'-DCMAKE_C_COMPILER:STRING={}'.format(self.spec['mpi'].mpicc),
'-DCMAKE_CXX_COMPILER:STRING={}'.format(self.spec['mpi'].mpicxx),
])
if self.spec.satisfies('+python'):
args.extend([
'-DBUILD_PYTHON_BINDINGS:BOOL=ON'
])
return args
@when('+python')
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
site_dir = self.spec['python'].package.site_packages_dir.split(os.sep)[1:]
for target in (self.prefix.lib, self.prefix.lib64):
pathname = os.path.join(target, *site_dir)
if os.path.isdir(pathname):
run_env.prepend_path('PYTHONPATH', pathname) | var/spack/repos/builtin/packages/mvdtool/package.py |
import os
from spack import *
class Mvdtool(CMakePackage):
"""MVD3 neuroscience file format parser and tool"""
homepage = "https://github.com/BlueBrain/MVDTool"
url = "https://github.com/BlueBrain/MVDTool.git"
git = "https://github.com/BlueBrain/MVDTool.git"
version('develop', git=url)
version('2.2.0', tag='v2.2.0', clean=False)
version('2.1.0', tag='v2.1.0', clean=False)
version('2.0.0', tag='v2.0.0', clean=False)
version('1.5.1', tag='v1.5.1')
version('1.5', tag='v1.5')
version('1.4', tag='v1.4')
variant('mpi', default=True, description="Enable MPI backend")
variant('python', default=False, description="Enable Python bindings")
depends_on('boost')
depends_on('cmake', type='build')
depends_on('py-setuptools-scm', type='build', when='@2:')
depends_on('py-setuptools', type='build', when='@2:')
depends_on('hdf5+mpi', when='+mpi')
depends_on('hdf5~mpi', when='~mpi')
depends_on('highfive+mpi', when='+mpi')
depends_on('highfive~mpi', when='~mpi')
depends_on('mpi', when='+mpi')
depends_on('libsonata+mpi', when='@2.1: +mpi')
depends_on('libsonata~mpi', when='@2.1: ~mpi')
depends_on('python', when='+python')
depends_on('py-cython', when='+python')
depends_on('py-numpy', when='+python')
def cmake_args(self):
args = []
if self.spec.satisfies('+mpi'):
args.extend([
'-DCMAKE_C_COMPILER:STRING={}'.format(self.spec['mpi'].mpicc),
'-DCMAKE_CXX_COMPILER:STRING={}'.format(self.spec['mpi'].mpicxx),
])
if self.spec.satisfies('+python'):
args.extend([
'-DBUILD_PYTHON_BINDINGS:BOOL=ON'
])
return args
@when('+python')
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
site_dir = self.spec['python'].package.site_packages_dir.split(os.sep)[1:]
for target in (self.prefix.lib, self.prefix.lib64):
pathname = os.path.join(target, *site_dir)
if os.path.isdir(pathname):
run_env.prepend_path('PYTHONPATH', pathname) | 0.349977 | 0.115986 |
import argparse
import datetime
import pathlib
import sys
import torch, torch.utils.tensorboard
import tqdm
import yaml
import model
import dataset
def main(mel_dir, embed_dir, dest_dir, config_path, model_path, weight_path):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
config = yaml.load(config_path.read_text(), Loader=yaml.FullLoader)
run_id = datetime.datetime.now().strftime("%Y-%m-%d/%H-%M-%S")
dest_dir = dest_dir / run_id
dest_dir.mkdir(exist_ok=True, parents=True)
sw = torch.utils.tensorboard.SummaryWriter(dest_dir)
if model_path is not None:
net = torch.load(model_path).to(device)
torch.save(net, dest_dir / 'model.pt')
else:
net = model.AutoVC(config['autovc']['config']).to(device)
if weight_path is not None:
net.load_state_dict(torch.load(weight_path))
def creterion(src_mel, src_cnt, rec_mel, pst_mel, pst_cnt):
weight = config['autovc']['weight']
rec_loss = torch.nn.functional.mse_loss(rec_mel, src_mel)
pst_loss = torch.nn.functional.mse_loss(pst_mel, src_mel)
cnt_loss = torch.nn.functional.l1_loss(pst_cnt, src_cnt)
loss = weight['rec'] * rec_loss + weight['pst'] * pst_loss + weight['cnt'] * cnt_loss
return loss, (rec_loss, pst_loss, cnt_loss)
def train(net, optimizer, train_loader, epoch, sw):
net.train()
with tqdm.tqdm(train_loader) as pbar:
for step, (src_mel, src_emb) in enumerate(pbar):
src_mel = src_mel.to(device)
src_emb = src_emb.to(device)
optimizer.zero_grad()
src_cnt, rec_mel, pst_mel, pst_cnt = net(src_mel, src_emb)
loss, loss_detail = creterion(src_mel, src_cnt, rec_mel, pst_mel, pst_cnt)
loss.backward()
optimizer.step()
sw.add_scalar('loss', loss.item(), step + epoch * len(train_loader))
sw.add_scalar('rec_loss', loss_detail[0].item(), step + epoch * len(train_loader))
sw.add_scalar('pst_loss', loss_detail[1].item(), step + epoch * len(train_loader))
sw.add_scalar('cnt_loss', loss_detail[2].item(), step + epoch * len(train_loader))
pbar.set_description(f'Epoch {epoch}')
pbar.set_postfix(loss=loss.item())
train_loader = torch.utils.data.DataLoader(
dataset.MelEmbLoader(mel_dir, embed_dir, **config['data']),
batch_size=config['train']['batch_size'],
shuffle=True,
num_workers=config['train']['num_workers'],
pin_memory=False,
)
optimizer = torch.optim.Adam(net.parameters(), lr=config['train']['lr'])
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=config['train']['lr_step'], gamma=config['train']['lr_gamma'])
for epoch in range(config['train']['epochs']):
train(net, optimizer, train_loader, epoch, sw)
scheduler.step()
torch.save(net.state_dict(), dest_dir / 'weight.pt')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert wav to mel spectrogram')
parser.add_argument('mel_dir', type=pathlib.Path, help='path to directory of mel spectrograms')
parser.add_argument('embed_dir', type=pathlib.Path, help='path to directory of embeddings')
parser.add_argument('dest_dir', type=pathlib.Path, help='path to destination directory')
parser.add_argument('config_path', type=pathlib.Path, help='path to config')
parser.add_argument('--model_path', type=pathlib.Path, help='path to network model')
parser.add_argument('--weight_path', type=pathlib.Path, help='path to network weight')
if 'debugpy' in sys.modules:
args = parser.parse_args([
'vc3/mel-jvs',
'vc3/embed-jvs',
'vc3/train',
'vc3/training.yaml',
])
else:
args = parser.parse_args([])
main(**vars(args)) | vc3/training.py | import argparse
import datetime
import pathlib
import sys
import torch, torch.utils.tensorboard
import tqdm
import yaml
import model
import dataset
def main(mel_dir, embed_dir, dest_dir, config_path, model_path, weight_path):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
config = yaml.load(config_path.read_text(), Loader=yaml.FullLoader)
run_id = datetime.datetime.now().strftime("%Y-%m-%d/%H-%M-%S")
dest_dir = dest_dir / run_id
dest_dir.mkdir(exist_ok=True, parents=True)
sw = torch.utils.tensorboard.SummaryWriter(dest_dir)
if model_path is not None:
net = torch.load(model_path).to(device)
torch.save(net, dest_dir / 'model.pt')
else:
net = model.AutoVC(config['autovc']['config']).to(device)
if weight_path is not None:
net.load_state_dict(torch.load(weight_path))
def creterion(src_mel, src_cnt, rec_mel, pst_mel, pst_cnt):
weight = config['autovc']['weight']
rec_loss = torch.nn.functional.mse_loss(rec_mel, src_mel)
pst_loss = torch.nn.functional.mse_loss(pst_mel, src_mel)
cnt_loss = torch.nn.functional.l1_loss(pst_cnt, src_cnt)
loss = weight['rec'] * rec_loss + weight['pst'] * pst_loss + weight['cnt'] * cnt_loss
return loss, (rec_loss, pst_loss, cnt_loss)
def train(net, optimizer, train_loader, epoch, sw):
net.train()
with tqdm.tqdm(train_loader) as pbar:
for step, (src_mel, src_emb) in enumerate(pbar):
src_mel = src_mel.to(device)
src_emb = src_emb.to(device)
optimizer.zero_grad()
src_cnt, rec_mel, pst_mel, pst_cnt = net(src_mel, src_emb)
loss, loss_detail = creterion(src_mel, src_cnt, rec_mel, pst_mel, pst_cnt)
loss.backward()
optimizer.step()
sw.add_scalar('loss', loss.item(), step + epoch * len(train_loader))
sw.add_scalar('rec_loss', loss_detail[0].item(), step + epoch * len(train_loader))
sw.add_scalar('pst_loss', loss_detail[1].item(), step + epoch * len(train_loader))
sw.add_scalar('cnt_loss', loss_detail[2].item(), step + epoch * len(train_loader))
pbar.set_description(f'Epoch {epoch}')
pbar.set_postfix(loss=loss.item())
train_loader = torch.utils.data.DataLoader(
dataset.MelEmbLoader(mel_dir, embed_dir, **config['data']),
batch_size=config['train']['batch_size'],
shuffle=True,
num_workers=config['train']['num_workers'],
pin_memory=False,
)
optimizer = torch.optim.Adam(net.parameters(), lr=config['train']['lr'])
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=config['train']['lr_step'], gamma=config['train']['lr_gamma'])
for epoch in range(config['train']['epochs']):
train(net, optimizer, train_loader, epoch, sw)
scheduler.step()
torch.save(net.state_dict(), dest_dir / 'weight.pt')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert wav to mel spectrogram')
parser.add_argument('mel_dir', type=pathlib.Path, help='path to directory of mel spectrograms')
parser.add_argument('embed_dir', type=pathlib.Path, help='path to directory of embeddings')
parser.add_argument('dest_dir', type=pathlib.Path, help='path to destination directory')
parser.add_argument('config_path', type=pathlib.Path, help='path to config')
parser.add_argument('--model_path', type=pathlib.Path, help='path to network model')
parser.add_argument('--weight_path', type=pathlib.Path, help='path to network weight')
if 'debugpy' in sys.modules:
args = parser.parse_args([
'vc3/mel-jvs',
'vc3/embed-jvs',
'vc3/train',
'vc3/training.yaml',
])
else:
args = parser.parse_args([])
main(**vars(args)) | 0.469277 | 0.129595 |
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
from xgboost import XGBClassifier
class ConvenientXGBClassifier(XGBClassifier):
"""
XGBClassifier which has a `validation_fraction` parameter for splitting off a validation set just like i
SGDClassifier. In this class it's a fit_params parameter whereas for SGDClassifier it's a constructor argument.
"""
def _make_validation_split(self, y: np.array, validation_fraction: float):
"""Split the dataset between training set and validation set.
Largely copied from sklearn.linear_model._stochastic_gradient.BaseSGD._make_validation_split
Parameters
----------
y : ndarray of shape (n_samples, )
Target values.
validation_fraction: float between 0 and 1 to determine the size of the validation split
Returns
-------
validation_mask : ndarray of shape (n_samples, )
Equal to 1 on the validation set, 0 on the training set.
"""
if not (0.0 < validation_fraction < 1.0):
raise ValueError("validation_fraction must be in range (0, 1)")
n_samples = y.shape[0]
validation_mask = np.zeros(n_samples, dtype=np.uint8)
cv = StratifiedShuffleSplit(test_size=validation_fraction, random_state=0)
idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))
if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:
raise ValueError(
"Splitting %d samples into a train set and a validation set "
"with validation_fraction=%r led to an empty set (%d and %d "
"samples). Please either change validation_fraction, increase "
"number of samples, or disable early_stopping."
% (n_samples, self.validation_fraction, idx_train.shape[0],
idx_val.shape[0]))
validation_mask[idx_val] = 1
return validation_mask.astype(bool)
def fit(self, X, y, sample_weight=None, base_margin=None, validation_fraction: float = 0.1, eval_metric=None,
early_stopping_rounds=None, verbose=True, xgb_model=None, sample_weight_eval_set=None, callbacks=None):
if early_stopping_rounds is not None:
validation_mask = self._make_validation_split(y, validation_fraction)
train_X = X[~validation_mask]
train_y = y[~validation_mask]
dev_X = X[validation_mask]
dev_y = y[validation_mask]
# eval_set: A list of (X, y) tuple pairs to use as validation sets, for which metrics will be computed.
eval_set = [(dev_X, dev_y)]
else:
train_X = X
train_y = y
eval_set = None
return super().fit(train_X, train_y, sample_weight, base_margin, eval_set, eval_metric, early_stopping_rounds,
verbose, xgb_model, sample_weight_eval_set, callbacks) | python/handwritten_baseline/pipeline/model/classifier_clustering/xgboost.py | import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
from xgboost import XGBClassifier
class ConvenientXGBClassifier(XGBClassifier):
"""
XGBClassifier which has a `validation_fraction` parameter for splitting off a validation set just like i
SGDClassifier. In this class it's a fit_params parameter whereas for SGDClassifier it's a constructor argument.
"""
def _make_validation_split(self, y: np.array, validation_fraction: float):
"""Split the dataset between training set and validation set.
Largely copied from sklearn.linear_model._stochastic_gradient.BaseSGD._make_validation_split
Parameters
----------
y : ndarray of shape (n_samples, )
Target values.
validation_fraction: float between 0 and 1 to determine the size of the validation split
Returns
-------
validation_mask : ndarray of shape (n_samples, )
Equal to 1 on the validation set, 0 on the training set.
"""
if not (0.0 < validation_fraction < 1.0):
raise ValueError("validation_fraction must be in range (0, 1)")
n_samples = y.shape[0]
validation_mask = np.zeros(n_samples, dtype=np.uint8)
cv = StratifiedShuffleSplit(test_size=validation_fraction, random_state=0)
idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))
if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:
raise ValueError(
"Splitting %d samples into a train set and a validation set "
"with validation_fraction=%r led to an empty set (%d and %d "
"samples). Please either change validation_fraction, increase "
"number of samples, or disable early_stopping."
% (n_samples, self.validation_fraction, idx_train.shape[0],
idx_val.shape[0]))
validation_mask[idx_val] = 1
return validation_mask.astype(bool)
def fit(self, X, y, sample_weight=None, base_margin=None, validation_fraction: float = 0.1, eval_metric=None,
early_stopping_rounds=None, verbose=True, xgb_model=None, sample_weight_eval_set=None, callbacks=None):
if early_stopping_rounds is not None:
validation_mask = self._make_validation_split(y, validation_fraction)
train_X = X[~validation_mask]
train_y = y[~validation_mask]
dev_X = X[validation_mask]
dev_y = y[validation_mask]
# eval_set: A list of (X, y) tuple pairs to use as validation sets, for which metrics will be computed.
eval_set = [(dev_X, dev_y)]
else:
train_X = X
train_y = y
eval_set = None
return super().fit(train_X, train_y, sample_weight, base_margin, eval_set, eval_metric, early_stopping_rounds,
verbose, xgb_model, sample_weight_eval_set, callbacks) | 0.935139 | 0.527317 |
from Individual import *
class Random_Problem:
def __init__(self):
pass
# Searches for solution to 8-puzzle through random technique
def random_solve(self, state):
print("\nSolving Randomly...")
if state.is_goal():
print("Root is solution! ", end='')
state.print_state()
return
while not state.is_goal():
list_of_moves = self.get_legal_actions(state)
# Choose a random move from the list of possible moves
rand_num = randint(0, len(list_of_moves) - 1)
rand_action = list_of_moves[rand_num]
state = self.get_new_state(state, rand_action)
print("Goal Found! ", end='')
state.print_state()
return
# Generate a new state given the old one and an action
def get_new_state(self, state, action):
zero_index = self.get_zero_index(state.get_tiles())
other_tile_index = None
if action == "UP":
other_tile_index = zero_index - 3
if action == "DOWN":
other_tile_index = zero_index + 3
if action == "LEFT":
other_tile_index = zero_index - 1
if action == "RIGHT":
other_tile_index = zero_index + 1
# Swap the tile values at the zero and destination indices
temp = state.get_tile_at_index(zero_index)
state.set_tile_at_index(zero_index, state.get_tile_at_index(other_tile_index))
state.set_tile_at_index(other_tile_index, temp)
return state
# Return list of all possible actions from the current state
def get_legal_actions(self, state):
list_of_actions = []
zero_index = self.get_zero_index(state.get_tiles())
if zero_index == 0:
list_of_actions.append("RIGHT")
list_of_actions.append("DOWN")
if zero_index == 1:
list_of_actions.append("LEFT")
list_of_actions.append("RIGHT")
list_of_actions.append("DOWN")
if zero_index == 2:
list_of_actions.append("LEFT")
list_of_actions.append("DOWN")
if zero_index == 3:
list_of_actions.append("UP")
list_of_actions.append("RIGHT")
list_of_actions.append("DOWN")
if zero_index == 4:
list_of_actions.append("LEFT")
list_of_actions.append("RIGHT")
list_of_actions.append("UP")
list_of_actions.append("DOWN")
if zero_index == 5:
list_of_actions.append("LEFT")
list_of_actions.append("UP")
list_of_actions.append("DOWN")
if zero_index == 6:
list_of_actions.append("UP")
list_of_actions.append("RIGHT")
if zero_index == 7:
list_of_actions.append("LEFT")
list_of_actions.append("UP")
list_of_actions.append("RIGHT")
if zero_index == 8:
list_of_actions.append("LEFT")
list_of_actions.append("UP")
return list_of_actions
# Find the location of the 'zero' tile
def get_zero_index(self, tiles):
zero_index = 0
for tile in tiles:
if tile == 0:
break
zero_index = zero_index + 1
return zero_index | Random_Problem.py | from Individual import *
class Random_Problem:
def __init__(self):
pass
# Searches for solution to 8-puzzle through random technique
def random_solve(self, state):
print("\nSolving Randomly...")
if state.is_goal():
print("Root is solution! ", end='')
state.print_state()
return
while not state.is_goal():
list_of_moves = self.get_legal_actions(state)
# Choose a random move from the list of possible moves
rand_num = randint(0, len(list_of_moves) - 1)
rand_action = list_of_moves[rand_num]
state = self.get_new_state(state, rand_action)
print("Goal Found! ", end='')
state.print_state()
return
# Generate a new state given the old one and an action
def get_new_state(self, state, action):
zero_index = self.get_zero_index(state.get_tiles())
other_tile_index = None
if action == "UP":
other_tile_index = zero_index - 3
if action == "DOWN":
other_tile_index = zero_index + 3
if action == "LEFT":
other_tile_index = zero_index - 1
if action == "RIGHT":
other_tile_index = zero_index + 1
# Swap the tile values at the zero and destination indices
temp = state.get_tile_at_index(zero_index)
state.set_tile_at_index(zero_index, state.get_tile_at_index(other_tile_index))
state.set_tile_at_index(other_tile_index, temp)
return state
# Return list of all possible actions from the current state
def get_legal_actions(self, state):
list_of_actions = []
zero_index = self.get_zero_index(state.get_tiles())
if zero_index == 0:
list_of_actions.append("RIGHT")
list_of_actions.append("DOWN")
if zero_index == 1:
list_of_actions.append("LEFT")
list_of_actions.append("RIGHT")
list_of_actions.append("DOWN")
if zero_index == 2:
list_of_actions.append("LEFT")
list_of_actions.append("DOWN")
if zero_index == 3:
list_of_actions.append("UP")
list_of_actions.append("RIGHT")
list_of_actions.append("DOWN")
if zero_index == 4:
list_of_actions.append("LEFT")
list_of_actions.append("RIGHT")
list_of_actions.append("UP")
list_of_actions.append("DOWN")
if zero_index == 5:
list_of_actions.append("LEFT")
list_of_actions.append("UP")
list_of_actions.append("DOWN")
if zero_index == 6:
list_of_actions.append("UP")
list_of_actions.append("RIGHT")
if zero_index == 7:
list_of_actions.append("LEFT")
list_of_actions.append("UP")
list_of_actions.append("RIGHT")
if zero_index == 8:
list_of_actions.append("LEFT")
list_of_actions.append("UP")
return list_of_actions
# Find the location of the 'zero' tile
def get_zero_index(self, tiles):
zero_index = 0
for tile in tiles:
if tile == 0:
break
zero_index = zero_index + 1
return zero_index | 0.50415 | 0.341706 |
import sys
import importlib
from pathlib import Path
from typing import Dict, List, Tuple
from types import ModuleType
from pii_manager import PiiEnum
from .exception import InvArgException
# Folder for language-independent tasks
TASK_ANY = "any"
# Name of the list that holds the pii tasks at each module
_LISTNAME = "PII_TASKS"
# --------------------------------------------------------------------------
_LANG = Path(__file__).parents[1] / "lang"
def build_subdict(
task_list: List[Tuple], lang: str = None, country: str = None
) -> Dict:
"""
Given a list of task tuples, build the task dict for them
"""
subdict = {}
for task in task_list:
# Checks
if not isinstance(task, tuple):
raise InvArgException(
"Error in tasklist for lang={}, country={}: element is not a tuple",
lang,
country,
)
if not isinstance(task[0], PiiEnum):
raise InvArgException(
"Error in tasklist for lang={}, country={}: need a PiiEnum in the first tuple element",
lang,
country,
)
# Add to dict
subdict[task[0].name] = (lang, country, *task)
return subdict
def _gather_piitasks(
pkg: ModuleType, path: str, lang: str, country: str, debug: bool = False
) -> List[Tuple]:
"""
Import and load all tasks defined in a module
"""
# Get the list of Python files in the module
modlist = (
m.stem
for m in Path(path).iterdir()
if m.suffix == ".py" and m.stem != "__init__"
)
# Get all tasks defined in those files
pii_tasks = {}
for mname in modlist:
mod = importlib.import_module("." + mname, pkg)
task_list = getattr(mod, _LISTNAME, None)
if task_list:
pii_tasks.update(build_subdict(task_list, lang, country))
# If debug mode is on, print out the list
if debug:
if not pii_tasks:
print(".. NO PII TASKS for", pkg, file=sys.stderr)
else:
print(".. PII TASKS for", pkg, file=sys.stderr)
print(".. path =", path, file=sys.stderr)
for task_name, task in pii_tasks.items():
print(" ", task_name, "->", task[3], file=sys.stderr)
return pii_tasks
def import_processor(lang: str, country: str = None, debug: bool = False) -> Dict:
"""
Import all task processors available for a given lang & country
"""
if debug:
print(".. IMPORT FROM:", lang, "/", country, file=sys.stderr)
if lang == TASK_ANY:
name = TASK_ANY
path = _LANG / TASK_ANY
else:
if country is None:
country_elem = TASK_ANY
elif country in ("in", "is"):
country_elem = country + "_"
else:
country_elem = country
lang_elem = lang if lang not in ("is",) else lang + "_"
name = f"{lang_elem}.{country_elem}"
path = _LANG / lang_elem / country_elem
# mod = importlib.import_module('...lang.' + name, __name__)
return _gather_piitasks(
"pii_manager.lang." + name, path, lang, country, debug=debug
)
def _norm(elem: str) -> str:
"""
Strip away underscores used to avoid reserved Python words
"""
return elem[:-1] if elem.endswith("_") else elem
def country_list(lang: str) -> List[str]:
"""
Return all countries for a given language
"""
p = _LANG / lang
return [
_norm(d.name) for d in p.iterdir() if d.is_dir() and d.name != "__pycache__"
]
def language_list() -> List[str]:
return [
_norm(d.name) for d in _LANG.iterdir() if d.is_dir() and d.name != "__pycache__"
]
# --------------------------------------------------------------------------
_TASKS = None
def _gather_all_tasks(debug: bool = False):
"""
Build the list of all tasks
"""
global _TASKS
if debug:
print(".. DEFINED LANGUAGES:", " ".join(sorted(language_list())))
_TASKS = {}
for lang in language_list():
if lang == TASK_ANY:
_TASKS[lang] = import_processor(lang, debug=debug)
else:
_TASKS[lang] = {
country: import_processor(lang, country, debug)
for country in country_list(lang)
}
def get_taskdict(debug: bool = False) -> Dict:
"""
Return the dict holding all implemented pii tasks
"""
global _TASKS
if _TASKS is None:
_gather_all_tasks(debug)
return _TASKS | pii-manager/src/pii_manager/helper/taskdict.py | import sys
import importlib
from pathlib import Path
from typing import Dict, List, Tuple
from types import ModuleType
from pii_manager import PiiEnum
from .exception import InvArgException
# Folder for language-independent tasks
TASK_ANY = "any"
# Name of the list that holds the pii tasks at each module
_LISTNAME = "PII_TASKS"
# --------------------------------------------------------------------------
_LANG = Path(__file__).parents[1] / "lang"
def build_subdict(
task_list: List[Tuple], lang: str = None, country: str = None
) -> Dict:
"""
Given a list of task tuples, build the task dict for them
"""
subdict = {}
for task in task_list:
# Checks
if not isinstance(task, tuple):
raise InvArgException(
"Error in tasklist for lang={}, country={}: element is not a tuple",
lang,
country,
)
if not isinstance(task[0], PiiEnum):
raise InvArgException(
"Error in tasklist for lang={}, country={}: need a PiiEnum in the first tuple element",
lang,
country,
)
# Add to dict
subdict[task[0].name] = (lang, country, *task)
return subdict
def _gather_piitasks(
    pkg: str, path: str, lang: str, country: str, debug: bool = False
) -> Dict:
    """
    Import and load all tasks defined in a module.

    :param pkg: dotted package name the task modules are imported relative to
      (callers pass a string such as "pii_manager.lang.<lang>.<country>")
    :param path: filesystem path of the package folder to scan
    :param lang: language the tasks belong to
    :param country: country the tasks belong to (may be None)
    :param debug: print the gathered task list to stderr
    :return: dict of task entries, as built by build_subdict()
    """
    # Get the list of Python files in the module
    modlist = (
        m.stem
        for m in Path(path).iterdir()
        if m.suffix == ".py" and m.stem != "__init__"
    )
    # Get all tasks defined in those files
    pii_tasks = {}
    for mname in modlist:
        # Relative import: ".module" resolved against the pkg package name
        mod = importlib.import_module("." + mname, pkg)
        task_list = getattr(mod, _LISTNAME, None)
        if task_list:
            pii_tasks.update(build_subdict(task_list, lang, country))
    # If debug mode is on, print out the list
    if debug:
        if not pii_tasks:
            print(".. NO PII TASKS for", pkg, file=sys.stderr)
        else:
            print(".. PII TASKS for", pkg, file=sys.stderr)
            print(".. path =", path, file=sys.stderr)
            for task_name, task in pii_tasks.items():
                # task[3] is the element right after (lang, country, PiiEnum)
                # — presumably the task processor; confirm against PII_TASKS
                print("  ", task_name, "->", task[3], file=sys.stderr)
    return pii_tasks
def import_processor(lang: str, country: str = None, debug: bool = False) -> Dict:
    """
    Import all task processors available for a given lang & country.

    :param lang: language code, or TASK_ANY for language-independent tasks
    :param country: country code; None selects the language's TASK_ANY folder
    :param debug: print progress information to stderr
    :return: task dict, as built by _gather_piitasks()
    """
    import keyword

    if debug:
        print(".. IMPORT FROM:", lang, "/", country, file=sys.stderr)
    if lang == TASK_ANY:
        name = TASK_ANY
        path = _LANG / TASK_ANY
    else:
        if country is None:
            country_elem = TASK_ANY
        else:
            # Folders whose name collides with a Python reserved word carry a
            # trailing underscore on disk (see _norm); generalized from the
            # previous hard-coded ("in", "is") list via keyword.iskeyword,
            # which also covers e.g. "if", "or", "as"
            country_elem = country + "_" if keyword.iskeyword(country) else country
        lang_elem = lang + "_" if keyword.iskeyword(lang) else lang
        name = f"{lang_elem}.{country_elem}"
        path = _LANG / lang_elem / country_elem
    return _gather_piitasks(
        "pii_manager.lang." + name, path, lang, country, debug=debug
    )
def _norm(elem: str) -> str:
"""
Strip away underscores used to avoid reserved Python words
"""
return elem[:-1] if elem.endswith("_") else elem
def country_list(lang: str) -> List[str]:
    """
    Return all countries for a given language.
    """
    lang_dir = _LANG / lang
    countries = []
    for entry in lang_dir.iterdir():
        if entry.is_dir() and entry.name != "__pycache__":
            countries.append(_norm(entry.name))
    return countries
def language_list() -> List[str]:
    """Return the normalized names of all language folders under lang/."""
    return [
        _norm(d.name) for d in _LANG.iterdir() if d.is_dir() and d.name != "__pycache__"
    ]
# --------------------------------------------------------------------------
_TASKS = None
def _gather_all_tasks(debug: bool = False):
    """
    Build the module-level dict of all tasks, keyed by language and country.
    """
    global _TASKS
    if debug:
        print(".. DEFINED LANGUAGES:", " ".join(sorted(language_list())))
    _TASKS = {}
    for lang in language_list():
        if lang == TASK_ANY:
            # Language-independent bucket: no country subfolders
            _TASKS[lang] = import_processor(lang, debug=debug)
            continue
        per_country = {}
        for country in country_list(lang):
            per_country[country] = import_processor(lang, country, debug)
        _TASKS[lang] = per_country
def get_taskdict(debug: bool = False) -> Dict:
    """
    Return the dict holding all implemented pii tasks (built on first use).
    """
    global _TASKS
    if _TASKS is None:  # lazy one-time construction
        _gather_all_tasks(debug)
    return _TASKS
import os
import logging
# Imports: third party
import pandas as pd
def save_mrns_and_csns_csv(
    staging_dir: str,
    hd5_dir: str,
    adt: str,
    first_mrn_index: int,
    last_mrn_index: int,
    overwrite_hd5: bool,
):
    """
    Extract unique MRN / CSN pairs from an ADT table and save them to
    patients.csv in the staging directory.
    :param staging_dir: <str> Path to temporary staging directory.
    :param hd5_dir: <str> Path to directory where hd5 files are stored.
    :param adt: <str> Path to CSV containing ADT table.
    :param first_mrn_index: <int> First index of desired MRNs.
    :param last_mrn_index: <int> Last index of desired MRNs.
    :param overwrite_hd5: <bool> Overwrite existing hd5 files.
    """
    table = pd.read_csv(adt).sort_values(by=["MRN"], ascending=True)
    pairs = table[["MRN", "PatientEncounterID"]].drop_duplicates().dropna()
    # Restrict to the requested slice of unique MRNs
    wanted_mrns = pairs["MRN"].drop_duplicates()[first_mrn_index:last_mrn_index]
    selected = pairs[pairs["MRN"].isin(wanted_mrns)]
    # Unless overwriting, skip MRNs that already have a tensorized hd5 file
    if not overwrite_hd5 and os.path.isdir(hd5_dir):
        existing = [
            int(fname.split(".")[0])
            for fname in os.listdir(hd5_dir)
            if fname.endswith(".hd5")
        ]
        selected = selected[~selected["MRN"].isin(existing)]
    out_path = os.path.join(staging_dir, "patients.csv")
    selected.to_csv(out_path, index=False)
    logging.info(f"Saved {out_path}")
def get_files_in_directory(
directory: str,
file_extension: str,
departments_short_names: set = None,
) -> tuple:
"""
Given a path to a directory and a file extension, returns a list of full paths
to all files ending in the file extension, and a list of full paths to all files
that do not end in the file extension.
Optionally, limit search to a subset of departments.
"""
fpaths = []
not_fpaths = []
for root, dirs, files in os.walk(directory, topdown=True):
if departments_short_names is not None:
dirs[:] = [d for d in dirs if d in departments_short_names]
for file in files:
fpath = os.path.join(root, file)
if file.endswith(file_extension):
fpaths.append(fpath)
else:
not_fpaths.append(fpath)
return fpaths, not_fpaths | tensorize/utils.py | import os
import logging
# Imports: third party
import pandas as pd
def save_mrns_and_csns_csv(
staging_dir: str,
hd5_dir: str,
adt: str,
first_mrn_index: int,
last_mrn_index: int,
overwrite_hd5: bool,
):
"""
Get unique MRNs and CSNs from ADT and save to patients.csv.
:param staging_dir: <str> Path to temporary staging directory.
:param hd5_dir: <str> Path to directory where hd5 files are stored.
:param adt: <str> Path to CSV containing ADT table.
:param first_mrn_index: <int> First index of desired MRNs.
:param last_mrn_index: <int> Last index of desired MRNs.
:param overwrite_hd5: <bool> Overwrite existing hd5 files.
"""
adt_df = pd.read_csv(adt).sort_values(by=["MRN"], ascending=True)
patients = adt_df[["MRN", "PatientEncounterID"]].drop_duplicates().dropna()
mrns = patients["MRN"].drop_duplicates()[first_mrn_index:last_mrn_index]
mrns_and_csns = patients[patients["MRN"].isin(mrns)]
if not overwrite_hd5 and os.path.isdir(hd5_dir):
hd5_mrns = [
int(hd5_mrn.split(".")[0])
for hd5_mrn in os.listdir(hd5_dir)
if hd5_mrn.endswith(".hd5")
]
mrns_and_csns = mrns_and_csns[~mrns_and_csns["MRN"].isin(hd5_mrns)]
mrns_and_csns_path = os.path.join(staging_dir, "patients.csv")
mrns_and_csns.to_csv(mrns_and_csns_path, index=False)
logging.info(f"Saved {mrns_and_csns_path}")
def get_files_in_directory(
    directory: str,
    file_extension: str,
    departments_short_names: set = None,
) -> tuple:
    """
    Walk *directory* and split full file paths into two lists: those ending
    in *file_extension* and those that do not.
    Optionally, limit the walk to a subset of department sub-directories.
    """
    matching = []
    non_matching = []
    for root, subdirs, filenames in os.walk(directory, topdown=True):
        # In-place pruning: os.walk only descends into what remains in subdirs
        if departments_short_names is not None:
            subdirs[:] = [d for d in subdirs if d in departments_short_names]
        for name in filenames:
            full_path = os.path.join(root, name)
            bucket = matching if name.endswith(file_extension) else non_matching
            bucket.append(full_path)
    return matching, non_matching
import torch as to
from copy import deepcopy
from sbi.inference import SNPE_C
from sbi import utils
import pyrado
from pyrado.sampling.sbi_embeddings import (
LastStepEmbedding,
)
from pyrado.algorithms.meta.npdr import NPDR
from pyrado.sampling.sbi_rollout_sampler import RolloutSamplerForSBI
from pyrado.environment_wrappers.action_delay import ActDelayWrapper
from pyrado.environments.pysim.quanser_qube import QQubeSwingUpSim
from pyrado.policies.special.environment_specific import QQubeSwingUpAndBalanceCtrl
from pyrado.logger.experiment import setup_experiment, save_dicts_to_yaml
from pyrado.utils.argparser import get_argparser
if __name__ == "__main__":
    # Sim-to-sim NPDR run on the Quanser Qube swing-up task: a perturbed copy
    # of the simulator plays the role of the real system, and SNPE-C infers a
    # posterior over the domain parameters listed in dp_mapping.
    # Parse command line arguments
    args = get_argparser().parse_args()
    # Experiment (set seed before creating the modules)
    ex_dir = setup_experiment(QQubeSwingUpSim.name, f"{NPDR.name}_{QQubeSwingUpAndBalanceCtrl.name}", "sim2sim")
    # Set seed if desired
    pyrado.set_seed(args.seed, verbose=True)
    # Environments
    env_sim_hparams = dict(dt=1 / 250.0, max_steps=1500)
    env_sim = QQubeSwingUpSim(**env_sim_hparams)
    env_sim = ActDelayWrapper(env_sim)
    # Create a fake ground truth target domain
    num_real_obs = 5  # NOTE(review): only used by the commented-out randomizer below
    env_real = deepcopy(env_sim)
    dp_nom = env_sim.get_nominal_domain_param()
    # Perturb masses and lengths away from nominal to create the "real" system
    env_real.domain_param = dict(
        Mp=dp_nom["Mp"] * 1.2, Mr=dp_nom["Mr"] * 1.1, Lp=dp_nom["Lp"] * 0.8, Lr=dp_nom["Lr"] * 0.9
    )
    # randomizer = DomainRandomizer(
    # NormalDomainParam(name="Dr", mean=dp_nom["Dr"] * 2.0, std=dp_nom["km"] / 10, clip_lo=0.0),
    # NormalDomainParam(name="Dp", mean=dp_nom["Dp"] * 2.0, std=dp_nom["km"] / 10, clip_lo=0.0),
    # NormalDomainParam(name="Rm", mean=dp_nom["Rm"] * 1.1, std=dp_nom["km"] / 50, clip_lo=0.0),
    # NormalDomainParam(name="Km", mean=dp_nom["km"] * 0.9, std=dp_nom["km"] / 50, clip_lo=0.0),
    # )
    # env_real = DomainRandWrapperBuffer(env_real, randomizer)
    # env_real.fill_buffer(num_real_obs)
    # Behavioral policy
    policy_hparam = dict(energy_gain=0.587, ref_energy=0.827)
    policy = QQubeSwingUpAndBalanceCtrl(env_sim.spec, **policy_hparam)
    # Define a mapping: index - domain parameter
    # dp_mapping = {0: "act_delay"}
    # dp_mapping = {0: "Mr", 1: "Mp", 2: "Lr", 3: "Lp"}
    dp_mapping = {0: "Dr", 1: "Dp", 2: "Rm", 3: "km", 4: "Mr", 5: "Mp", 6: "Lr", 7: "Lp", 8: "g"}
    # Prior and Posterior (normalizing flow)
    # Bounds below must line up index-by-index with dp_mapping
    prior_hparam = dict(
        # low=to.tensor([0.0]),
        # high=to.tensor([5.0]),
        low=to.tensor(
            [
                1e-8,
                1e-8,
                dp_nom["Rm"] * 0.8,
                dp_nom["km"] * 0.8,
                dp_nom["Mr"] * 0.9,
                dp_nom["Mp"] * 0.9,
                dp_nom["Lr"] * 0.9,
                dp_nom["Lp"] * 0.9,
                dp_nom["g"] * 0.95,
            ]
        ),
        high=to.tensor(
            [
                2 * 0.0015,
                2 * 0.0005,
                dp_nom["Rm"] * 1.2,
                dp_nom["km"] * 1.2,
                dp_nom["Mr"] * 1.1,
                dp_nom["Mp"] * 1.1,
                dp_nom["Lr"] * 1.1,
                dp_nom["Lp"] * 1.1,
                dp_nom["g"] * 1.05,
            ]
        ),
    )
    prior = utils.BoxUniform(**prior_hparam)
    # Time series embedding (alternatives kept below for experimentation)
    embedding_hparam = dict()
    embedding = LastStepEmbedding(env_sim.spec, RolloutSamplerForSBI.get_dim_data(env_sim.spec), **embedding_hparam)
    # embedding_hparam = dict()
    # embedding = AllStepsEmbedding(
    # env_sim.spec, RolloutSamplerForSBI.get_dim_data(env_sim.spec), env_sim.max_steps, **embedding_hparam
    # )
    # embedding_hparam = dict(downsampling_factor=1)
    # embedding = BayesSimEmbedding(env_sim.spec, RolloutSamplerForSBI.get_dim_data(env_sim.spec), **embedding_hparam)
    # embedding_hparam = dict(downsampling_factor=1)
    # embedding = DynamicTimeWarpingEmbedding(
    # env_sim.spec, RolloutSamplerForSBI.get_dim_data(env_sim.spec), **embedding_hparam
    # )
    # embedding_hparam = dict(hidden_size=5, num_recurrent_layers=1, output_size=7, downsampling_factor=10)
    # embedding = RNNEmbedding(
    # env_sim.spec, RolloutSamplerForSBI.get_dim_data(env_sim.spec), env_sim.max_steps, **embedding_hparam
    # )
    # Posterior (normalizing flow)
    posterior_hparam = dict(model="maf", hidden_features=50, num_transforms=5)
    # Algorithm
    algo_hparam = dict(
        max_iter=1,
        num_real_rollouts=1,
        num_sim_per_round=200,
        num_sbi_rounds=5,
        simulation_batch_size=10,
        normalize_posterior=False,
        num_eval_samples=10,
        num_segments=args.num_segments,
        len_segments=args.len_segments,
        posterior_hparam=posterior_hparam,
        subrtn_sbi_training_hparam=dict(
            num_atoms=10,  # default: 10
            training_batch_size=100,  # default: 50
            learning_rate=3e-4,  # default: 5e-4
            validation_fraction=0.2,  # default: 0.1
            stop_after_epochs=20,  # default: 20
            discard_prior_samples=False,  # default: False
            use_combined_loss=False,  # default: False
            retrain_from_scratch_each_round=False,  # default: False
            show_train_summary=False,  # default: False
            # max_num_epochs=5,  # only use for debugging
        ),
        subrtn_sbi_sampling_hparam=dict(sample_with_mcmc=False),
        num_workers=8,
    )
    algo = NPDR(
        ex_dir,
        env_sim,
        env_real,
        policy,
        dp_mapping,
        prior,
        SNPE_C,
        embedding,
        **algo_hparam,
    )
    # Save the hyper-parameters
    save_dicts_to_yaml(
        dict(env=env_sim_hparams, seed=args.seed),
        dict(policy=policy_hparam, policy_name=policy.name),
        dict(prior=prior_hparam),
        dict(embedding=embedding_hparam, embedding_name=embedding.name),
        dict(posterior_nn=posterior_hparam),
        dict(algo=algo_hparam, algo_name=algo.name),
        save_dir=ex_dir,
    )
algo.train(seed=args.seed) | Pyrado/scripts/training/qq-su_npdr_sim2sim.py | import torch as to
from copy import deepcopy
from sbi.inference import SNPE_C
from sbi import utils
import pyrado
from pyrado.sampling.sbi_embeddings import (
LastStepEmbedding,
)
from pyrado.algorithms.meta.npdr import NPDR
from pyrado.sampling.sbi_rollout_sampler import RolloutSamplerForSBI
from pyrado.environment_wrappers.action_delay import ActDelayWrapper
from pyrado.environments.pysim.quanser_qube import QQubeSwingUpSim
from pyrado.policies.special.environment_specific import QQubeSwingUpAndBalanceCtrl
from pyrado.logger.experiment import setup_experiment, save_dicts_to_yaml
from pyrado.utils.argparser import get_argparser
if __name__ == "__main__":
# Parse command line arguments
args = get_argparser().parse_args()
# Experiment (set seed before creating the modules)
ex_dir = setup_experiment(QQubeSwingUpSim.name, f"{NPDR.name}_{QQubeSwingUpAndBalanceCtrl.name}", "sim2sim")
# Set seed if desired
pyrado.set_seed(args.seed, verbose=True)
# Environments
env_sim_hparams = dict(dt=1 / 250.0, max_steps=1500)
env_sim = QQubeSwingUpSim(**env_sim_hparams)
env_sim = ActDelayWrapper(env_sim)
# Create a fake ground truth target domain
num_real_obs = 5
env_real = deepcopy(env_sim)
dp_nom = env_sim.get_nominal_domain_param()
env_real.domain_param = dict(
Mp=dp_nom["Mp"] * 1.2, Mr=dp_nom["Mr"] * 1.1, Lp=dp_nom["Lp"] * 0.8, Lr=dp_nom["Lr"] * 0.9
)
# randomizer = DomainRandomizer(
# NormalDomainParam(name="Dr", mean=dp_nom["Dr"] * 2.0, std=dp_nom["km"] / 10, clip_lo=0.0),
# NormalDomainParam(name="Dp", mean=dp_nom["Dp"] * 2.0, std=dp_nom["km"] / 10, clip_lo=0.0),
# NormalDomainParam(name="Rm", mean=dp_nom["Rm"] * 1.1, std=dp_nom["km"] / 50, clip_lo=0.0),
# NormalDomainParam(name="Km", mean=dp_nom["km"] * 0.9, std=dp_nom["km"] / 50, clip_lo=0.0),
# )
# env_real = DomainRandWrapperBuffer(env_real, randomizer)
# env_real.fill_buffer(num_real_obs)
# Behavioral policy
policy_hparam = dict(energy_gain=0.587, ref_energy=0.827)
policy = QQubeSwingUpAndBalanceCtrl(env_sim.spec, **policy_hparam)
# Define a mapping: index - domain parameter
# dp_mapping = {0: "act_delay"}
# dp_mapping = {0: "Mr", 1: "Mp", 2: "Lr", 3: "Lp"}
dp_mapping = {0: "Dr", 1: "Dp", 2: "Rm", 3: "km", 4: "Mr", 5: "Mp", 6: "Lr", 7: "Lp", 8: "g"}
# Prior and Posterior (normalizing flow)
prior_hparam = dict(
# low=to.tensor([0.0]),
# high=to.tensor([5.0]),
low=to.tensor(
[
1e-8,
1e-8,
dp_nom["Rm"] * 0.8,
dp_nom["km"] * 0.8,
dp_nom["Mr"] * 0.9,
dp_nom["Mp"] * 0.9,
dp_nom["Lr"] * 0.9,
dp_nom["Lp"] * 0.9,
dp_nom["g"] * 0.95,
]
),
high=to.tensor(
[
2 * 0.0015,
2 * 0.0005,
dp_nom["Rm"] * 1.2,
dp_nom["km"] * 1.2,
dp_nom["Mr"] * 1.1,
dp_nom["Mp"] * 1.1,
dp_nom["Lr"] * 1.1,
dp_nom["Lp"] * 1.1,
dp_nom["g"] * 1.05,
]
),
)
prior = utils.BoxUniform(**prior_hparam)
# Time series embedding
embedding_hparam = dict()
embedding = LastStepEmbedding(env_sim.spec, RolloutSamplerForSBI.get_dim_data(env_sim.spec), **embedding_hparam)
# embedding_hparam = dict()
# embedding = AllStepsEmbedding(
# env_sim.spec, RolloutSamplerForSBI.get_dim_data(env_sim.spec), env_sim.max_steps, **embedding_hparam
# )
# embedding_hparam = dict(downsampling_factor=1)
# embedding = BayesSimEmbedding(env_sim.spec, RolloutSamplerForSBI.get_dim_data(env_sim.spec), **embedding_hparam)
# embedding_hparam = dict(downsampling_factor=1)
# embedding = DynamicTimeWarpingEmbedding(
# env_sim.spec, RolloutSamplerForSBI.get_dim_data(env_sim.spec), **embedding_hparam
# )
# embedding_hparam = dict(hidden_size=5, num_recurrent_layers=1, output_size=7, downsampling_factor=10)
# embedding = RNNEmbedding(
# env_sim.spec, RolloutSamplerForSBI.get_dim_data(env_sim.spec), env_sim.max_steps, **embedding_hparam
# )
# Posterior (normalizing flow)
posterior_hparam = dict(model="maf", hidden_features=50, num_transforms=5)
# Algorithm
algo_hparam = dict(
max_iter=1,
num_real_rollouts=1,
num_sim_per_round=200,
num_sbi_rounds=5,
simulation_batch_size=10,
normalize_posterior=False,
num_eval_samples=10,
num_segments=args.num_segments,
len_segments=args.len_segments,
posterior_hparam=posterior_hparam,
subrtn_sbi_training_hparam=dict(
num_atoms=10, # default: 10
training_batch_size=100, # default: 50
learning_rate=3e-4, # default: 5e-4
validation_fraction=0.2, # default: 0.1
stop_after_epochs=20, # default: 20
discard_prior_samples=False, # default: False
use_combined_loss=False, # default: False
retrain_from_scratch_each_round=False, # default: False
show_train_summary=False, # default: False
# max_num_epochs=5, # only use for debugging
),
subrtn_sbi_sampling_hparam=dict(sample_with_mcmc=False),
num_workers=8,
)
algo = NPDR(
ex_dir,
env_sim,
env_real,
policy,
dp_mapping,
prior,
SNPE_C,
embedding,
**algo_hparam,
)
# Save the hyper-parameters
save_dicts_to_yaml(
dict(env=env_sim_hparams, seed=args.seed),
dict(policy=policy_hparam, policy_name=policy.name),
dict(prior=prior_hparam),
dict(embedding=embedding_hparam, embedding_name=embedding.name),
dict(posterior_nn=posterior_hparam),
dict(algo=algo_hparam, algo_name=algo.name),
save_dir=ex_dir,
)
algo.train(seed=args.seed) | 0.703651 | 0.251033 |
import re
import random
import hashlib
import base64
from iota import AsciiTrytesCodec
from config import TRITLI_SALT, SHORT_URL_LENGTH, SHORT_URL_CHARACTER_SET
# careful here: changes made here, will not be backwards compatible
def get_random_id():
    """Return a cryptographically random short-URL id of SHORT_URL_LENGTH chars."""
    # Instantiate the OS-backed RNG once, not once per character as before
    rng = random.SystemRandom()
    return ''.join(rng.choice(SHORT_URL_CHARACTER_SET) for _ in range(SHORT_URL_LENGTH))
def hash_message(string_to_hash: str, with_passphrase: bool = True, custom_salt: str = None):
    """
    Return the hex SHA-256 digest of *string_to_hash*.

    When *with_passphrase* is true, the message is suffixed with *custom_salt*
    (if given) or the configured TRITLI_SALT before hashing.
    """
    payload = string_to_hash
    if with_passphrase:
        payload += custom_salt if custom_salt else TRITLI_SALT
    return hashlib.sha256(payload.encode('utf-8')).hexdigest()
def clean_string(string_to_clean, final_length):
    """
    Normalize a string for use as a tryte field of fixed length.

    Digits and runs of non-word characters are replaced by '9', the result is
    truncated to *final_length*, uppercased, and right-padded with '9'.

    :param string_to_clean: input text
    :param final_length: exact length of the returned string
    :return: uppercase string of exactly *final_length* characters
    """
    # Raw strings: '\d' / '\W' in a plain literal is an invalid escape
    # sequence (SyntaxWarning on modern Python)
    # delete numbers not allowed in tag
    cleaned_string = re.sub(r'\d', '9', string_to_clean)
    # remove special characters
    cleaned_string = re.sub(r'\W+', '9', cleaned_string)
    # cut to the supported length
    if len(cleaned_string) > final_length:
        cleaned_string = cleaned_string[:final_length]
    # convert to uppercase and pad with '9' if the string is too short
    return cleaned_string.upper().ljust(final_length, '9')
def prepare_tag(tag: str):
    """Return *tag* normalized to the 27-character IOTA tag format."""
    tag_length = 27  # maximum length supported for tags
    return clean_string(tag, tag_length)
def prepare_address(hash_string: str):
    """
    Base64-encode *hash_string* and clean it into an 80-character address body.
    """
    # 81 - 1: one character short of a full IOTA address — presumably the
    # final char is supplied elsewhere; verify against callers
    encoded = base64.b64encode(hash_string.encode("utf-8")).decode("utf-8")
    return clean_string(encoded, 81 - 1)
def prepare_address_tryte_hash(string_to_address: str):
    """
    SHA-256 hash *string_to_address*, encode the digest as ASCII trytes, and
    pad the result with '9' to an 80-character address body.
    """
    # One char short of a full 81-char IOTA address; '9' is the tryte pad char
    address_length = 81 - 1
    h = hashlib.sha256()
    h.update(string_to_address.encode('utf-8'))
    hash_bytes = h.digest()
    # AsciiTrytesCodec.encode returns (encoded_bytes, length); keep the bytes
    codec = AsciiTrytesCodec()
    hash_trytes = codec.encode(input=hash_bytes, errors="strict")[0]
    hash_trytes_string = hash_trytes.decode("utf-8")
    hash_trytes_string = hash_trytes_string.upper().ljust(address_length, '9')
    return hash_trytes_string
def get_key(dictionary: dict, key: str):
    """Return dictionary[key], or None when the key is absent."""
    # dict.get performs a single lookup instead of a membership test + index
    return dictionary.get(key)
import random
import hashlib
import base64
from iota import AsciiTrytesCodec
from config import TRITLI_SALT, SHORT_URL_LENGTH, SHORT_URL_CHARACTER_SET
# careful here: changes made here, will not be backwards compatible
def get_random_id():
return ''.join(random.SystemRandom().choice(SHORT_URL_CHARACTER_SET) for _ in range(SHORT_URL_LENGTH))
def hash_message(string_to_hash: str, with_passphrase: bool = True, custom_salt: str = None):
if with_passphrase:
if custom_salt:
string_to_hash = string_to_hash + custom_salt
else:
string_to_hash = string_to_hash + TRITLI_SALT
h = hashlib.sha256()
h.update(string_to_hash.encode('utf-8'))
return h.hexdigest()
def clean_string(string_to_clean, final_length):
# delete numbers not allowed in tag
cleaned_string = re.sub('\d', '9', string_to_clean)
# remove special characters
cleaned_string = re.sub('\W+', '9', cleaned_string)
# cut to the supported length of 27
if len(cleaned_string) > final_length:
cleaned_string = cleaned_string[:final_length]
# convert to uppercase and fill to 27 characters, if string is too short
cleaned_string = cleaned_string.upper().ljust(final_length, '9')
return cleaned_string
def prepare_tag(tag: str):
tag_length = 27
return clean_string(tag, tag_length)
def prepare_address(hash_string: str):
address_length = 81 - 1
# encode to base64 first
hashed_string = base64.b64encode(hash_string.encode("utf-8")).decode("utf-8")
return clean_string(hashed_string, address_length)
def prepare_address_tryte_hash(string_to_address: str):
address_length = 81 - 1
h = hashlib.sha256()
h.update(string_to_address.encode('utf-8'))
hash_bytes = h.digest()
codec = AsciiTrytesCodec()
hash_trytes = codec.encode(input=hash_bytes, errors="strict")[0]
hash_trytes_string = hash_trytes.decode("utf-8")
hash_trytes_string = hash_trytes_string.upper().ljust(address_length, '9')
return hash_trytes_string
def get_key(dictionary: dict, key: str):
if key in dictionary.keys():
return dictionary[key]
else:
return None | 0.404625 | 0.14016 |
import multiprocessing
import boto3
from kinesis.producer import AsyncProducer
class GEAsyncProducer(AsyncProducer):
    """
    Overridden AsyncProducer from the kinesis-python package.
    Provides the ability to change the client setup as well, specifically the
    endpoint_url.
    """
    # https://github.com/NerdWalletOSS/kinesis-python/blob/master/src/kinesis/producer.py#L64
    def __init__(self, stream_name, buffer_time, queue, max_count=None,
                 max_size=None, boto3_session=None, boto3_client_settings=None):
        self.stream_name = stream_name
        self.buffer_time = buffer_time
        self.queue = queue
        self.records = []
        self.next_records = []
        self.alive = True
        # MAX_COUNT / MAX_SIZE come from the parent AsyncProducer class
        self.max_count = max_count or self.MAX_COUNT
        self.max_size = max_size or self.MAX_SIZE
        boto3_client_settings = boto3_client_settings or {}
        if boto3_session is None:
            boto3_session = boto3.Session()
        # Caller-supplied settings (e.g. endpoint_url) override the defaults
        client_settings = {"service_name": "kinesis"}
        client_settings.update(boto3_client_settings)
        self.client = boto3_session.client(**client_settings)
        # NOTE(review): parent __init__ is not called — this re-implements it
        # wholesale before starting the background process; confirm against
        # the pinned upstream version when upgrading kinesis-python
        self.start()
# Based on https://github.com/NerdWalletOSS/kinesis-python/blob/master/src/kinesis/producer.py:class KinesisProducer
class GEKinesisProducer:
    """
    Queue-backed Kinesis producer facade: records are pushed onto a shared
    queue that a GEAsyncProducer drains in the background.
    """

    def __init__(self, stream_name, buffer_time=0.5, max_count=None,
                 max_size=None, boto3_session=None, boto3_client_settings=None, producer_class=None, queue=None):
        # NOTE(review): producer_class is accepted but never used — confirm intent
        self.queue = queue or multiprocessing.Queue()
        self.async_producer = GEAsyncProducer(
            stream_name=stream_name,
            buffer_time=buffer_time,
            queue=self.queue,
            max_count=max_count,
            max_size=max_size,
            boto3_session=boto3_session,
            boto3_client_settings=boto3_client_settings,
        )

    def put(self, data, explicit_hash_key=None, partition_key=None):
        """Enqueue one record as a (data, explicit_hash_key, partition_key) tuple."""
        self.queue.put((data, explicit_hash_key, partition_key))
import boto3
from kinesis.producer import AsyncProducer
class GEAsyncProducer(AsyncProducer):
"""
Overriden AsyncProducer from kinesis-python package.
Provides the ability to change the client setup as well, specifically the
endpoint_url.
"""
# https://github.com/NerdWalletOSS/kinesis-python/blob/master/src/kinesis/producer.py#L64
def __init__(self, stream_name, buffer_time, queue, max_count=None,
max_size=None, boto3_session=None, boto3_client_settings=None):
self.stream_name = stream_name
self.buffer_time = buffer_time
self.queue = queue
self.records = []
self.next_records = []
self.alive = True
self.max_count = max_count or self.MAX_COUNT
self.max_size = max_size or self.MAX_SIZE
boto3_client_settings = boto3_client_settings or {}
if boto3_session is None:
boto3_session = boto3.Session()
client_settings = {"service_name": "kinesis"}
client_settings.update(boto3_client_settings)
self.client = boto3_session.client(**client_settings)
self.start()
# Based on https://github.com/NerdWalletOSS/kinesis-python/blob/master/src/kinesis/producer.py:class KinesisProducer
class GEKinesisProducer:
def __init__(self, stream_name, buffer_time=0.5, max_count=None,
max_size=None, boto3_session=None, boto3_client_settings=None, producer_class=None, queue=None):
self.queue = queue or multiprocessing.Queue()
kwargs = {
"stream_name": stream_name,
"buffer_time": buffer_time,
"queue": self.queue,
"max_count": max_count,
"max_size": max_size,
"boto3_session": boto3_session,
"boto3_client_settings": boto3_client_settings
}
self.async_producer = GEAsyncProducer(**kwargs)
def put(self, data, explicit_hash_key=None, partition_key=None):
self.queue.put((data, explicit_hash_key, partition_key)) | 0.767908 | 0.189128 |
import logging
import numpy as np
import rasterio
from skimage import exposure
from tqdm import tqdm
from tqdm.contrib.logging import logging_redirect_tqdm
from satproc.utils import sliding_windows
__author__ = "<NAME>"
__copyright__ = "Dymaxion Labs"
__license__ = "Apache-2.0"
_logger = logging.getLogger(__name__)
# TODO add win size and step size
def read_window(ds, window):
    """Read image data from a rasterio dataset over a window.

    NaNs are coerced to zero.

    Parameters
    ----------
    ds : rasterio.Dataset
        input dataset
    window : rasterio.windows.Window
        window to read from

    Returns
    -------
    numpy.ndarray
        image data on window, band axis last
    """
    raw = ds.read(window=window)
    cleaned = np.nan_to_num(raw)
    # dstack stacks the 2-D bands along a new last axis
    return np.dstack(cleaned)
def write_window(img, ds, window):
    """Write a band-last image array to a raster window.

    Parameters
    ----------
    img : numpy.ndarray
        image array, band axis last
    ds : rasterio.Dataset
        dataset to write to (must be opened with write access)
    window : rasterio.windows.Window
        window to write to

    Returns
    -------
    None
    """
    # Re-order to band-first layout before handing off to the dataset
    band_count = img.shape[2]
    band_first = np.array([img[:, :, band] for band in range(band_count)])
    ds.write(band_first, window=window)
def match_histograms(src_path, dst_path, size=128, step_size=128, *, reference_path):
    """Match the histogram of a raster to a reference raster, window by window.

    Parameters
    ----------
    src_path : str
        path to input raster
    dst_path : str
        path to output raster
    size : int
        size of windows
    step_size : int
        step size, when sliding windows
    reference_path : str
        path to the reference raster

    Returns
    -------
    None
    """
    with rasterio.open(src_path) as src:
        profile = src.profile.copy()
        windows = list(
            sliding_windows(
                (size, size),
                (step_size, step_size),
                src.width,
                src.height,
            )
        )
        # Open reference and destination together; redirect logging so tqdm's
        # progress bar is not broken by log lines
        with rasterio.open(reference_path) as ref, rasterio.open(
            dst_path, "w", **profile
        ) as dst, logging_redirect_tqdm():
            progress = tqdm(
                list(enumerate(windows)), ascii=True, desc="Match histograms"
            )
            for c, (win, (i, j)) in progress:
                _logger.debug("%s %s", win, (i, j))
                src_img = read_window(src, win)
                ref_img = read_window(ref, win)
                matched = exposure.match_histograms(
                    src_img, ref_img, multichannel=True
                )
                write_window(matched, dst, win)
import numpy as np
import rasterio
from skimage import exposure
from tqdm import tqdm
from tqdm.contrib.logging import logging_redirect_tqdm
from satproc.utils import sliding_windows
__author__ = "<NAME>"
__copyright__ = "Dymaxion Labs"
__license__ = "Apache-2.0"
_logger = logging.getLogger(__name__)
# TODO add win size and step size
def read_window(ds, window):
"""Read from a rasterio dataset using a window
NaNs are coerced to zero.
Parameters
----------
ds : rasterio.Dataset
input dataset
window : rasterio.windows.Window
window to read from
Returns
-------
numpy.ndarray
image data on window
"""
img = ds.read(window=window)
img = np.nan_to_num(img)
return np.dstack(img)
def write_window(img, ds, window):
"""Write array to raster on window
Parameters
----------
img : numpy.ndarray
image array
ds : rasterio.Dataset
dataset to write to (must be opened with write access)
window : rasterio.windows.Window
window to write to
Returns
-------
None
"""
new_img = np.array([img[:, :, i] for i in range(img.shape[2])])
ds.write(new_img, window=window)
def match_histograms(src_path, dst_path, size=128, step_size=128, *, reference_path):
"""Match histograms of an image using another one as reference
Parameters
----------
src_path : str
path to input raster
dst_path : str
path to output raster
size : int
size of windows
step_size : int
step size, when sliding windows
reference_path : str
path to the reference raster
Returns
-------
None
"""
with rasterio.open(src_path) as src:
profile = src.profile.copy()
windows = list(
sliding_windows(
(size, size),
(step_size, step_size),
src.width,
src.height,
)
)
with rasterio.open(reference_path) as ref:
with rasterio.open(dst_path, "w", **profile) as dst:
with logging_redirect_tqdm():
for c, (win, (i, j)) in tqdm(
list(enumerate(windows)), ascii=True, desc="Match histograms"
):
_logger.debug("%s %s", win, (i, j))
img = read_window(src, win)
ref_img = read_window(ref, win)
matched_img = exposure.match_histograms(
img, ref_img, multichannel=True
)
write_window(matched_img, dst, win) | 0.709422 | 0.273957 |
from neomodel import db
from abc import ABC, abstractmethod, abstractproperty
from typing import List
__all__ = ['centrality_algs', 'AbstractGraphAlg']
class AbstractGraphAlg(ABC):
    """
    Base class for centrality algorithms run over the TextNode graph.

    Subclasses must provide the ``query`` and ``name`` properties.
    """

    def __init__(self, processor, algorithm, min_val=0):
        self.processor = processor
        # algorithm must expose a .name, used to filter ALG relationships
        self.algorithm = algorithm
        # minimum edge intersection weight for an edge to be considered
        self.min_val = min_val

    def _node_query(self):
        """Cypher fragment selecting every TextNode id."""
        return 'MATCH (n:TextNode) RETURN id(n) as id'

    def _rel_query(self):
        """Cypher fragment selecting the weighted edges of the current algorithm."""
        # NOTE(review): algorithm.name / min_val are interpolated directly into
        # Cypher — safe only for trusted values; parameterize if they can ever
        # come from user input
        return f"""
        MATCH (n:TextNode)-[r:ALG]-(n2:TextNode)
        WHERE r.algorithm_name = '{self.algorithm.name}'
        AND r.intersection > {self.min_val}
        RETURN id(n) as source, id(n2) as target, r.intersection as weight
        """

    # @abstractproperty is deprecated since Python 3.3; the idiomatic
    # replacement is stacking @property over @abstractmethod
    @property
    @abstractmethod
    def query(self):
        """Full Cypher query returning (order_id, label, score) rows."""

    @property
    @abstractmethod
    def name(self):
        """Human-readable algorithm name."""

    def exec_query(self):
        """Execute the query.

        :rtype: List[int, str, float]
        :return: list of (order_id, label, score) tuples
        """
        res, meta = db.cypher_query(self.query)
        res = [(int(order_id), label, float(score))
               for order_id, label, score in res]
        return res
class AverageIntersectionCentrality(AbstractGraphAlg):
    """Scores each node by the average intersection weight of its ALG edges."""

    @property
    def name(self):
        # Russian for "Average intersection"
        return "Среднее пересечение"

    @property
    def query(self):
        alg_name = self.algorithm.name
        return f"""
        MATCH (n:TextNode)
        OPTIONAL MATCH (n)-[r:ALG]-(n2:TextNode)
        WHERE r.algorithm_name='{alg_name}'
        WITH avg(r.intersection) as intersection, n as n
        RETURN n.order_id, n.label, CASE intersection WHEN null THEN 0
        ELSE round(intersection * 10000) / 10000 END
        ORDER BY n.order_id
        """
class EigenvectorCentrality(AbstractGraphAlg):
    """Eigenvector centrality, computed server-side by the Neo4j algo plugin."""

    @property
    def name(self):
        # Russian transliteration of "Eigenvector"
        return "Эйгенвектор"

    @property
    def query(self):
        template = """
        CALL algo.eigenvector.stream("%s", "%s", {
            graph: 'cypher',
            weightProperty: 'weight',
            normalization: 'max',
            write: false
        })
        YIELD nodeId, score
        RETURN algo.asNode(nodeId).order_id as order_id,
        algo.asNode(nodeId).label AS label,
        round(score * 10000) / 10000
        ORDER BY score DESC
        """
        return template % (self._node_query(), self._rel_query())
class PageRankCentrality(AbstractGraphAlg):
    """Weighted PageRank over the undirected ALG graph."""

    @property
    def name(self):
        return "PageRank"

    @property
    def query(self):
        template = """
        CALL algo.pageRank.stream("%s", "%s", {
            graph: 'cypher',
            direction: 'BOTH',
            weightProperty: 'weight',
            write: false
        })
        YIELD nodeId, score
        RETURN algo.asNode(nodeId).order_id as order_id,
        algo.asNode(nodeId).label AS label,
        round(score * 10000) / 10000
        ORDER BY score DESC
        """
        return template % (self._node_query(), self._rel_query())
class ArticleRankCentrality(AbstractGraphAlg):
    """ArticleRank (a PageRank variant) over the undirected ALG graph."""

    @property
    def name(self):
        return "ArticleRank"

    @property
    def query(self):
        template = """
        CALL algo.articleRank.stream("%s", "%s", {
            graph: 'cypher',
            direction: 'BOTH',
            weightProperty: 'weight',
            write: false
        })
        YIELD nodeId, score
        RETURN algo.asNode(nodeId).order_id as order_id,
        algo.asNode(nodeId).label AS label,
        round(score * 10000) / 10000
        ORDER BY score DESC
        """
        return template % (self._node_query(), self._rel_query())
class BeetweennessCentrality(AbstractGraphAlg):
    """Betweenness centrality over the undirected ALG graph."""

    @property
    def name(self):
        return "Betweenness"

    @property
    def query(self):
        template = """
        CALL algo.betweenness.stream("%s", "%s", {
            graph: 'cypher',
            direction: 'BOTH',
            write: false
        })
        YIELD nodeId, centrality as score
        RETURN algo.asNode(nodeId).order_id as order_id,
        algo.asNode(nodeId).label AS label,
        round(score * 10000) / 10000
        ORDER BY score DESC
        """
        return template % (self._node_query(), self._rel_query())
class ClosenessCentrality(AbstractGraphAlg):
    """Closeness centrality over the ALG graph."""

    @property
    def name(self):
        return "Closeness"

    @property
    def query(self):
        template = """
        CALL algo.closeness.stream("%s", "%s", {
            graph: 'cypher',
            write: false
        })
        YIELD nodeId, centrality as score
        RETURN algo.asNode(nodeId).order_id as order_id,
        algo.asNode(nodeId).label AS label,
        round(score * 10000) / 10000
        ORDER BY score DESC
        """
        return template % (self._node_query(), self._rel_query())
class HarmonicCentrality(AbstractGraphAlg):
@property
def name(self):
return "Harmonic Closeness"
@property
def query(self):
return """
CALL algo.closeness.harmonic.stream("%s", "%s", {
graph: 'cypher',
write: false
})
YIELD nodeId, centrality as score
RETURN algo.asNode(nodeId).order_id as order_id,
algo.asNode(nodeId).label AS label,
round(score * 10000) / 10000
ORDER BY score DESC
""" % (self._node_query(), self._rel_query())
centrality_algs = [EigenvectorCentrality, AverageIntersectionCentrality,
PageRankCentrality, ArticleRankCentrality,
BeetweennessCentrality, ClosenessCentrality,
HarmonicCentrality] | src/api/graph_algs/centrality.py | from neomodel import db
from abc import ABC, abstractmethod, abstractproperty
from typing import List
__all__ = ['centrality_algs', 'AbstractGraphAlg']
class AbstractGraphAlg(ABC):
    """Base class for graph-centrality queries run against Neo4j.

    Subclasses supply a display :attr:`name` and a Cypher :attr:`query`;
    the shared node/relationship sub-queries project the TextNode graph
    restricted to one similarity algorithm's edges.
    """

    def __init__(self, processor, algorithm, min_val=0):
        # processor: owning text processor (opaque here — only stored).
        # algorithm: similarity algorithm whose edges are used; only its
        #            ``.name`` attribute is read.
        # min_val:   minimum edge weight (r.intersection) an edge must
        #            exceed to be included.
        self.processor = processor
        self.algorithm = algorithm
        self.min_val = min_val

    def _node_query(self):
        # Node projection for the 'cypher' graph loader.
        return 'MATCH (n:TextNode) RETURN id(n) as id'

    def _rel_query(self):
        # Relationship projection filtered to this algorithm's edges.
        # NOTE(review): algorithm.name / min_val are interpolated directly
        # into Cypher; they are trusted internal values, not user input.
        return f"""
        MATCH (n:TextNode)-[r:ALG]-(n2:TextNode)
        WHERE r.algorithm_name = '{self.algorithm.name}'
        AND r.intersection > {self.min_val}
        RETURN id(n) as source, id(n2) as target, r.intersection as weight
        """

    # ``abc.abstractproperty`` is deprecated since Python 3.3; the
    # @property + @abstractmethod stack is the supported equivalent.
    @property
    @abstractmethod
    def query(self):
        """Cypher query returning (order_id, label, score) rows."""

    @property
    @abstractmethod
    def name(self):
        """Human-readable algorithm name."""

    def exec_query(self):
        """Выполнить запрос (run the query).

        :rtype: List[Tuple[int, str, float]]
        :return: список (list of) (order_id, label, score) tuples
        """
        res, meta = db.cypher_query(self.query)
        return [(int(order_id), label, float(score))
                for order_id, label, score in res]
class AverageIntersectionCentrality(AbstractGraphAlg):
    """Mean edge weight (r.intersection) per node, used as a simple centrality."""

    @property
    def name(self):
        # Russian for "average intersection".
        return "Среднее пересечение"

    @property
    def query(self):
        # Pure-Cypher aggregate (no algo library).  OPTIONAL MATCH keeps
        # isolated nodes; their NULL average is mapped to 0.
        return f"""
        MATCH (n:TextNode)
        OPTIONAL MATCH (n)-[r:ALG]-(n2:TextNode)
        WHERE r.algorithm_name='{self.algorithm.name}'
        WITH avg(r.intersection) as intersection, n as n
        RETURN n.order_id, n.label, CASE intersection WHEN null THEN 0
        ELSE round(intersection * 10000) / 10000 END
        ORDER BY n.order_id
        """


class EigenvectorCentrality(AbstractGraphAlg):
    """Eigenvector centrality, max-normalized."""

    @property
    def name(self):
        # Russian transliteration of "eigenvector".
        return "Эйгенвектор"

    @property
    def query(self):
        return """
        CALL algo.eigenvector.stream("%s", "%s", {
            graph: 'cypher',
            weightProperty: 'weight',
            normalization: 'max',
            write: false
        })
        YIELD nodeId, score
        RETURN algo.asNode(nodeId).order_id as order_id,
            algo.asNode(nodeId).label AS label,
            round(score * 10000) / 10000
        ORDER BY score DESC
        """ % (self._node_query(), self._rel_query())


class PageRankCentrality(AbstractGraphAlg):
    """PageRank centrality streamed from the Neo4j Graph Algorithms library."""

    @property
    def name(self):
        return "PageRank"

    @property
    def query(self):
        # %s placeholders receive the shared node/relationship sub-queries.
        return """
        CALL algo.pageRank.stream("%s", "%s", {
            graph: 'cypher',
            direction: 'BOTH',
            weightProperty: 'weight',
            write: false
        })
        YIELD nodeId, score
        RETURN algo.asNode(nodeId).order_id as order_id,
            algo.asNode(nodeId).label AS label,
            round(score * 10000) / 10000
        ORDER BY score DESC
        """ % (self._node_query(), self._rel_query())


class ArticleRankCentrality(AbstractGraphAlg):
    """ArticleRank centrality (a PageRank variant)."""

    @property
    def name(self):
        return "ArticleRank"

    @property
    def query(self):
        return """
        CALL algo.articleRank.stream("%s", "%s", {
            graph: 'cypher',
            direction: 'BOTH',
            weightProperty: 'weight',
            write: false
        })
        YIELD nodeId, score
        RETURN algo.asNode(nodeId).order_id as order_id,
            algo.asNode(nodeId).label AS label,
            round(score * 10000) / 10000
        ORDER BY score DESC
        """ % (self._node_query(), self._rel_query())


class BeetweennessCentrality(AbstractGraphAlg):
    # NOTE(review): class name misspells "Betweenness"; kept for
    # compatibility with existing imports.
    """Betweenness centrality."""

    @property
    def name(self):
        return "Betweenness"

    @property
    def query(self):
        # Yields ``centrality`` (aliased to score); takes no weightProperty.
        return """
        CALL algo.betweenness.stream("%s", "%s", {
            graph: 'cypher',
            direction: 'BOTH',
            write: false
        })
        YIELD nodeId, centrality as score
        RETURN algo.asNode(nodeId).order_id as order_id,
            algo.asNode(nodeId).label AS label,
            round(score * 10000) / 10000
        ORDER BY score DESC
        """ % (self._node_query(), self._rel_query())


class ClosenessCentrality(AbstractGraphAlg):
    """Closeness centrality."""

    @property
    def name(self):
        return "Closeness"

    @property
    def query(self):
        return """
        CALL algo.closeness.stream("%s", "%s", {
            graph: 'cypher',
            write: false
        })
        YIELD nodeId, centrality as score
        RETURN algo.asNode(nodeId).order_id as order_id,
            algo.asNode(nodeId).label AS label,
            round(score * 10000) / 10000
        ORDER BY score DESC
        """ % (self._node_query(), self._rel_query())


class HarmonicCentrality(AbstractGraphAlg):
    """Harmonic closeness centrality."""

    @property
    def name(self):
        return "Harmonic Closeness"

    @property
    def query(self):
        return """
        CALL algo.closeness.harmonic.stream("%s", "%s", {
            graph: 'cypher',
            write: false
        })
        YIELD nodeId, centrality as score
        RETURN algo.asNode(nodeId).order_id as order_id,
            algo.asNode(nodeId).label AS label,
            round(score * 10000) / 10000
        ORDER BY score DESC
        """ % (self._node_query(), self._rel_query())
centrality_algs = [EigenvectorCentrality, AverageIntersectionCentrality,
PageRankCentrality, ArticleRankCentrality,
BeetweennessCentrality, ClosenessCentrality,
HarmonicCentrality] | 0.871775 | 0.283719 |
import torch
import torch.nn as nn
import torchvision
__all__ = ['AlexNet', 'alexnet']
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
}
class AlexNet(nn.Module):
    """AlexNet backbone with HAT-style (hard attention to the task) gating.

    One sigmoid-gated embedding per gated layer and per task scales that
    layer's channels/units; one output head per task lives in ``self.last``.
    """

    def __init__(self, taskcla):
        # taskcla: list of (task_id, n_classes) pairs.
        super(AlexNet, self).__init__()
        self.taskcla = taskcla
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.dropout = nn.Dropout()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2)
        self.conv2 = nn.Conv2d(64, 192, kernel_size=5, padding=2)
        self.conv3 = nn.Conv2d(192, 384, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(384, 256, kernel_size=3, padding=1)
        self.conv5 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.fc1 = nn.Linear(256 * 6 * 6, 4096)
        self.fc2 = nn.Linear(4096, 4096)
        self.last = torch.nn.ModuleList()
        for t, n in self.taskcla:
            self.last.append(torch.nn.Linear(4096, n))
        # Spatial side of the feature map that is flattened into fc1.
        self.smid = 6
        self.gate = torch.nn.Sigmoid()
        # All embedding stuff should start with 'e': one gate embedding per
        # gated layer, one row per task.
        self.ec1 = torch.nn.Embedding(len(self.taskcla), 64)
        self.ec2 = torch.nn.Embedding(len(self.taskcla), 192)
        self.ec3 = torch.nn.Embedding(len(self.taskcla), 384)
        self.ec4 = torch.nn.Embedding(len(self.taskcla), 256)
        self.ec5 = torch.nn.Embedding(len(self.taskcla), 256)
        self.efc1 = torch.nn.Embedding(len(self.taskcla), 4096)
        self.efc2 = torch.nn.Embedding(len(self.taskcla), 4096)

    def forward(self, x, t, s, mask_return=False):
        """Gated forward pass for task ``t`` with gate temperature ``s``.

        Returns a list with one output tensor per task head (plus the masks
        when ``mask_return`` is True).
        """
        masks = self.mask(t, s=s)
        gc1, gc2, gc3, gc4, gc5, gfc1, gfc2 = masks
        # Each layer's activation is scaled channel-wise by its task gate.
        x = self.maxpool(self.relu(self.conv1(x)))
        x = x * gc1.view(1, -1, 1, 1).expand_as(x)
        x = self.maxpool(self.relu(self.conv2(x)))
        x = x * gc2.view(1, -1, 1, 1).expand_as(x)
        x = self.relu(self.conv3(x))
        x = x * gc3.view(1, -1, 1, 1).expand_as(x)
        x = self.relu(self.conv4(x))
        x = x * gc4.view(1, -1, 1, 1).expand_as(x)
        x = self.maxpool(self.relu(self.conv5(x)))
        x = x * gc5.view(1, -1, 1, 1).expand_as(x)
        x = torch.flatten(x, 1)
        x = self.dropout(self.relu(self.fc1(x)))
        x = x * gfc1.expand_as(x)
        x = self.dropout(self.relu(self.fc2(x)))
        x = x * gfc2.expand_as(x)
        # One output per task head.  (Loop variable renamed so it no longer
        # shadows the ``t`` argument, as it did in the original.)
        y = []
        for task, _ in self.taskcla:
            y.append(self.last[task](x))
        if mask_return:
            return y, masks
        return y

    def mask(self, t, s=1):
        """Per-layer sigmoid gates for task ``t``; larger ``s`` sharpens them."""
        gc1 = self.gate(s * self.ec1(t))
        gc2 = self.gate(s * self.ec2(t))
        gc3 = self.gate(s * self.ec3(t))
        gc4 = self.gate(s * self.ec4(t))
        gc5 = self.gate(s * self.ec5(t))
        gfc1 = self.gate(s * self.efc1(t))
        gfc2 = self.gate(s * self.efc2(t))
        return [gc1, gc2, gc3, gc4, gc5, gfc1, gfc2]

    def get_view_for(self, n, masks):
        """Mask view matching parameter ``n``'s shape (min of out/in gates).

        Bug fixes vs. the original: parameter keys now match the real module
        names (``conv1`` ... ``conv5`` instead of ``c1``/``c6``, which never
        matched and whose branches referenced nonexistent attributes), and
        the ``fc1.weight`` branch uses ``gc5`` — the last conv gate — rather
        than the undefined ``gc6`` (NameError in the original).

        Returns None for parameters that are not gated (e.g. task heads).
        """
        gc1, gc2, gc3, gc4, gc5, gfc1, gfc2 = masks
        if n == 'fc1.weight':
            post = gfc1.data.view(-1, 1).expand_as(self.fc1.weight)
            # Broadcast the conv5 channel gate over the smid x smid spatial
            # positions that were flattened into fc1's input.
            pre = gc5.data.view(-1, 1, 1).expand(
                (gc5.data.numel(), self.smid, self.smid)
            ).contiguous().view(1, -1).expand_as(self.fc1.weight)
            return torch.min(post, pre)
        elif n == 'fc1.bias':
            return gfc1.data.view(-1)
        elif n == 'fc2.weight':
            post = gfc2.data.view(-1, 1).expand_as(self.fc2.weight)
            pre = gfc1.data.view(1, -1).expand_as(self.fc2.weight)
            return torch.min(post, pre)
        elif n == 'fc2.bias':
            return gfc2.data.view(-1)
        elif n == 'conv1.weight':
            return gc1.data.view(-1, 1, 1, 1).expand_as(self.conv1.weight)
        elif n == 'conv1.bias':
            return gc1.data.view(-1)
        elif n == 'conv2.weight':
            post = gc2.data.view(-1, 1, 1, 1).expand_as(self.conv2.weight)
            pre = gc1.data.view(1, -1, 1, 1).expand_as(self.conv2.weight)
            return torch.min(post, pre)
        elif n == 'conv2.bias':
            return gc2.data.view(-1)
        elif n == 'conv3.weight':
            post = gc3.data.view(-1, 1, 1, 1).expand_as(self.conv3.weight)
            pre = gc2.data.view(1, -1, 1, 1).expand_as(self.conv3.weight)
            return torch.min(post, pre)
        elif n == 'conv3.bias':
            return gc3.data.view(-1)
        elif n == 'conv4.weight':
            post = gc4.data.view(-1, 1, 1, 1).expand_as(self.conv4.weight)
            pre = gc3.data.view(1, -1, 1, 1).expand_as(self.conv4.weight)
            return torch.min(post, pre)
        elif n == 'conv4.bias':
            return gc4.data.view(-1)
        elif n == 'conv5.weight':
            post = gc5.data.view(-1, 1, 1, 1).expand_as(self.conv5.weight)
            pre = gc4.data.view(1, -1, 1, 1).expand_as(self.conv5.weight)
            return torch.min(post, pre)
        elif n == 'conv5.bias':
            return gc5.data.view(-1)
        return None
def alexnet(taskcla, pretrained=False):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper,
    extended with per-task gates/heads (see :class:`AlexNet`).

    Args:
        taskcla: list of (task_id, n_classes) pairs.
        pretrained (bool): If True, copy ImageNet weights from torchvision's
            AlexNet into the matching layers; task heads are left untouched.
    """
    model = AlexNet(taskcla)
    if pretrained:
        pre_state = torchvision.models.alexnet(pretrained=True).state_dict()
        own_state = model.state_dict()
        # NOTE(review): relies on both state dicts listing the shared
        # conv/fc layers in the same order — confirm if layers change.
        with torch.no_grad():
            for key1, key2 in zip(own_state.keys(), pre_state.keys()):
                if 'last' in key1:
                    # Task heads have no pretrained counterpart.
                    break
                # In-place copy_ actually updates the model's tensors; the
                # original's ``state_dict()[k] = v`` branch only mutated a
                # throwaway dict (a no-op), and its debug print loops over
                # every key are removed.
                own_state[key1].copy_(pre_state[key2])
    return model
import torch.nn as nn
import torchvision
__all__ = ['AlexNet', 'alexnet']
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
}
class AlexNet(nn.Module):
    """AlexNet backbone with HAT-style (hard attention to the task) gating.

    One sigmoid-gated embedding per gated layer and per task scales that
    layer's channels/units; one output head per task lives in ``self.last``.
    """

    def __init__(self, taskcla):
        # taskcla: list of (task_id, n_classes) pairs.
        super(AlexNet, self).__init__()
        self.taskcla = taskcla
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.dropout = nn.Dropout()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2)
        self.conv2 = nn.Conv2d(64, 192, kernel_size=5, padding=2)
        self.conv3 = nn.Conv2d(192, 384, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(384, 256, kernel_size=3, padding=1)
        self.conv5 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.fc1 = nn.Linear(256 * 6 * 6, 4096)
        self.fc2 = nn.Linear(4096, 4096)
        self.last = torch.nn.ModuleList()
        for t, n in self.taskcla:
            self.last.append(torch.nn.Linear(4096, n))
        # Spatial side of the feature map that is flattened into fc1.
        self.smid = 6
        self.gate = torch.nn.Sigmoid()
        # All embedding stuff should start with 'e': one gate embedding per
        # gated layer, one row per task.
        self.ec1 = torch.nn.Embedding(len(self.taskcla), 64)
        self.ec2 = torch.nn.Embedding(len(self.taskcla), 192)
        self.ec3 = torch.nn.Embedding(len(self.taskcla), 384)
        self.ec4 = torch.nn.Embedding(len(self.taskcla), 256)
        self.ec5 = torch.nn.Embedding(len(self.taskcla), 256)
        self.efc1 = torch.nn.Embedding(len(self.taskcla), 4096)
        self.efc2 = torch.nn.Embedding(len(self.taskcla), 4096)

    def forward(self, x, t, s, mask_return=False):
        """Gated forward pass for task ``t`` with gate temperature ``s``.

        Returns a list with one output tensor per task head (plus the masks
        when ``mask_return`` is True).
        """
        masks = self.mask(t, s=s)
        gc1, gc2, gc3, gc4, gc5, gfc1, gfc2 = masks
        # Each layer's activation is scaled channel-wise by its task gate.
        x = self.maxpool(self.relu(self.conv1(x)))
        x = x * gc1.view(1, -1, 1, 1).expand_as(x)
        x = self.maxpool(self.relu(self.conv2(x)))
        x = x * gc2.view(1, -1, 1, 1).expand_as(x)
        x = self.relu(self.conv3(x))
        x = x * gc3.view(1, -1, 1, 1).expand_as(x)
        x = self.relu(self.conv4(x))
        x = x * gc4.view(1, -1, 1, 1).expand_as(x)
        x = self.maxpool(self.relu(self.conv5(x)))
        x = x * gc5.view(1, -1, 1, 1).expand_as(x)
        x = torch.flatten(x, 1)
        x = self.dropout(self.relu(self.fc1(x)))
        x = x * gfc1.expand_as(x)
        x = self.dropout(self.relu(self.fc2(x)))
        x = x * gfc2.expand_as(x)
        # One output per task head.  (Loop variable renamed so it no longer
        # shadows the ``t`` argument, as it did in the original.)
        y = []
        for task, _ in self.taskcla:
            y.append(self.last[task](x))
        if mask_return:
            return y, masks
        return y

    def mask(self, t, s=1):
        """Per-layer sigmoid gates for task ``t``; larger ``s`` sharpens them."""
        gc1 = self.gate(s * self.ec1(t))
        gc2 = self.gate(s * self.ec2(t))
        gc3 = self.gate(s * self.ec3(t))
        gc4 = self.gate(s * self.ec4(t))
        gc5 = self.gate(s * self.ec5(t))
        gfc1 = self.gate(s * self.efc1(t))
        gfc2 = self.gate(s * self.efc2(t))
        return [gc1, gc2, gc3, gc4, gc5, gfc1, gfc2]

    def get_view_for(self, n, masks):
        """Mask view matching parameter ``n``'s shape (min of out/in gates).

        Bug fixes vs. the original: parameter keys now match the real module
        names (``conv1`` ... ``conv5`` instead of ``c1``/``c6``, which never
        matched and whose branches referenced nonexistent attributes), and
        the ``fc1.weight`` branch uses ``gc5`` — the last conv gate — rather
        than the undefined ``gc6`` (NameError in the original).

        Returns None for parameters that are not gated (e.g. task heads).
        """
        gc1, gc2, gc3, gc4, gc5, gfc1, gfc2 = masks
        if n == 'fc1.weight':
            post = gfc1.data.view(-1, 1).expand_as(self.fc1.weight)
            # Broadcast the conv5 channel gate over the smid x smid spatial
            # positions that were flattened into fc1's input.
            pre = gc5.data.view(-1, 1, 1).expand(
                (gc5.data.numel(), self.smid, self.smid)
            ).contiguous().view(1, -1).expand_as(self.fc1.weight)
            return torch.min(post, pre)
        elif n == 'fc1.bias':
            return gfc1.data.view(-1)
        elif n == 'fc2.weight':
            post = gfc2.data.view(-1, 1).expand_as(self.fc2.weight)
            pre = gfc1.data.view(1, -1).expand_as(self.fc2.weight)
            return torch.min(post, pre)
        elif n == 'fc2.bias':
            return gfc2.data.view(-1)
        elif n == 'conv1.weight':
            return gc1.data.view(-1, 1, 1, 1).expand_as(self.conv1.weight)
        elif n == 'conv1.bias':
            return gc1.data.view(-1)
        elif n == 'conv2.weight':
            post = gc2.data.view(-1, 1, 1, 1).expand_as(self.conv2.weight)
            pre = gc1.data.view(1, -1, 1, 1).expand_as(self.conv2.weight)
            return torch.min(post, pre)
        elif n == 'conv2.bias':
            return gc2.data.view(-1)
        elif n == 'conv3.weight':
            post = gc3.data.view(-1, 1, 1, 1).expand_as(self.conv3.weight)
            pre = gc2.data.view(1, -1, 1, 1).expand_as(self.conv3.weight)
            return torch.min(post, pre)
        elif n == 'conv3.bias':
            return gc3.data.view(-1)
        elif n == 'conv4.weight':
            post = gc4.data.view(-1, 1, 1, 1).expand_as(self.conv4.weight)
            pre = gc3.data.view(1, -1, 1, 1).expand_as(self.conv4.weight)
            return torch.min(post, pre)
        elif n == 'conv4.bias':
            return gc4.data.view(-1)
        elif n == 'conv5.weight':
            post = gc5.data.view(-1, 1, 1, 1).expand_as(self.conv5.weight)
            pre = gc4.data.view(1, -1, 1, 1).expand_as(self.conv5.weight)
            return torch.min(post, pre)
        elif n == 'conv5.bias':
            return gc5.data.view(-1)
        return None
def alexnet(taskcla, pretrained=False):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper,
    extended with per-task gates/heads (see :class:`AlexNet`).

    Args:
        taskcla: list of (task_id, n_classes) pairs.
        pretrained (bool): If True, copy ImageNet weights from torchvision's
            AlexNet into the matching layers; task heads are left untouched.
    """
    model = AlexNet(taskcla)
    if pretrained:
        pre_state = torchvision.models.alexnet(pretrained=True).state_dict()
        own_state = model.state_dict()
        # NOTE(review): relies on both state dicts listing the shared
        # conv/fc layers in the same order — confirm if layers change.
        with torch.no_grad():
            for key1, key2 in zip(own_state.keys(), pre_state.keys()):
                if 'last' in key1:
                    # Task heads have no pretrained counterpart.
                    break
                # In-place copy_ actually updates the model's tensors; the
                # original's ``state_dict()[k] = v`` branch only mutated a
                # throwaway dict (a no-op), and its debug print loops over
                # every key are removed.
                own_state[key1].copy_(pre_state[key2])
    return model
from flask import Flask, render_template, request, flash, jsonify
from flask_sqlalchemy import SQLAlchemy
import psycopg2  # pip install psycopg2
import psycopg2.extras
from geoalchemy2 import Geometry

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:thanhnho@localhost/phunhuan'
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# Bug fix: the original assigned secret_key twice ('hi', then 'tn'); keep a
# single assignment with the value that actually took effect.
app.secret_key = "tn"
db = SQLAlchemy(app)

# SECURITY NOTE(review): credentials are hard-coded (and also embedded in the
# SQLAlchemy URI above); load them from the environment in production.
DB_HOST = "localhost"
DB_NAME = "phunhuan"
DB_USER = "postgres"
DB_PASS = "<PASSWORD>"
# Bug fix: the original read ``password=<PASSWORD>`` — a bare placeholder,
# which is a SyntaxError.  Pass the DB_PASS variable instead.
conn = psycopg2.connect(dbname=DB_NAME, user=DB_USER,
                        password=DB_PASS, host=DB_HOST)
class phongtro(db.Model):
    """ORM model for a rental room (phòng trọ) with a PostGIS point location."""
    gid = db.Column(db.Integer, primary_key=True)
    longitude = db.Column(db.Numeric)
    latitude = db.Column(db.Numeric)
    diachi = db.Column(db.String(200), nullable=False)  # address
    phuong = db.Column(db.String)        # ward name, e.g. 'Phường 1'
    dientich = db.Column(db.String)      # area (stored as text)
    gia = db.Column(db.String)           # price (stored as text)
    dien = db.Column(db.String)          # electricity rate
    nuoc = db.Column(db.String)          # water rate
    dichvu = db.Column(db.String)        # service fees
    noithat = db.Column(db.String)       # furnishings
    songuoi = db.Column(db.Integer)      # max occupants
    ghichu = db.Column(db.String)        # notes
    lienhe = db.Column(db.String)        # contact name
    dienthoai = db.Column(db.String)     # phone number
    geom = db.Column(Geometry('POINT'))  # PostGIS point

    def __init__(self, longitude, latitude, diachi, phuong, dientich, gia, dien, nuoc, dichvu, noithat, songuoi, ghichu, lienhe, dienthoai):
        # NOTE(review): ``geom`` is never set here, so rows created through
        # the ORM get a NULL geometry; the raw-SQL insert route sets it.
        self.longitude = longitude
        self.latitude = latitude
        self.diachi = diachi
        self.phuong = phuong
        self.dientich = dientich
        self.gia = gia
        self.dien = dien
        self.nuoc = nuoc
        self.dichvu = dichvu
        self.noithat = noithat
        self.songuoi = songuoi
        self.ghichu = ghichu
        self.lienhe = lienhe
        self.dienthoai = dienthoai
@app.route('/sort')
def sort():
    """Price-sort page: all rooms with their x/y coordinates."""
    # Bug fix: if conn.cursor() raised, the original's ``finally`` hit an
    # UnboundLocalError on ``cursor``; and the except path returned None
    # (HTTP 500).  Initialize cursor and always return a response.
    cursor = None
    try:
        cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cursor.execute(
            "SELECT ST_X(geom) as x, ST_Y(geom) as y, * FROM phongtro")
        gia = cursor.fetchall()
        return render_template('sort.html', gia=gia)
    except Exception as e:
        print(e)
        return render_template('sort.html', gia=[])
    finally:
        if cursor is not None:
            cursor.close()
@app.route("/fetchdeta", methods=["POST", "GET"])
def fetchdeta():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
if request.method == 'POST':
min = request.form['min']
max = request.form['max']
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * FROM phongtro WHERE gia>=(%s) AND gia<=(%s)", [min, max, ])
gia = cursor.fetchall()
return jsonify({'htmlresponse': render_template('response.html', gia=gia)})
except Exception as e:
print(e)
@app.route('/sort_s')
def sort_s():
    """Area-sort page: all rooms with their x/y coordinates."""
    # Same fixes as /sort: cursor initialized before try (avoids
    # UnboundLocalError in finally) and a response on the error path.
    cursor = None
    try:
        cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cursor.execute(
            "SELECT ST_X(geom) as x, ST_Y(geom) as y, * FROM phongtro")
        dientich = cursor.fetchall()
        return render_template('sort_s.html', dientich=dientich)
    except Exception as e:
        print(e)
        return render_template('sort_s.html', dientich=[])
    finally:
        if cursor is not None:
            cursor.close()
@app.route("/fetchdetaa", methods=["POST", "GET"])
def fetchdetaa():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
if request.method == 'POST':
min = request.form['min']
max = request.form['max']
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * FROM phongtro WHERE dientich>=(%s) AND dientich<=(%s)", [min, max, ])
dientich = cursor.fetchall()
return jsonify({'htmls': render_template('s.html', dientich=dientich)})
except Exception as e:
print(e)
@app.route("/phuong1", methods=["POST", "GET"])
def phuong1():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong1 = "'Phường 1'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong1))
phuong1 = cursor.fetchall()
return render_template('sort_1.html', phuong1=phuong1)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong2", methods=["POST", "GET"])
def phuong2():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong2 = "'Phường 2'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong2))
phuong2 = cursor.fetchall()
return render_template('sort_2.html', phuong2=phuong2)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong3", methods=["POST", "GET"])
def phuong3():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong3 = "'Phường 3'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong3))
phuong3 = cursor.fetchall()
return render_template('sort_3.html', phuong3=phuong3)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong4", methods=["POST", "GET"])
def phuong4():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong4 = "'Phường 4'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong4))
phuong4 = cursor.fetchall()
return render_template('sort_4.html', phuong4=phuong4)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong5", methods=["POST", "GET"])
def phuong5():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong5 = "'Phường 5'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong5))
phuong5 = cursor.fetchall()
return render_template('sort_5.html', phuong5=phuong5)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong", methods=["POST", "GET"])
def phuong():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuongb = "'Phường 7'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuongb))
phuong = cursor.fetchall()
return render_template('sort_p.html', phuong=phuong)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong8", methods=["POST", "GET"])
def phuong8():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong8 = "'Phường 8'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong8))
phuong8 = cursor.fetchall()
return render_template('sort_8.html', phuong8=phuong8)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong9", methods=["POST", "GET"])
def phuong9():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong9 = "'Phường 9'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong9))
phuong9 = cursor.fetchall()
return render_template('sort_9.html', phuong9=phuong9)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong10", methods=["POST", "GET"])
def phuong10():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong10 = "'Phường 10'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong10))
phuong10 = cursor.fetchall()
return render_template('sort_10.html', phuong10=phuong10)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong11", methods=["POST", "GET"])
def phuong11():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong11 = "'Phường 11'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y,* from phongtro WHERE phuong = {}".format(phuong11))
phuong11 = cursor.fetchall()
return render_template('sort_11.html', phuong11=phuong11)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong12", methods=["POST", "GET"])
def phuong12():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong12 = "'Phường 12'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong12))
phuong12 = cursor.fetchall()
return render_template('sort_12.html', phuong12=phuong12)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong13", methods=["POST", "GET"])
def phuong13():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong13 = "'Phường 13'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong13))
phuong13 = cursor.fetchall()
return render_template('sort_13.html', phuong13=phuong13)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong14", methods=["POST", "GET"])
def phuong14():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong14 = "'Phường 14'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong14))
phuong14 = cursor.fetchall()
return render_template('sort_14.html', phuong14=phuong14)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong15", methods=["POST", "GET"])
def phuong15():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong15 = "'Phường 15'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong15))
phuong15 = cursor.fetchall()
return render_template('sort_15.html', phuong15=phuong15)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong17", methods=["POST", "GET"])
def phuong17():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong17 = "'Phường 17'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong17))
phuong17 = cursor.fetchall()
return render_template('sort_17.html', phuong17=phuong17)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/add")
def add():
return render_template("post.html")
@app.route("/personadd", methods=['POST'])
def personadd():
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
if request.method == 'POST':
lienhe = request.form["lienhe"]
dienthoai = request.form["dienthoai"]
diachi = request.form["diachi"]
phuong = request.form["phuong"]
gia = request.form["gia"]
dientich = request.form["dientich"]
dien = request.form["dien"]
nuoc = request.form["nuoc"]
dichvu = request.form["dichvu"]
noithat = request.form["noithat"]
songuoi = request.form["songuoi"]
giogiac = request.form["giogiac"]
ghichu = request.form["ghichu"]
lat = request.form["lat"]
lon = request.form["lon"]
cur.execute(
"INSERT INTO phongtro ( lienhe, dienthoai, diachi, phuong, gia, dientich, dien, nuoc, dichvu, noithat, songuoi, giogiac, ghichu, geom) VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', ST_GeomFromText('point ({} {})'))".format(lienhe, dienthoai, diachi, phuong, gia, dientich, dien, nuoc, dichvu, noithat, songuoi, giogiac, ghichu, lat, lon))
conn.commit()
return render_template("post.html")
@app.route('/show')
def Index():
    """Admin listing of every room."""
    cur = None
    try:
        cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cur.execute("SELECT * FROM phongtro")
        show_phongtro = cur.fetchall()
        return render_template('show.html', show_phongtro=show_phongtro)
    finally:
        # Bug fix: the original leaked the cursor.
        if cur is not None:
            cur.close()
@app.route('/delete/<string:gid>', methods=['POST', 'GET'])
def delete_student(gid):
    """Delete a room by id.

    SECURITY fix: ``gid`` (a URL string) was format()ed into the DELETE
    statement — SQL injection.  It is now a bound parameter.
    """
    cur = None
    try:
        cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cur.execute('DELETE FROM phongtro WHERE gid = %s', (gid,))
        conn.commit()
        flash('Removed Successfully')
        return render_template('show.html')
    finally:
        # Bug fix: the original leaked the cursor.
        if cur is not None:
            cur.close()
@app.route('/')
def home():
    """Landing page."""
    return render_template('index.html')


@app.route('/contact')
def contact():
    """Contact page."""
    return render_template('contact.html')


@app.route('/introduce')
def introduce():
    """About/introduction page."""
    return render_template('introduce.html')


@app.route('/login')
def login():
    """Login page (static template only)."""
    return render_template('login.html')


@app.route('/list_phuong')
def list_phuong():
    """Ward index page."""
    return render_template('list_phuong.html')
if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run(host='localhost', port=9847)
from flask_sqlalchemy import SQLAlchemy
import psycopg2 # pip install psycopg2
import psycopg2.extras
from geoalchemy2 import Geometry
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:thanhnho@localhost/phunhuan'
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.secret_key = 'hi'
db = SQLAlchemy(app)
app.secret_key = "tn"
DB_HOST = "localhost"
DB_NAME = "phunhuan"
DB_USER = "postgres"
DB_PASS = "<PASSWORD>"
conn = psycopg2.connect(dbname=DB_NAME, user=DB_USER,
password=<PASSWORD>, host=DB_HOST)
class phongtro(db.Model):
    """ORM model for a rental room (phòng trọ) with a PostGIS point location."""
    gid = db.Column(db.Integer, primary_key=True)
    longitude = db.Column(db.Numeric)
    latitude = db.Column(db.Numeric)
    diachi = db.Column(db.String(200), nullable=False)  # address
    phuong = db.Column(db.String)        # ward name, e.g. 'Phường 1'
    dientich = db.Column(db.String)      # area (stored as text)
    gia = db.Column(db.String)           # price (stored as text)
    dien = db.Column(db.String)          # electricity rate
    nuoc = db.Column(db.String)          # water rate
    dichvu = db.Column(db.String)        # service fees
    noithat = db.Column(db.String)       # furnishings
    songuoi = db.Column(db.Integer)      # max occupants
    ghichu = db.Column(db.String)        # notes
    lienhe = db.Column(db.String)        # contact name
    dienthoai = db.Column(db.String)     # phone number
    geom = db.Column(Geometry('POINT'))  # PostGIS point

    def __init__(self, longitude, latitude, diachi, phuong, dientich, gia, dien, nuoc, dichvu, noithat, songuoi, ghichu, lienhe, dienthoai):
        # NOTE(review): ``geom`` is never set here, so rows created through
        # the ORM get a NULL geometry; the raw-SQL insert route sets it.
        self.longitude = longitude
        self.latitude = latitude
        self.diachi = diachi
        self.phuong = phuong
        self.dientich = dientich
        self.gia = gia
        self.dien = dien
        self.nuoc = nuoc
        self.dichvu = dichvu
        self.noithat = noithat
        self.songuoi = songuoi
        self.ghichu = ghichu
        self.lienhe = lienhe
        self.dienthoai = dienthoai
@app.route('/sort')
def sort():
    """Price-sort page: all rooms with their x/y coordinates."""
    # Bug fix: if conn.cursor() raised, the original's ``finally`` hit an
    # UnboundLocalError on ``cursor``; and the except path returned None
    # (HTTP 500).  Initialize cursor and always return a response.
    cursor = None
    try:
        cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cursor.execute(
            "SELECT ST_X(geom) as x, ST_Y(geom) as y, * FROM phongtro")
        gia = cursor.fetchall()
        return render_template('sort.html', gia=gia)
    except Exception as e:
        print(e)
        return render_template('sort.html', gia=[])
    finally:
        if cursor is not None:
            cursor.close()
@app.route("/fetchdeta", methods=["POST", "GET"])
def fetchdeta():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
if request.method == 'POST':
min = request.form['min']
max = request.form['max']
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * FROM phongtro WHERE gia>=(%s) AND gia<=(%s)", [min, max, ])
gia = cursor.fetchall()
return jsonify({'htmlresponse': render_template('response.html', gia=gia)})
except Exception as e:
print(e)
@app.route('/sort_s')
def sort_s():
    """Area-sort page: all rooms with their x/y coordinates."""
    # Same fixes as /sort: cursor initialized before try (avoids
    # UnboundLocalError in finally) and a response on the error path.
    cursor = None
    try:
        cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cursor.execute(
            "SELECT ST_X(geom) as x, ST_Y(geom) as y, * FROM phongtro")
        dientich = cursor.fetchall()
        return render_template('sort_s.html', dientich=dientich)
    except Exception as e:
        print(e)
        return render_template('sort_s.html', dientich=[])
    finally:
        if cursor is not None:
            cursor.close()
@app.route("/fetchdetaa", methods=["POST", "GET"])
def fetchdetaa():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
if request.method == 'POST':
min = request.form['min']
max = request.form['max']
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * FROM phongtro WHERE dientich>=(%s) AND dientich<=(%s)", [min, max, ])
dientich = cursor.fetchall()
return jsonify({'htmls': render_template('s.html', dientich=dientich)})
except Exception as e:
print(e)
@app.route("/phuong1", methods=["POST", "GET"])
def phuong1():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong1 = "'Phường 1'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong1))
phuong1 = cursor.fetchall()
return render_template('sort_1.html', phuong1=phuong1)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong2", methods=["POST", "GET"])
def phuong2():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong2 = "'Phường 2'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong2))
phuong2 = cursor.fetchall()
return render_template('sort_2.html', phuong2=phuong2)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong3", methods=["POST", "GET"])
def phuong3():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong3 = "'Phường 3'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong3))
phuong3 = cursor.fetchall()
return render_template('sort_3.html', phuong3=phuong3)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong4", methods=["POST", "GET"])
def phuong4():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong4 = "'Phường 4'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong4))
phuong4 = cursor.fetchall()
return render_template('sort_4.html', phuong4=phuong4)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong5", methods=["POST", "GET"])
def phuong5():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong5 = "'Phường 5'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong5))
phuong5 = cursor.fetchall()
return render_template('sort_5.html', phuong5=phuong5)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong", methods=["POST", "GET"])
def phuong():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuongb = "'Phường 7'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuongb))
phuong = cursor.fetchall()
return render_template('sort_p.html', phuong=phuong)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong8", methods=["POST", "GET"])
def phuong8():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong8 = "'Phường 8'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong8))
phuong8 = cursor.fetchall()
return render_template('sort_8.html', phuong8=phuong8)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong9", methods=["POST", "GET"])
def phuong9():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong9 = "'Phường 9'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong9))
phuong9 = cursor.fetchall()
return render_template('sort_9.html', phuong9=phuong9)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong10", methods=["POST", "GET"])
def phuong10():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong10 = "'Phường 10'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong10))
phuong10 = cursor.fetchall()
return render_template('sort_10.html', phuong10=phuong10)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong11", methods=["POST", "GET"])
def phuong11():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong11 = "'Phường 11'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y,* from phongtro WHERE phuong = {}".format(phuong11))
phuong11 = cursor.fetchall()
return render_template('sort_11.html', phuong11=phuong11)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong12", methods=["POST", "GET"])
def phuong12():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong12 = "'Phường 12'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong12))
phuong12 = cursor.fetchall()
return render_template('sort_12.html', phuong12=phuong12)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong13", methods=["POST", "GET"])
def phuong13():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong13 = "'Phường 13'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong13))
phuong13 = cursor.fetchall()
return render_template('sort_13.html', phuong13=phuong13)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong14", methods=["POST", "GET"])
def phuong14():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong14 = "'Phường 14'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong14))
phuong14 = cursor.fetchall()
return render_template('sort_14.html', phuong14=phuong14)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong15", methods=["POST", "GET"])
def phuong15():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong15 = "'Phường 15'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong15))
phuong15 = cursor.fetchall()
return render_template('sort_15.html', phuong15=phuong15)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/phuong17", methods=["POST", "GET"])
def phuong17():
try:
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phuong17 = "'Phường 17'"
cursor.execute(
"SELECT ST_X(geom) as x, ST_Y(geom) as y, * from phongtro WHERE phuong = {}".format(phuong17))
phuong17 = cursor.fetchall()
return render_template('sort_17.html', phuong17=phuong17)
except Exception as e:
print(e)
finally:
cursor.close()
@app.route("/add")
def add():
return render_template("post.html")
@app.route("/personadd", methods=['POST'])
def personadd():
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
if request.method == 'POST':
lienhe = request.form["lienhe"]
dienthoai = request.form["dienthoai"]
diachi = request.form["diachi"]
phuong = request.form["phuong"]
gia = request.form["gia"]
dientich = request.form["dientich"]
dien = request.form["dien"]
nuoc = request.form["nuoc"]
dichvu = request.form["dichvu"]
noithat = request.form["noithat"]
songuoi = request.form["songuoi"]
giogiac = request.form["giogiac"]
ghichu = request.form["ghichu"]
lat = request.form["lat"]
lon = request.form["lon"]
cur.execute(
"INSERT INTO phongtro ( lienhe, dienthoai, diachi, phuong, gia, dientich, dien, nuoc, dichvu, noithat, songuoi, giogiac, ghichu, geom) VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', ST_GeomFromText('point ({} {})'))".format(lienhe, dienthoai, diachi, phuong, gia, dientich, dien, nuoc, dichvu, noithat, songuoi, giogiac, ghichu, lat, lon))
conn.commit()
return render_template("post.html")
@app.route('/show')
def Index():
    """List every room in the phongtro table."""
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    try:
        cur.execute("SELECT * FROM phongtro")
        show_phongtro = cur.fetchall()
    finally:
        cur.close()  # the original never closed this cursor
    return render_template('show.html', show_phongtro=show_phongtro)
@app.route('/delete/<string:gid>', methods=['POST', 'GET'])
def delete_student(gid):
    """Delete the phongtro row whose primary key is *gid* and confirm.

    *gid* arrives straight from the URL, so it is bound as a query
    parameter instead of being formatted into the SQL string (the
    original was an SQL-injection vector).
    """
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    try:
        cur.execute('DELETE FROM phongtro WHERE gid = %s', (gid,))
        conn.commit()
    finally:
        cur.close()
    flash('Removed Successfully')
    return render_template('show.html')
@app.route('/')
def home():
    """Landing page."""
    return render_template('index.html')


@app.route('/contact')
def contact():
    """Static contact page."""
    return render_template('contact.html')


@app.route('/introduce')
def introduce():
    """Static introduction page."""
    return render_template('introduce.html')


@app.route('/login')
def login():
    """Login form page."""
    return render_template('login.html')


@app.route('/list_phuong')
def list_phuong():
    """Index of the per-ward listing pages."""
    return render_template('list_phuong.html')
if __name__ == '__main__':
    # Development server only; bind to localhost on port 9847.
    app.run(host='localhost', port=9847)
import json
import urllib2
import time
import matplotlib.pyplot as plt
import sys
# Runtime configuration for the sensor poller/plotter below.
CONF = {
    'sensor': "192.168.11.7:8080", # ESP8266 (IP fixed/static assigned on DHCP server/router)
    # 'sensor': "192.168.11.13:80", # Arduino Yun (IP not fixed...)
    'interval_update': 20.,
    'interval_timeout': 3.,
    'log_file': "Yun_ESP8266_SHT31_WiFi_REST.log",
    'fmt_print': "%s, %14.3f, %9.3f °C, %9.3f %%rf, %9.3f V",
    'fmt_write': "%s, %14.3f, %9.3f, %9.3f, %9.3f",
}
# Keep the HTTP timeout at no less than a tenth of the update interval.
CONF['interval_timeout'] = max(CONF['interval_timeout'], CONF['interval_update']/10)
def read_mon_values():
ret = []
# data = urllib2.urlopen("http://arduino.local/arduino/mon/U").read()
# ret.append( data.split()[-1][:-1] )
# data = urllib2.urlopen("http://arduino.local/arduino/mon/I").read()
# ret.append( data.split()[-1][:-1] )
# data = urllib2.urlopen("http://arduino.local/arduino/mon/C").read()
# ret.append( data.split()[-1][:-3] )
# data = urllib2.urlopen("http://arduino.local/arduino/mon/E").read()
# ret.append( data.split()[-1][:-1] )
# data = urllib2.urlopen("http://arduino.local/arduino/mon/t").read()
# ret.append( data.split()[-1][:-1] )
# data = urllib2.urlopen("http://arduino.local/arduino/mon/W").read()
# ret.append( data.split()[-1][:-1] )
while True:
try:
# data = json.load(urllib2.urlopen("http://%s/" % CONF['sensor'], timeout = CONF['interval_timeout']))
data = json.loads(unicode(urllib2.urlopen("http://%s/" % CONF['sensor'], timeout = CONF['interval_timeout']).read(), errors='replace'))
break
except KeyboardInterrupt:
print "Ctrl+C: quit."
sys.exit()
except:
print sys.exc_info()[0], sys.exc_info()[1]
#print sys.exc_info()[2]
#print data["variables"]["temperature"]
#print data["variables"]["humidity"]
ret.append( data["variables"]["temperature"] )
ret.append( data["variables"]["humidity"] )
ret.append( data["variables"]["Vs"] )
return tuple(map(float, ret))
def blink():
    """Pulse the board's digital pin 13 (LED) high for a quarter second."""
    base = "http://%s/arduino/digital/13/" % CONF['sensor']
    urllib2.urlopen(base + "1")
    time.sleep(.25)
    urllib2.urlopen(base + "0")
# --- one-time plot setup ---------------------------------------------------
#plt.axis([0, 10, 0, 1])
plt.ylim([0., 100.])
plt.grid(True)
ax = plt.gca()
#ax.set_xticks([1., 2., 3., 4., 5.])
ax.set_yticks(range(0, 110, 10))
# Interactive mode so the figure updates while the loop keeps running.
plt.ion()
print "Run using this configuration:"
print json.dumps(CONF, indent=4, sort_keys=True)
print "Retrieving live data from Yun, starting ..."
#print urllib2.urlopen("http://%s/" % CONF['sensor']).read()
#parsed = json.load(urllib2.urlopen("http://%s/" % CONF['sensor']))
# Initial fetch just to echo the full JSON document once at startup.
parsed = json.loads(unicode(urllib2.urlopen("http://%s/" % CONF['sensor']).read(), errors='replace'))
print json.dumps(parsed, indent=4, sort_keys=True)
blink()
# --- main acquisition loop: read, print, append to log file, plot ----------
while True:
    temperature, humidity, Vs = read_mon_values() # reading may take some time ...
    ts = (time.asctime(), time.time()) # ... thus get time afterwards
    output = CONF['fmt_print'] % (ts + (temperature, humidity, Vs))
    print output
    output = CONF['fmt_write'] % (ts + (temperature, humidity, Vs))
    with open(CONF['log_file'], "a") as log:
        log.write(output + "\n")
    # One scatter point per quantity, all at the same timestamp.
    plt.scatter([ts[1]]*3, [temperature, humidity, Vs], color=['r', 'b', 'g'])
    #time.sleep(CONF['interval_update'])
    # plt.pause both waits and lets the GUI process events/redraw.
    plt.pause(CONF['interval_update'])
import urllib2
import time
import matplotlib.pyplot as plt
import sys
# Runtime configuration for the sensor poller/plotter below.
# (This script appears twice in this file; this is the second copy.)
CONF = {
    'sensor': "192.168.11.7:8080", # ESP8266 (IP fixed/static assigned on DHCP server/router)
    # 'sensor': "192.168.11.13:80", # Arduino Yun (IP not fixed...)
    'interval_update': 20.,
    'interval_timeout': 3.,
    'log_file': "Yun_ESP8266_SHT31_WiFi_REST.log",
    'fmt_print': "%s, %14.3f, %9.3f °C, %9.3f %%rf, %9.3f V",
    'fmt_write': "%s, %14.3f, %9.3f, %9.3f, %9.3f",
}
# Keep the HTTP timeout at no less than a tenth of the update interval.
CONF['interval_timeout'] = max(CONF['interval_timeout'], CONF['interval_update']/10)
def read_mon_values():
ret = []
# data = urllib2.urlopen("http://arduino.local/arduino/mon/U").read()
# ret.append( data.split()[-1][:-1] )
# data = urllib2.urlopen("http://arduino.local/arduino/mon/I").read()
# ret.append( data.split()[-1][:-1] )
# data = urllib2.urlopen("http://arduino.local/arduino/mon/C").read()
# ret.append( data.split()[-1][:-3] )
# data = urllib2.urlopen("http://arduino.local/arduino/mon/E").read()
# ret.append( data.split()[-1][:-1] )
# data = urllib2.urlopen("http://arduino.local/arduino/mon/t").read()
# ret.append( data.split()[-1][:-1] )
# data = urllib2.urlopen("http://arduino.local/arduino/mon/W").read()
# ret.append( data.split()[-1][:-1] )
while True:
try:
# data = json.load(urllib2.urlopen("http://%s/" % CONF['sensor'], timeout = CONF['interval_timeout']))
data = json.loads(unicode(urllib2.urlopen("http://%s/" % CONF['sensor'], timeout = CONF['interval_timeout']).read(), errors='replace'))
break
except KeyboardInterrupt:
print "Ctrl+C: quit."
sys.exit()
except:
print sys.exc_info()[0], sys.exc_info()[1]
#print sys.exc_info()[2]
#print data["variables"]["temperature"]
#print data["variables"]["humidity"]
ret.append( data["variables"]["temperature"] )
ret.append( data["variables"]["humidity"] )
ret.append( data["variables"]["Vs"] )
return tuple(map(float, ret))
def blink():
    """Pulse the board's digital pin 13 (LED) high for a quarter second."""
    base = "http://%s/arduino/digital/13/" % CONF['sensor']
    urllib2.urlopen(base + "1")
    time.sleep(.25)
    urllib2.urlopen(base + "0")
# --- one-time plot setup ---------------------------------------------------
#plt.axis([0, 10, 0, 1])
plt.ylim([0., 100.])
plt.grid(True)
ax = plt.gca()
#ax.set_xticks([1., 2., 3., 4., 5.])
ax.set_yticks(range(0, 110, 10))
# Interactive mode so the figure updates while the loop keeps running.
plt.ion()
print "Run using this configuration:"
print json.dumps(CONF, indent=4, sort_keys=True)
print "Retrieving live data from Yun, starting ..."
#print urllib2.urlopen("http://%s/" % CONF['sensor']).read()
#parsed = json.load(urllib2.urlopen("http://%s/" % CONF['sensor']))
# Initial fetch just to echo the full JSON document once at startup.
parsed = json.loads(unicode(urllib2.urlopen("http://%s/" % CONF['sensor']).read(), errors='replace'))
print json.dumps(parsed, indent=4, sort_keys=True)
blink()
# --- main acquisition loop: read, print, append to log file, plot ----------
while True:
    temperature, humidity, Vs = read_mon_values() # reading may take some time ...
    ts = (time.asctime(), time.time()) # ... thus get time afterwards
    output = CONF['fmt_print'] % (ts + (temperature, humidity, Vs))
    print output
    output = CONF['fmt_write'] % (ts + (temperature, humidity, Vs))
    with open(CONF['log_file'], "a") as log:
        log.write(output + "\n")
    # One scatter point per quantity, all at the same timestamp.
    plt.scatter([ts[1]]*3, [temperature, humidity, Vs], color=['r', 'b', 'g'])
    #time.sleep(CONF['interval_update'])
    # plt.pause both waits and lets the GUI process events/redraw.
    plt.pause(CONF['interval_update'])
import torch
from torch import nn
class BCE_VIRAT(nn.Module):
def __init__(self, reduction="mean", hard_thres=-1):
"""
:param hard_thres:
-1:软标签损失,直接基于标注中的软标签计算BECLoss;
>0:硬标签损失,将标签大于hard_thres的置为1,否则为0;
"""
super(BCE_VIRAT, self).__init__()
self.hard_thres = hard_thres
self._loss_fn = nn.BCEWithLogitsLoss(reduction=reduction)
# self._loss_fn = nn.BCEWithLogitsLoss(reduction="none")
def forward(self, x, y):
if self.hard_thres > 0: # 硬标签
mask = y > self.hard_thres
y[mask] = 1.
y[~mask] = 0.
# weight = torch.tensor([2.0869271159324994, 1.0968095583318394, 4.667857504911766, 1.6595608352187452, 3.6011781840303687, 2.6403159830224547, 4.869071729774468], device=x.device)
# pos_weight = torch.tensor([1.954460531501126, 0.6904418649003278, 4.658420747351811, 1.4485650738429943, 3.5735073030418634, 2.5663047647752473, 4.861361591348501], device=x.device)
# _loss_fn = nn.BCEWithLogitsLoss(reduction="mean", weight=weight, pos_weight=pos_weight)
weight = torch.tensor([1,1,1,1,1,1,1,0.1], device=x.device)
_loss_fn = nn.BCEWithLogitsLoss(reduction="mean", weight=weight)
# loss = self._loss_fn(x, y)
loss = _loss_fn(x, y)
'''
# acsl
with torch.no_grad():
sigmoid_cls_logits = torch.sigmoid(x)
weight_mask = sigmoid_cls_logits.ge(0.5)
weight_mask = weight_mask + y.to(torch.bool)
# 新增背景类赋予0.1的权值
# weight_mask = torch.where(weight_mask, torch.tensor(1,device=x.device).float(), torch.tensor(0.5,device=x.device).float())
n_i, _ = sigmoid_cls_logits.size()
loss = torch.sum(weight_mask * loss) / n_i
'''
return loss
if __name__ == '__main__':
    # Smoke test: two samples, three classes; hard_thres=0.6 binarizes
    # the 0.5 soft labels to 0.
    # NOTE(review): verify the class count here matches the per-class
    # weight vector used in BCE_VIRAT.forward.
    label = torch.tensor([
        [0, 1, .5],
        [1, 0, .5]
    ], dtype=torch.float64)
    # Extreme logits (+/-1000) make the matched entries contribute ~0 loss.
    pred = torch.tensor([
        [-1000, 1000, 0],
        [1000, -1000, 0]
    ], dtype=torch.float32, requires_grad=True)
    loss_fn = BCE_VIRAT(hard_thres=0.6)
    loss = loss_fn(pred, label)
    loss.backward()
    print(loss)
    print(pred.grad)
from torch import nn
class BCE_VIRAT(nn.Module):
def __init__(self, reduction="mean", hard_thres=-1):
"""
:param hard_thres:
-1:软标签损失,直接基于标注中的软标签计算BECLoss;
>0:硬标签损失,将标签大于hard_thres的置为1,否则为0;
"""
super(BCE_VIRAT, self).__init__()
self.hard_thres = hard_thres
self._loss_fn = nn.BCEWithLogitsLoss(reduction=reduction)
# self._loss_fn = nn.BCEWithLogitsLoss(reduction="none")
def forward(self, x, y):
if self.hard_thres > 0: # 硬标签
mask = y > self.hard_thres
y[mask] = 1.
y[~mask] = 0.
# weight = torch.tensor([2.0869271159324994, 1.0968095583318394, 4.667857504911766, 1.6595608352187452, 3.6011781840303687, 2.6403159830224547, 4.869071729774468], device=x.device)
# pos_weight = torch.tensor([1.954460531501126, 0.6904418649003278, 4.658420747351811, 1.4485650738429943, 3.5735073030418634, 2.5663047647752473, 4.861361591348501], device=x.device)
# _loss_fn = nn.BCEWithLogitsLoss(reduction="mean", weight=weight, pos_weight=pos_weight)
weight = torch.tensor([1,1,1,1,1,1,1,0.1], device=x.device)
_loss_fn = nn.BCEWithLogitsLoss(reduction="mean", weight=weight)
# loss = self._loss_fn(x, y)
loss = _loss_fn(x, y)
'''
# acsl
with torch.no_grad():
sigmoid_cls_logits = torch.sigmoid(x)
weight_mask = sigmoid_cls_logits.ge(0.5)
weight_mask = weight_mask + y.to(torch.bool)
# 新增背景类赋予0.1的权值
# weight_mask = torch.where(weight_mask, torch.tensor(1,device=x.device).float(), torch.tensor(0.5,device=x.device).float())
n_i, _ = sigmoid_cls_logits.size()
loss = torch.sum(weight_mask * loss) / n_i
'''
return loss
if __name__ == '__main__':
    # Smoke test: two samples, three classes; hard_thres=0.6 binarizes
    # the 0.5 soft labels to 0.
    # NOTE(review): verify the class count here matches the per-class
    # weight vector used in BCE_VIRAT.forward.
    label = torch.tensor([
        [0, 1, .5],
        [1, 0, .5]
    ], dtype=torch.float64)
    # Extreme logits (+/-1000) make the matched entries contribute ~0 loss.
    pred = torch.tensor([
        [-1000, 1000, 0],
        [1000, -1000, 0]
    ], dtype=torch.float32, requires_grad=True)
    loss_fn = BCE_VIRAT(hard_thres=0.6)
    loss = loss_fn(pred, label)
    loss.backward()
    print(loss)
    print(pred.grad)
import unittest
from shardingpy.exception import SQLParsingException
from shardingpy.parsing.lexer.dialect.mysql import MySQLLexer
from shardingpy.parsing.lexer.lexer import Lexer
from shardingpy.parsing.lexer.token import *
class LexerTestCase(unittest.TestCase):
    """Tests for the dialect-independent SQL lexer."""

    # Shared keyword dictionary; the tests only read from it.
    dictionary = Dictionary()

    def test_next_token_for_white_space(self):
        lexer = Lexer("Select * from \r\n TABLE_XXX \t", LexerTestCase.dictionary)
        self.assert_next_token(lexer, DefaultKeyword.SELECT, "Select")
        self.assert_next_token(lexer, Symbol.STAR, "*")
        self.assert_next_token(lexer, DefaultKeyword.FROM, "from")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "TABLE_XXX")
        self.assert_next_token(lexer, Assist.END, "")

    def test_next_token_for_order_by(self):
        # First ORDER is a table identifier, second is the keyword.
        lexer = Lexer("SELECT * FROM ORDER ORDER \t BY XX DESC", LexerTestCase.dictionary)
        self.assert_next_token(lexer, DefaultKeyword.SELECT, "SELECT")
        self.assert_next_token(lexer, Symbol.STAR, "*")
        self.assert_next_token(lexer, DefaultKeyword.FROM, "FROM")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "ORDER")
        self.assert_next_token(lexer, DefaultKeyword.ORDER, "ORDER")
        self.assert_next_token(lexer, DefaultKeyword.BY, "BY")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "XX")
        self.assert_next_token(lexer, DefaultKeyword.DESC, "DESC")
        self.assert_next_token(lexer, Assist.END, "")

    def test_next_token_for_group_by(self):
        lexer = Lexer("SELECT * FROM `XXX` GROUP BY XX DESC", LexerTestCase.dictionary)
        self.assert_next_token(lexer, DefaultKeyword.SELECT, "SELECT")
        self.assert_next_token(lexer, Symbol.STAR, "*")
        self.assert_next_token(lexer, DefaultKeyword.FROM, "FROM")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "`XXX`")
        self.assert_next_token(lexer, DefaultKeyword.GROUP, "GROUP")
        self.assert_next_token(lexer, DefaultKeyword.BY, "BY")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "XX")
        self.assert_next_token(lexer, DefaultKeyword.DESC, "DESC")
        self.assert_next_token(lexer, Assist.END, "")

    def test_next_token_for_ambiguous_group_by(self):
        # First GROUP is a table identifier, second is the keyword.
        lexer = Lexer("SELECT * FROM GROUP GROUP \t BY XX DESC", LexerTestCase.dictionary)
        self.assert_next_token(lexer, DefaultKeyword.SELECT, "SELECT")
        self.assert_next_token(lexer, Symbol.STAR, "*")
        self.assert_next_token(lexer, DefaultKeyword.FROM, "FROM")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "GROUP")
        self.assert_next_token(lexer, DefaultKeyword.GROUP, "GROUP")
        self.assert_next_token(lexer, DefaultKeyword.BY, "BY")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "XX")
        self.assert_next_token(lexer, DefaultKeyword.DESC, "DESC")
        self.assert_next_token(lexer, Assist.END, "")

    def assert_next_token(self, lexer, expected_token_type, expected_literals):
        """Advance the lexer once and verify token type and literal text."""
        lexer.next_token()
        current_token = lexer.get_current_token()
        self.assertEqual(current_token.token_type, expected_token_type)
        self.assertEqual(current_token.literals, expected_literals)

    def test_next_token_for_number(self):
        self.assert_next_token_for_number("0x1e", Literals.HEX)
        self.assert_next_token_for_number("0x-1e", Literals.HEX)
        self.assert_next_token_for_number("1243", Literals.INT)
        self.assert_next_token_for_number("-123", Literals.INT)
        self.assert_next_token_for_number("-.123", Literals.FLOAT)
        self.assert_next_token_for_number("123.0", Literals.FLOAT)
        self.assert_next_token_for_number("123e4", Literals.FLOAT)
        self.assert_next_token_for_number("123E4", Literals.FLOAT)
        self.assert_next_token_for_number("123e+4", Literals.FLOAT)
        self.assert_next_token_for_number("123E+4", Literals.FLOAT)
        self.assert_next_token_for_number("123e-4", Literals.FLOAT)
        self.assert_next_token_for_number("123E-4", Literals.FLOAT)
        self.assert_next_token_for_number(".5", Literals.FLOAT)
        self.assert_next_token_for_number("123f", Literals.FLOAT)
        self.assert_next_token_for_number("123F", Literals.FLOAT)
        self.assert_next_token_for_number(".5F", Literals.FLOAT)
        self.assert_next_token_for_number("123d", Literals.FLOAT)
        self.assert_next_token_for_number("123D", Literals.FLOAT)

    def assert_next_token_for_number(self, expected_number, expected_token_type):
        """Lex the number in both WHERE positions and verify its token type."""
        lexer = Lexer("select * from XXX_TABLE where xx={} and yy={}".format(expected_number, expected_number),
                      LexerTestCase.dictionary)
        self.assert_next_token(lexer, DefaultKeyword.SELECT, "select")
        self.assert_next_token(lexer, Symbol.STAR, "*")
        self.assert_next_token(lexer, DefaultKeyword.FROM, "from")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "XXX_TABLE")
        self.assert_next_token(lexer, DefaultKeyword.WHERE, "where")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "xx")
        self.assert_next_token(lexer, Symbol.EQ, "=")
        self.assert_next_token(lexer, expected_token_type, expected_number)
        self.assert_next_token(lexer, DefaultKeyword.AND, "and")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "yy")
        self.assert_next_token(lexer, Symbol.EQ, "=")
        self.assert_next_token(lexer, expected_token_type, expected_number)
        self.assert_next_token(lexer, Assist.END, "")

    def test_next_token_for_single_line_comment(self):
        lexer = Lexer("SELECT * FROM XXX_TABLE --x\"y`z \n WHERE XX=1 //x\"y'z", LexerTestCase.dictionary)
        self.assert_next_token(lexer, DefaultKeyword.SELECT, "SELECT")
        self.assert_next_token(lexer, Symbol.STAR, "*")
        self.assert_next_token(lexer, DefaultKeyword.FROM, "FROM")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "XXX_TABLE")
        self.assert_next_token(lexer, DefaultKeyword.WHERE, "WHERE")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "XX")
        self.assert_next_token(lexer, Symbol.EQ, "=")
        self.assert_next_token(lexer, Literals.INT, "1")
        self.assert_next_token(lexer, Assist.END, "")

    def test_next_token_for_multiple_line_comment(self):
        lexer = Lexer("SELECT * FROM XXX_TABLE /*--xyz \n WHERE XX=1 //xyz*/ WHERE YY>2 /*--xyz //xyz*/",
                      LexerTestCase.dictionary)
        self.assert_next_token(lexer, DefaultKeyword.SELECT, "SELECT")
        self.assert_next_token(lexer, Symbol.STAR, "*")
        self.assert_next_token(lexer, DefaultKeyword.FROM, "FROM")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "XXX_TABLE")
        self.assert_next_token(lexer, DefaultKeyword.WHERE, "WHERE")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "YY")
        self.assert_next_token(lexer, Symbol.GT, ">")
        self.assert_next_token(lexer, Literals.INT, "2")
        self.assert_next_token(lexer, Assist.END, "")

    def test_next_token_for_n_char(self):
        lexer = Lexer("SELECT * FROM XXX_TABLE WHERE XX=N'xx'", LexerTestCase.dictionary)
        self.assert_next_token(lexer, DefaultKeyword.SELECT, "SELECT")
        self.assert_next_token(lexer, Symbol.STAR, "*")
        self.assert_next_token(lexer, DefaultKeyword.FROM, "FROM")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "XXX_TABLE")
        self.assert_next_token(lexer, DefaultKeyword.WHERE, "WHERE")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "XX")
        self.assert_next_token(lexer, Symbol.EQ, "=")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "N")
        self.assert_next_token(lexer, Literals.CHARS, "xx")
        self.assert_next_token(lexer, Assist.END, "")

    def test_syntax_error_for_unclosed_char(self):
        """An unterminated char literal must raise SQLParsingException.

        The original wrapped next_token() in try/except without self.fail,
        so the test silently passed when no exception was raised;
        assertRaises makes the expectation explicit.
        """
        lexer = Lexer("UPDATE product p SET p.title='Title's',s.description='中文' WHERE p.product_id=?",
                      LexerTestCase.dictionary)
        self.assert_next_token(lexer, DefaultKeyword.UPDATE, "UPDATE")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "product")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "p")
        self.assert_next_token(lexer, DefaultKeyword.SET, "SET")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "p")
        self.assert_next_token(lexer, Symbol.DOT, ".")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "title")
        self.assert_next_token(lexer, Symbol.EQ, "=")
        self.assert_next_token(lexer, Literals.CHARS, "Title")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "s")
        self.assert_next_token(lexer, Literals.CHARS, ",s.description=")
        with self.assertRaises(SQLParsingException) as ctx:
            lexer.next_token()
        self.assertEqual(
            str(ctx.exception),
            "SQL syntax error, expected token is ERROR, actual token is CHARS, literals is ',s.description='.")
class MySQLLexerTest(unittest.TestCase):
    """Lexer tests specific to the MySQL dialect: /*! */ hints, '#'
    comments and @/@@ variables."""

    def assert_next_token(self, lexer, expected_token_type, expected_literals):
        """Advance the lexer once and verify the emitted token."""
        lexer.next_token()
        token = lexer.get_current_token()
        self.assertEqual(token.token_type, expected_token_type)
        self.assertEqual(token.literals, expected_literals)

    def _assert_token_stream(self, lexer, expected_tokens):
        """Verify a whole sequence of (token_type, literals) pairs in order."""
        for token_type, literals in expected_tokens:
            self.assert_next_token(lexer, token_type, literals)

    def test_next_token_for_hint(self):
        lexer = MySQLLexer("SELECT * FROM XXX_TABLE /*! hint 1 \n xxx */ WHERE XX>1 /*!hint 2*/")
        self._assert_token_stream(lexer, [
            (DefaultKeyword.SELECT, 'SELECT'),
            (Symbol.STAR, '*'),
            (DefaultKeyword.FROM, 'FROM'),
            (Literals.IDENTIFIER, 'XXX_TABLE'),
            (DefaultKeyword.WHERE, 'WHERE'),
            (Literals.IDENTIFIER, 'XX'),
            (Symbol.GT, '>'),
            (Literals.INT, '1'),
            (Assist.END, ''),
        ])

    def test_next_token_for_comment(self):
        lexer = MySQLLexer("SELECT * FROM XXX_TABLE # xxx ")
        self._assert_token_stream(lexer, [
            (DefaultKeyword.SELECT, 'SELECT'),
            (Symbol.STAR, '*'),
            (DefaultKeyword.FROM, 'FROM'),
            (Literals.IDENTIFIER, 'XXX_TABLE'),
            (Assist.END, ''),
        ])

    def test_next_token_for_multipule_lines_comment(self):
        # (method name typo kept: unittest discovers tests by this name)
        lexer = MySQLLexer("SELECT * FROM XXX_TABLE # comment 1 \n #comment 2 \r\n WHERE XX<=1")
        self._assert_token_stream(lexer, [
            (DefaultKeyword.SELECT, 'SELECT'),
            (Symbol.STAR, '*'),
            (DefaultKeyword.FROM, 'FROM'),
            (Literals.IDENTIFIER, 'XXX_TABLE'),
            (DefaultKeyword.WHERE, 'WHERE'),
            (Literals.IDENTIFIER, 'XX'),
            (Symbol.LT_EQ, '<='),
            (Literals.INT, '1'),
            (Assist.END, ''),
        ])

    def test_next_token_for_variable(self):
        lexer = MySQLLexer("SELECT @x1:=1 FROM XXX_TABLE WHERE XX= @@global.x1")
        self._assert_token_stream(lexer, [
            (DefaultKeyword.SELECT, 'SELECT'),
            (Literals.VARIABLE, '@x1'),
            (Symbol.COLON_EQ, ':='),
            (Literals.INT, '1'),
            (DefaultKeyword.FROM, 'FROM'),
            (Literals.IDENTIFIER, 'XXX_TABLE'),
            (DefaultKeyword.WHERE, 'WHERE'),
            (Literals.IDENTIFIER, 'XX'),
            (Symbol.EQ, '='),
            (Literals.VARIABLE, '@@global.x1'),
            (Assist.END, ''),
        ])
from shardingpy.exception import SQLParsingException
from shardingpy.parsing.lexer.dialect.mysql import MySQLLexer
from shardingpy.parsing.lexer.lexer import Lexer
from shardingpy.parsing.lexer.token import *
class LexerTestCase(unittest.TestCase):
dictionary = Dictionary()
def test_next_token_for_white_space(self):
lexer = Lexer("Select * from \r\n TABLE_XXX \t", LexerTestCase.dictionary)
self.assert_next_token(lexer, DefaultKeyword.SELECT, "Select")
self.assert_next_token(lexer, Symbol.STAR, "*")
self.assert_next_token(lexer, DefaultKeyword.FROM, "from")
self.assert_next_token(lexer, Literals.IDENTIFIER, "TABLE_XXX")
self.assert_next_token(lexer, Assist.END, "")
def test_next_token_for_order_by(self):
lexer = Lexer("SELECT * FROM ORDER ORDER \t BY XX DESC", LexerTestCase.dictionary)
self.assert_next_token(lexer, DefaultKeyword.SELECT, "SELECT")
self.assert_next_token(lexer, Symbol.STAR, "*")
self.assert_next_token(lexer, DefaultKeyword.FROM, "FROM")
self.assert_next_token(lexer, Literals.IDENTIFIER, "ORDER")
self.assert_next_token(lexer, DefaultKeyword.ORDER, "ORDER")
self.assert_next_token(lexer, DefaultKeyword.BY, "BY")
self.assert_next_token(lexer, Literals.IDENTIFIER, "XX")
self.assert_next_token(lexer, DefaultKeyword.DESC, "DESC")
self.assert_next_token(lexer, Assist.END, "")
def test_next_token_for_group_by(self):
lexer = Lexer("SELECT * FROM `XXX` GROUP BY XX DESC", LexerTestCase.dictionary)
self.assert_next_token(lexer, DefaultKeyword.SELECT, "SELECT")
self.assert_next_token(lexer, Symbol.STAR, "*")
self.assert_next_token(lexer, DefaultKeyword.FROM, "FROM")
self.assert_next_token(lexer, Literals.IDENTIFIER, "`XXX`")
self.assert_next_token(lexer, DefaultKeyword.GROUP, "GROUP")
self.assert_next_token(lexer, DefaultKeyword.BY, "BY")
self.assert_next_token(lexer, Literals.IDENTIFIER, "XX")
self.assert_next_token(lexer, DefaultKeyword.DESC, "DESC")
self.assert_next_token(lexer, Assist.END, "")
def test_next_token_for_ambiguous_group_by(self):
lexer = Lexer("SELECT * FROM GROUP GROUP \t BY XX DESC", LexerTestCase.dictionary)
self.assert_next_token(lexer, DefaultKeyword.SELECT, "SELECT")
self.assert_next_token(lexer, Symbol.STAR, "*")
self.assert_next_token(lexer, DefaultKeyword.FROM, "FROM")
self.assert_next_token(lexer, Literals.IDENTIFIER, "GROUP")
self.assert_next_token(lexer, DefaultKeyword.GROUP, "GROUP")
self.assert_next_token(lexer, DefaultKeyword.BY, "BY")
self.assert_next_token(lexer, Literals.IDENTIFIER, "XX")
self.assert_next_token(lexer, DefaultKeyword.DESC, "DESC")
self.assert_next_token(lexer, Assist.END, "")
def assert_next_token(self, lexer, expected_token_type, expected_literals):
lexer.next_token()
current_token = lexer.get_current_token()
self.assertEqual(current_token.token_type, expected_token_type)
self.assertEqual(current_token.literals, expected_literals)
def test_next_token_for_number(self):
self.assert_next_token_for_number("0x1e", Literals.HEX)
self.assert_next_token_for_number("0x-1e", Literals.HEX)
self.assert_next_token_for_number("1243", Literals.INT)
self.assert_next_token_for_number("-123", Literals.INT)
self.assert_next_token_for_number("-.123", Literals.FLOAT)
self.assert_next_token_for_number("123.0", Literals.FLOAT)
self.assert_next_token_for_number("123e4", Literals.FLOAT)
self.assert_next_token_for_number("123E4", Literals.FLOAT)
self.assert_next_token_for_number("123e+4", Literals.FLOAT)
self.assert_next_token_for_number("123E+4", Literals.FLOAT)
self.assert_next_token_for_number("123e-4", Literals.FLOAT)
self.assert_next_token_for_number("123E-4", Literals.FLOAT)
self.assert_next_token_for_number(".5", Literals.FLOAT)
self.assert_next_token_for_number("123f", Literals.FLOAT)
self.assert_next_token_for_number("123F", Literals.FLOAT)
self.assert_next_token_for_number(".5F", Literals.FLOAT)
self.assert_next_token_for_number("123d", Literals.FLOAT)
self.assert_next_token_for_number("123D", Literals.FLOAT)
def assert_next_token_for_number(self, expected_number, expected_token_type):
lexer = Lexer("select * from XXX_TABLE where xx={} and yy={}".format(expected_number, expected_number),
LexerTestCase.dictionary)
self.assert_next_token(lexer, DefaultKeyword.SELECT, "select")
self.assert_next_token(lexer, Symbol.STAR, "*")
self.assert_next_token(lexer, DefaultKeyword.FROM, "from")
self.assert_next_token(lexer, Literals.IDENTIFIER, "XXX_TABLE")
self.assert_next_token(lexer, DefaultKeyword.WHERE, "where")
self.assert_next_token(lexer, Literals.IDENTIFIER, "xx")
self.assert_next_token(lexer, Symbol.EQ, "=")
self.assert_next_token(lexer, expected_token_type, expected_number)
self.assert_next_token(lexer, DefaultKeyword.AND, "and")
self.assert_next_token(lexer, Literals.IDENTIFIER, "yy")
self.assert_next_token(lexer, Symbol.EQ, "=")
self.assert_next_token(lexer, expected_token_type, expected_number)
self.assert_next_token(lexer, Assist.END, "")
def test_next_token_for_single_line_comment(self):
lexer = Lexer("SELECT * FROM XXX_TABLE --x\"y`z \n WHERE XX=1 //x\"y'z", LexerTestCase.dictionary)
self.assert_next_token(lexer, DefaultKeyword.SELECT, "SELECT")
self.assert_next_token(lexer, Symbol.STAR, "*")
self.assert_next_token(lexer, DefaultKeyword.FROM, "FROM")
self.assert_next_token(lexer, Literals.IDENTIFIER, "XXX_TABLE")
self.assert_next_token(lexer, DefaultKeyword.WHERE, "WHERE")
self.assert_next_token(lexer, Literals.IDENTIFIER, "XX")
self.assert_next_token(lexer, Symbol.EQ, "=")
self.assert_next_token(lexer, Literals.INT, "1")
self.assert_next_token(lexer, Assist.END, "")
def test_next_token_for_multiple_line_comment(self):
lexer = Lexer("SELECT * FROM XXX_TABLE /*--xyz \n WHERE XX=1 //xyz*/ WHERE YY>2 /*--xyz //xyz*/",
LexerTestCase.dictionary)
self.assert_next_token(lexer, DefaultKeyword.SELECT, "SELECT")
self.assert_next_token(lexer, Symbol.STAR, "*")
self.assert_next_token(lexer, DefaultKeyword.FROM, "FROM")
self.assert_next_token(lexer, Literals.IDENTIFIER, "XXX_TABLE")
self.assert_next_token(lexer, DefaultKeyword.WHERE, "WHERE")
self.assert_next_token(lexer, Literals.IDENTIFIER, "YY")
self.assert_next_token(lexer, Symbol.GT, ">")
self.assert_next_token(lexer, Literals.INT, "2")
self.assert_next_token(lexer, Assist.END, "")
    def test_next_token_for_n_char(self):
        """`N'xx'` literals: the default lexer emits `N` as an IDENTIFIER
        followed by a CHARS token for the quoted text (no special-casing of
        national-character literals here).
        """
        lexer = Lexer("SELECT * FROM XXX_TABLE WHERE XX=N'xx'", LexerTestCase.dictionary)
        self.assert_next_token(lexer, DefaultKeyword.SELECT, "SELECT")
        self.assert_next_token(lexer, Symbol.STAR, "*")
        self.assert_next_token(lexer, DefaultKeyword.FROM, "FROM")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "XXX_TABLE")
        self.assert_next_token(lexer, DefaultKeyword.WHERE, "WHERE")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "XX")
        self.assert_next_token(lexer, Symbol.EQ, "=")
        self.assert_next_token(lexer, Literals.IDENTIFIER, "N")
        self.assert_next_token(lexer, Literals.CHARS, "xx")
        self.assert_next_token(lexer, Assist.END, "")
def test_syntax_error_for_unclosed_char(self):
lexer = Lexer("UPDATE product p SET p.title='Title's',s.description='中文' WHERE p.product_id=?",
LexerTestCase.dictionary)
self.assert_next_token(lexer, DefaultKeyword.UPDATE, "UPDATE")
self.assert_next_token(lexer, Literals.IDENTIFIER, "product")
self.assert_next_token(lexer, Literals.IDENTIFIER, "p")
self.assert_next_token(lexer, DefaultKeyword.SET, "SET")
self.assert_next_token(lexer, Literals.IDENTIFIER, "p")
self.assert_next_token(lexer, Symbol.DOT, ".")
self.assert_next_token(lexer, Literals.IDENTIFIER, "title")
self.assert_next_token(lexer, Symbol.EQ, "=")
self.assert_next_token(lexer, Literals.CHARS, "Title")
self.assert_next_token(lexer, Literals.IDENTIFIER, "s")
self.assert_next_token(lexer, Literals.CHARS, ",s.description=")
try:
lexer.next_token()
except SQLParsingException as e:
self.assertEqual(str(e),
"SQL syntax error, expected token is ERROR, actual token is CHARS, literals is ',s.description='.")
class MySQLLexerTest(unittest.TestCase):
    """Tokenisation tests for the MySQL-specific lexer: `/*! */` hints,
    `#` comments, and `@`/`@@` variables.

    Note: this class carries its own assert_next_token helper (it does not
    inherit from LexerTestCase). Fixed here: extraction junk that had been
    appended to the final line of the class.
    """

    def test_next_token_for_hint(self):
        """`/*! ... */` optimizer hints are skipped like comments."""
        lexer = MySQLLexer("SELECT * FROM XXX_TABLE /*! hint 1 \n xxx */ WHERE XX>1 /*!hint 2*/")
        self.assert_next_token(lexer, DefaultKeyword.SELECT, 'SELECT')
        self.assert_next_token(lexer, Symbol.STAR, '*')
        self.assert_next_token(lexer, DefaultKeyword.FROM, 'FROM')
        self.assert_next_token(lexer, Literals.IDENTIFIER, 'XXX_TABLE')
        self.assert_next_token(lexer, DefaultKeyword.WHERE, 'WHERE')
        self.assert_next_token(lexer, Literals.IDENTIFIER, 'XX')
        self.assert_next_token(lexer, Symbol.GT, '>')
        self.assert_next_token(lexer, Literals.INT, '1')
        self.assert_next_token(lexer, Assist.END, '')

    def test_next_token_for_comment(self):
        """`#` starts a comment that runs to the end of the line/input."""
        lexer = MySQLLexer("SELECT * FROM XXX_TABLE # xxx ")
        self.assert_next_token(lexer, DefaultKeyword.SELECT, 'SELECT')
        self.assert_next_token(lexer, Symbol.STAR, '*')
        self.assert_next_token(lexer, DefaultKeyword.FROM, 'FROM')
        self.assert_next_token(lexer, Literals.IDENTIFIER, 'XXX_TABLE')
        self.assert_next_token(lexer, Assist.END, '')

    def test_next_token_for_multipule_lines_comment(self):
        # (method name typo "multipule" kept: renaming would change the
        # reported test id for no functional gain)
        """Consecutive `#` comments across \n and \r\n line endings are skipped."""
        lexer = MySQLLexer("SELECT * FROM XXX_TABLE # comment 1 \n #comment 2 \r\n WHERE XX<=1")
        self.assert_next_token(lexer, DefaultKeyword.SELECT, 'SELECT')
        self.assert_next_token(lexer, Symbol.STAR, '*')
        self.assert_next_token(lexer, DefaultKeyword.FROM, 'FROM')
        self.assert_next_token(lexer, Literals.IDENTIFIER, 'XXX_TABLE')
        self.assert_next_token(lexer, DefaultKeyword.WHERE, 'WHERE')
        self.assert_next_token(lexer, Literals.IDENTIFIER, 'XX')
        self.assert_next_token(lexer, Symbol.LT_EQ, '<=')
        self.assert_next_token(lexer, Literals.INT, '1')
        self.assert_next_token(lexer, Assist.END, '')

    def test_next_token_for_variable(self):
        """`@user` and `@@scope.name` variables lex as VARIABLE tokens,
        and `:=` lexes as a single COLON_EQ symbol."""
        lexer = MySQLLexer("SELECT @x1:=1 FROM XXX_TABLE WHERE XX= @@global.x1")
        self.assert_next_token(lexer, DefaultKeyword.SELECT, 'SELECT')
        self.assert_next_token(lexer, Literals.VARIABLE, '@x1')
        self.assert_next_token(lexer, Symbol.COLON_EQ, ':=')
        self.assert_next_token(lexer, Literals.INT, '1')
        self.assert_next_token(lexer, DefaultKeyword.FROM, 'FROM')
        self.assert_next_token(lexer, Literals.IDENTIFIER, 'XXX_TABLE')
        self.assert_next_token(lexer, DefaultKeyword.WHERE, 'WHERE')
        self.assert_next_token(lexer, Literals.IDENTIFIER, 'XX')
        self.assert_next_token(lexer, Symbol.EQ, '=')
        self.assert_next_token(lexer, Literals.VARIABLE, '@@global.x1')
        self.assert_next_token(lexer, Assist.END, '')

    def assert_next_token(self, lexer, expected_token_type, expected_literals):
        """Advance the lexer one token and assert its type and literal text."""
        lexer.next_token()
        current_token = lexer.get_current_token()
        self.assertEqual(current_token.token_type, expected_token_type)
        self.assertEqual(current_token.literals, expected_literals)
import json

# Embedded JSON payload: a list of {"name": ..., "count": ...} records.
# (Fixed here: extraction junk fused onto the final `print` line, and the
# manual append-loop replaced by a comprehension.)
data = '''
[
    { "name":"Alena", "count":100 },
    { "name":"Levon", "count":97 },
    { "name":"Shakira", "count":96 },
    { "name":"Keerah", "count":95 },
    { "name":"Anesu", "count":92 },
    { "name":"Zishan", "count":90 },
    { "name":"Francesco", "count":87 },
    { "name":"Camron", "count":87 },
    { "name":"Brannan", "count":83 },
    { "name":"Karly", "count":82 },
    { "name":"Ohran", "count":81 },
    { "name":"Oswald", "count":80 },
    { "name":"Fasai", "count":78 },
    { "name":"Renas", "count":76 },
    { "name":"Devon", "count":76 },
    { "name":"McKenzie", "count":73 },
    { "name":"Caelan", "count":69 },
    { "name":"Ayshah", "count":65 },
    { "name":"Lettice", "count":64 },
    { "name":"Cariss", "count":56 },
    { "name":"Johannes", "count":52 },
    { "name":"Melissande", "count":52 },
    { "name":"Yishuka", "count":51 },
    { "name":"Tymon", "count":51 },
    { "name":"Aleksandra", "count":50 },
    { "name":"Macaully", "count":49 },
    { "name":"Sharlene", "count":48 },
    { "name":"Lucie", "count":46 },
    { "name":"Ellyce", "count":45 },
    { "name":"Shalamar", "count":45 },
    { "name":"Rehaan", "count":45 },
    { "name":"Macie", "count":43 },
    { "name":"Harjyot", "count":37 },
    { "name":"Jumaimah", "count":36 },
    { "name":"Dominic", "count":30 },
    { "name":"Shahed", "count":30 },
    { "name":"Daumantas", "count":29 },
    { "name":"Muhammad", "count":28 },
    { "name":"Latisha", "count":27 },
    { "name":"Mahan", "count":24 },
    { "name":"Mirren", "count":23 },
    { "name":"Farhan", "count":21 },
    { "name":"Emmet", "count":19 },
    { "name":"Ilysa", "count":16 },
    { "name":"Cruz", "count":14 },
    { "name":"Kirk", "count":14 },
    { "name":"Greig", "count":11 },
    { "name":"Angelo", "count":9 },
    { "name":"Leena", "count":9 },
    { "name":"Kaelan", "count":8 }
]'''

info = json.loads(data)
print('User count:', len(info))

# Extract every "count" as an int and total them.
numlist = [int(item['count']) for item in info]
total = sum(numlist)
print(total)
# Embedded JSON payload: a list of {"name": ..., "count": ...} records.
# (`json` is imported at the top of this file. Fixed here: extraction junk
# fused onto the final `print` line, and the manual append-loop replaced by
# a comprehension.)
data = '''
[
    { "name":"Alena", "count":100 },
    { "name":"Levon", "count":97 },
    { "name":"Shakira", "count":96 },
    { "name":"Keerah", "count":95 },
    { "name":"Anesu", "count":92 },
    { "name":"Zishan", "count":90 },
    { "name":"Francesco", "count":87 },
    { "name":"Camron", "count":87 },
    { "name":"Brannan", "count":83 },
    { "name":"Karly", "count":82 },
    { "name":"Ohran", "count":81 },
    { "name":"Oswald", "count":80 },
    { "name":"Fasai", "count":78 },
    { "name":"Renas", "count":76 },
    { "name":"Devon", "count":76 },
    { "name":"McKenzie", "count":73 },
    { "name":"Caelan", "count":69 },
    { "name":"Ayshah", "count":65 },
    { "name":"Lettice", "count":64 },
    { "name":"Cariss", "count":56 },
    { "name":"Johannes", "count":52 },
    { "name":"Melissande", "count":52 },
    { "name":"Yishuka", "count":51 },
    { "name":"Tymon", "count":51 },
    { "name":"Aleksandra", "count":50 },
    { "name":"Macaully", "count":49 },
    { "name":"Sharlene", "count":48 },
    { "name":"Lucie", "count":46 },
    { "name":"Ellyce", "count":45 },
    { "name":"Shalamar", "count":45 },
    { "name":"Rehaan", "count":45 },
    { "name":"Macie", "count":43 },
    { "name":"Harjyot", "count":37 },
    { "name":"Jumaimah", "count":36 },
    { "name":"Dominic", "count":30 },
    { "name":"Shahed", "count":30 },
    { "name":"Daumantas", "count":29 },
    { "name":"Muhammad", "count":28 },
    { "name":"Latisha", "count":27 },
    { "name":"Mahan", "count":24 },
    { "name":"Mirren", "count":23 },
    { "name":"Farhan", "count":21 },
    { "name":"Emmet", "count":19 },
    { "name":"Ilysa", "count":16 },
    { "name":"Cruz", "count":14 },
    { "name":"Kirk", "count":14 },
    { "name":"Greig", "count":11 },
    { "name":"Angelo", "count":9 },
    { "name":"Leena", "count":9 },
    { "name":"Kaelan", "count":8 }
]'''

info = json.loads(data)
print('User count:', len(info))

# Extract every "count" as an int and total them.
numlist = [int(item['count']) for item in info]
total = sum(numlist)
print(total)
from typing import Callable, Generic, TypeVar, Union, Any, Optional, cast, overload
T = TypeVar("T") # Success type
E = TypeVar("E") # Error type
F = TypeVar("F")
U = TypeVar("U")
class Result(Generic[T, E]):
    """
    A simple `Result` type inspired by Rust.
    Not all methods (https://doc.rust-lang.org/std/result/enum.Result.html)
    have been implemented, only the ones that make sense in the Python context.
    """

    def __init__(self, is_ok: bool, value: Union[T, E], force: bool = False) -> None:
        """Do not call this constructor, use the Ok or Err class methods instead.
        There are no type guarantees on the value if this is called directly.
        Args:
            is_ok:
                If this represents an ok result
            value:
                The value inside the result
            force:
                Force creation of the object. This is false by default to prevent
                accidentally creating instance of a Result in an unsafe way.
        """
        # `force` is a guard: only the Ok()/Err() factory classmethods pass
        # force=True, so direct construction fails loudly.
        if force is not True:
            raise RuntimeError("Don't instantiate a Result directly. "
                               "Use the Ok(value) and Err(error) class methods instead.")
        else:
            self._is_ok = is_ok
            self._value = value

    def __eq__(self, other: Any) -> bool:
        # Equal only when both are the same concrete class, the same variant
        # (Ok/Err), and carry equal inner values.
        return (self.__class__ == other.__class__ and
                self.is_ok() == cast(Result, other).is_ok() and
                self._value == other._value)

    def __ne__(self, other: Any) -> bool:
        return not (self == other)

    def __hash__(self) -> int:
        # Hash on (variant, value) so Ok(x) and Err(x) hash differently.
        return hash((self.is_ok(), self._value))

    def __repr__(self) -> str:
        if self.is_ok():
            return 'Ok({})'.format(repr(self._value))
        else:
            return 'Err({})'.format(repr(self._value))

    # Overloads: a bare Ok() defaults the inner value to True.
    @classmethod
    @overload
    def Ok(cls) -> 'Result[bool, Any]':
        pass

    @classmethod
    @overload
    def Ok(cls, value: T) -> 'Result[T, Any]':
        pass

    @classmethod
    def Ok(cls, value: Any = True) -> 'Result[Any, Any]':
        # Factory for the success variant.
        return cls(is_ok=True, value=value, force=True)

    @classmethod
    def Err(cls, error: E) -> 'Result[Any, E]':
        # Factory for the error variant.
        return cls(is_ok=False, value=error, force=True)

    def is_ok(self) -> bool:
        return self._is_ok

    def is_err(self) -> bool:
        return not self._is_ok

    def ok(self) -> Optional[T]:
        """
        Return the value if it is an `Ok` type. Return `None` if it is an
        `Err`.
        """
        return cast(T, self._value) if self.is_ok() else None

    def err(self) -> Optional[E]:
        """
        Return the error if this is an `Err` type. Return `None` otherwise.
        """
        return cast(E, self._value) if self.is_err() else None

    @property
    def value(self) -> Union[T, E]:
        """
        Return the inner value. This might be either the ok or the error type.
        """
        return self._value

    def expect(self, message: str) -> T:
        """
        Return the value if it is an `Ok` type. Raises an `UnwrapError` if it
        is an `Err`.
        """
        if self._is_ok:
            return cast(T, self._value)
        else:
            raise UnwrapError(message)

    def expect_err(self, message: str) -> E:
        """
        Return the value if it is an `Err` type. Raises an `UnwrapError` if it
        is `Ok`.
        """
        if self._is_ok:
            raise UnwrapError(message)
        return cast(E, self._value)

    def unwrap(self) -> T:
        """
        Return the value if it is an `Ok` type. Raises an `UnwrapError` if it
        is an `Err`.
        """
        return self.expect("Called `Result.unwrap()` on an `Err` value")

    def unwrap_err(self) -> E:
        """
        Return the value if it is an `Err` type. Raises an `UnwrapError` if it
        is `Ok`.
        """
        return self.expect_err("Called `Result.unwrap_err()` on an `Ok` value")

    def unwrap_or(self, default: T) -> T:
        """
        Return the value if it is an `Ok` type. Return `default` if it is an
        `Err`.
        """
        if self._is_ok:
            return cast(T, self._value)
        else:
            return default

    def map(self, op: Callable[[T], U]) -> 'Result[U, E]':
        """
        If contained result is `Ok`, return `Ok` with original value mapped to
        a new value using the passed in function. Otherwise return `Err` with
        same value.
        """
        # An Err passes through unchanged (only its static type is re-cast).
        if not self._is_ok:
            return cast(Result[U, E], self)
        return Ok(op(cast(T, self._value)))

    def map_or(self, default: U, op: Callable[[T], U]) -> U:
        """
        If contained result is `Ok`, return the original value mapped to a new
        value using the passed in function. Otherwise return the default value.
        """
        if not self._is_ok:
            return default
        return op(cast(T, self._value))

    def map_or_else(
        self,
        default_op: Callable[[], U],
        op: Callable[[T], U]
    ) -> U:
        """
        If contained result is `Ok`, return original value mapped to
        a new value using the passed in `op` function. Otherwise use `default_op`
        to compute a default value.
        """
        if not self._is_ok:
            return default_op()
        return op(cast(T, self._value))

    def map_err(self, op: Callable[[E], F]) -> 'Result[T, F]':
        """
        If contained result is `Err`, return `Err` with original value mapped
        to a new value using the passed in `op` function. Otherwise return `Ok`
        with the same value.
        """
        if self._is_ok:
            return cast(Result[T, F], self)
        return Err(op(cast(E, self._value)))
@overload
def Ok() -> Result[bool, Any]:
    ...
@overload
def Ok(value: T) -> Result[T, Any]:
    ...
def Ok(value: Any = True) -> Result[Any, Any]:
    """Module-level shorthand for :meth:`Result.Ok` (defaults to ``True``)."""
    return Result.Ok(value)
def Err(error: E) -> Result[Any, E]:
    """Module-level shorthand for :meth:`Result.Err`."""
    return Result.Err(error)
class UnwrapError(Exception):
    """Raised when unwrap()/expect() is called on the wrong Result variant."""
    # Fixed here: extraction junk that had been fused onto the `pass` line.
T = TypeVar("T") # Success type
E = TypeVar("E") # Error type
F = TypeVar("F")
U = TypeVar("U")
class Result(Generic[T, E]):
    """
    A simple `Result` type inspired by Rust.
    Not all methods (https://doc.rust-lang.org/std/result/enum.Result.html)
    have been implemented, only the ones that make sense in the Python context.
    """

    def __init__(self, is_ok: bool, value: Union[T, E], force: bool = False) -> None:
        """Do not call this constructor, use the Ok or Err class methods instead.
        There are no type guarantees on the value if this is called directly.
        Args:
            is_ok:
                If this represents an ok result
            value:
                The value inside the result
            force:
                Force creation of the object. This is false by default to prevent
                accidentally creating instance of a Result in an unsafe way.
        """
        # `force` is a guard: only the Ok()/Err() factory classmethods pass
        # force=True, so direct construction fails loudly.
        if force is not True:
            raise RuntimeError("Don't instantiate a Result directly. "
                               "Use the Ok(value) and Err(error) class methods instead.")
        else:
            self._is_ok = is_ok
            self._value = value

    def __eq__(self, other: Any) -> bool:
        # Equal only when both are the same concrete class, the same variant
        # (Ok/Err), and carry equal inner values.
        return (self.__class__ == other.__class__ and
                self.is_ok() == cast(Result, other).is_ok() and
                self._value == other._value)

    def __ne__(self, other: Any) -> bool:
        return not (self == other)

    def __hash__(self) -> int:
        # Hash on (variant, value) so Ok(x) and Err(x) hash differently.
        return hash((self.is_ok(), self._value))

    def __repr__(self) -> str:
        if self.is_ok():
            return 'Ok({})'.format(repr(self._value))
        else:
            return 'Err({})'.format(repr(self._value))

    # Overloads: a bare Ok() defaults the inner value to True.
    @classmethod
    @overload
    def Ok(cls) -> 'Result[bool, Any]':
        pass

    @classmethod
    @overload
    def Ok(cls, value: T) -> 'Result[T, Any]':
        pass

    @classmethod
    def Ok(cls, value: Any = True) -> 'Result[Any, Any]':
        # Factory for the success variant.
        return cls(is_ok=True, value=value, force=True)

    @classmethod
    def Err(cls, error: E) -> 'Result[Any, E]':
        # Factory for the error variant.
        return cls(is_ok=False, value=error, force=True)

    def is_ok(self) -> bool:
        return self._is_ok

    def is_err(self) -> bool:
        return not self._is_ok

    def ok(self) -> Optional[T]:
        """
        Return the value if it is an `Ok` type. Return `None` if it is an
        `Err`.
        """
        return cast(T, self._value) if self.is_ok() else None

    def err(self) -> Optional[E]:
        """
        Return the error if this is an `Err` type. Return `None` otherwise.
        """
        return cast(E, self._value) if self.is_err() else None

    @property
    def value(self) -> Union[T, E]:
        """
        Return the inner value. This might be either the ok or the error type.
        """
        return self._value

    def expect(self, message: str) -> T:
        """
        Return the value if it is an `Ok` type. Raises an `UnwrapError` if it
        is an `Err`.
        """
        if self._is_ok:
            return cast(T, self._value)
        else:
            raise UnwrapError(message)

    def expect_err(self, message: str) -> E:
        """
        Return the value if it is an `Err` type. Raises an `UnwrapError` if it
        is `Ok`.
        """
        if self._is_ok:
            raise UnwrapError(message)
        return cast(E, self._value)

    def unwrap(self) -> T:
        """
        Return the value if it is an `Ok` type. Raises an `UnwrapError` if it
        is an `Err`.
        """
        return self.expect("Called `Result.unwrap()` on an `Err` value")

    def unwrap_err(self) -> E:
        """
        Return the value if it is an `Err` type. Raises an `UnwrapError` if it
        is `Ok`.
        """
        return self.expect_err("Called `Result.unwrap_err()` on an `Ok` value")

    def unwrap_or(self, default: T) -> T:
        """
        Return the value if it is an `Ok` type. Return `default` if it is an
        `Err`.
        """
        if self._is_ok:
            return cast(T, self._value)
        else:
            return default

    def map(self, op: Callable[[T], U]) -> 'Result[U, E]':
        """
        If contained result is `Ok`, return `Ok` with original value mapped to
        a new value using the passed in function. Otherwise return `Err` with
        same value.
        """
        # An Err passes through unchanged (only its static type is re-cast).
        if not self._is_ok:
            return cast(Result[U, E], self)
        return Ok(op(cast(T, self._value)))

    def map_or(self, default: U, op: Callable[[T], U]) -> U:
        """
        If contained result is `Ok`, return the original value mapped to a new
        value using the passed in function. Otherwise return the default value.
        """
        if not self._is_ok:
            return default
        return op(cast(T, self._value))

    def map_or_else(
        self,
        default_op: Callable[[], U],
        op: Callable[[T], U]
    ) -> U:
        """
        If contained result is `Ok`, return original value mapped to
        a new value using the passed in `op` function. Otherwise use `default_op`
        to compute a default value.
        """
        if not self._is_ok:
            return default_op()
        return op(cast(T, self._value))

    def map_err(self, op: Callable[[E], F]) -> 'Result[T, F]':
        """
        If contained result is `Err`, return `Err` with original value mapped
        to a new value using the passed in `op` function. Otherwise return `Ok`
        with the same value.
        """
        if self._is_ok:
            return cast(Result[T, F], self)
        return Err(op(cast(E, self._value)))
@overload
def Ok() -> Result[bool, Any]:
    ...
@overload
def Ok(value: T) -> Result[T, Any]:
    ...
def Ok(value: Any = True) -> Result[Any, Any]:
    """Module-level shorthand for :meth:`Result.Ok` (defaults to ``True``)."""
    return Result.Ok(value)
def Err(error: E) -> Result[Any, E]:
    """Module-level shorthand for :meth:`Result.Err`."""
    return Result.Err(error)
class UnwrapError(Exception):
    """Raised when unwrap()/expect() is called on the wrong Result variant."""
    # Fixed here: extraction junk that had been fused onto the `pass` line.
import optparse, os, shutil, subprocess, sys, tempfile
def stop_err(msg):
    """Report a fatal error on stderr, then terminate via SystemExit."""
    sys.stderr.write(msg)
    sys.exit()
def cleanup_before_exit(tmp_dir):
    """Delete the temporary working directory if it exists; no-op otherwise."""
    if not tmp_dir:
        return
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
def main():
    """Write a SOAPdenovo2 library config file from Galaxy command-line options.

    Each library contributes one [LIB] section; per-library option lists are
    aligned by index (avg_ins determines the library count). Single- and
    paired-read inputs keep separate running indices into their input lists.
    Syntax is kept Python 2.6+/3 compatible to match the rest of the file.
    """
    # Parse command line
    parser = optparse.OptionParser()
    parser.add_option("", "--max_read_length", dest="max_read_length", help="Maximum read length")
    # Per-library parameters (append: one entry per library)
    parser.add_option("", "--avg_ins", action="append", type="string", dest="avg_insert_list", help="Average insert size")
    parser.add_option("", "--reverse_seq", action="append", type="string", dest="reverse_seq_list", help="Reverse sequence?")
    parser.add_option("", "--asm_flags", action="append", type="string", dest="asm_flags_list", help="Which operations should the reads be used for?")
    parser.add_option("", "--rd_len_cutoff", action="append", type="string", dest="rd_len_cutoff_list",
                      help="Number of base pairs to use from reads")
    parser.add_option("", "--rank", action="append", type="string", dest="rank_list", help="Which order are the reads used while scaffolding")
    parser.add_option("", "--pair_num_cutoff", action="append", type="string", dest="pair_num_cutoff_list",
                      help="Pair number cutoff for a reliable connection")
    parser.add_option("", "--map_len", action="append", type="string", dest="map_len_list",
                      help="Length of contig to be aligned for a reliable read location")
    # Data inputs
    parser.add_option("", "--type_of_data", action="append", type="string", dest="type_of_data_list")
    parser.add_option("", "--format_of_data", action="append", type="string", dest="format_of_data_list")
    parser.add_option("", "--single_fastq_input1", action="append", type="string", dest="single_fastq_input1_list")
    parser.add_option("", "--single_fasta_input1", action="append", type="string", dest="single_fasta_input1_list")
    parser.add_option("", "--single_bam_input1", action="append", type="string", dest="single_bam_input1_list")
    parser.add_option("", "--paired_fastq_input1", action="append", type="string", dest="paired_fastq_input1_list")
    parser.add_option("", "--paired_fastq_input2", action="append", type="string", dest="paired_fastq_input2_list")
    parser.add_option("", "--paired_fasta_input1", action="append", type="string", dest="paired_fasta_input1_list")
    parser.add_option("", "--paired_fasta_input2", action="append", type="string", dest="paired_fasta_input2_list")
    parser.add_option("", "--paired_bam_input1", action="append", type="string", dest="paired_bam_input1_list")
    parser.add_option("", "--paired_bam_input2", action="append", type="string", dest="paired_bam_input2_list")
    parser.add_option("", "--analysis_settings_type", dest="analysis_settings_type")
    # Outputs
    parser.add_option("", "--soap_config", dest='soap_config')
    opts, args = parser.parse_args()
    # Temporary directory for intermediate processing
    dirpath = tempfile.mkdtemp()
    # Temp file holding the generated soapdenovo2 running configuration
    config_file = tempfile.NamedTemporaryFile(dir=dirpath, prefix="soap_", suffix=".config").name
    try:
        fout = open(config_file, 'w')
        try:
            fout.write("max_rd_len=%s\n" % opts.max_read_length)
            # avg_ins is supplied once per library, so it fixes the library count.
            # Separate indices keep count of single vs paired read inputs.
            single_read_index = 0
            paired_read_index = 0
            for index in range(len(opts.avg_insert_list)):
                fout.write("[LIB]\n")
                fout.write("avg_ins=%s\n" % opts.avg_insert_list[index])
                fout.write("reverse_seq=%s\n" % opts.reverse_seq_list[index])
                fout.write("asm_flags=%s\n" % opts.asm_flags_list[index])
                fout.write("rd_len_cutoff=%s\n" % opts.rd_len_cutoff_list[index])
                fout.write("rank=%s\n" % opts.rank_list[index])
                fout.write("pair_num_cutoff=%s\n" % opts.pair_num_cutoff_list[index])
                fout.write("map_len=%s\n" % opts.map_len_list[index])
                print(opts.type_of_data_list[index])
                print(opts.format_of_data_list[index])
                if opts.type_of_data_list[index] == "single":  # then only one read
                    if opts.format_of_data_list[index] == "fastq":
                        fout.write("q=%s\n" % opts.single_fastq_input1_list[single_read_index])
                    # BUG FIX: was `opts.format_of_data` (a non-existent option,
                    # always None), so the fasta branch could never be taken.
                    elif opts.format_of_data_list[index] == "fasta":
                        fout.write("f=%s\n" % opts.single_fasta_input1_list[single_read_index])
                    else:
                        fout.write("b=%s\n" % opts.single_bam_input1_list[single_read_index])
                    single_read_index = single_read_index + 1
                elif opts.type_of_data_list[index] == "paired":
                    if opts.format_of_data_list[index] == "fastq":
                        fout.write("q1=%s\n" % opts.paired_fastq_input1_list[paired_read_index])
                        fout.write("q2=%s\n" % opts.paired_fastq_input2_list[paired_read_index])
                    elif opts.format_of_data_list[index] == "fasta":
                        fout.write("f1=%s\n" % opts.paired_fasta_input1_list[paired_read_index])
                        fout.write("f2=%s\n" % opts.paired_fasta_input2_list[paired_read_index])
                    else:
                        # BUG FIX: the BAM branch previously wrote the *fasta*
                        # input lists; use the paired BAM inputs instead.
                        fout.write("b1=%s\n" % opts.paired_bam_input1_list[paired_read_index])
                        fout.write("b2=%s\n" % opts.paired_bam_input2_list[paired_read_index])
                    paired_read_index = paired_read_index + 1
        finally:
            fout.close()
    except Exception as e:
        stop_err("File cannot be opened for writing soap.config " + str(e))
    # Copy the generated config to the Galaxy output dataset.
    config_out = open(opts.soap_config, 'w')  # text mode: str lines are written
    src = open(config_file)  # renamed from `file` (shadowed a builtin)
    for line in src:
        config_out.write(line)
    config_out.close()
    src.close()
    # Clean up temp files (previously commented out with an undefined variable)
    cleanup_before_exit(dirpath)
    # Check results in output file
    if os.path.getsize(opts.soap_config) > 0:
        sys.stdout.write('Status complete')
    else:
        stop_err("The output is empty")
# Fixed here: extraction junk that had been fused onto this line.
if __name__ == "__main__":
    main()
def stop_err(msg):
    """Report a fatal error on stderr, then terminate via SystemExit."""
    sys.stderr.write(msg)
    sys.exit()
def cleanup_before_exit(tmp_dir):
    """Delete the temporary working directory if it exists; no-op otherwise."""
    if not tmp_dir:
        return
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
def main():
    """Write a SOAPdenovo2 library config file from Galaxy command-line options.

    Each library contributes one [LIB] section; per-library option lists are
    aligned by index (avg_ins determines the library count). Single- and
    paired-read inputs keep separate running indices into their input lists.
    Syntax is kept Python 2.6+/3 compatible to match the rest of the file.
    """
    # Parse command line
    parser = optparse.OptionParser()
    parser.add_option("", "--max_read_length", dest="max_read_length", help="Maximum read length")
    # Per-library parameters (append: one entry per library)
    parser.add_option("", "--avg_ins", action="append", type="string", dest="avg_insert_list", help="Average insert size")
    parser.add_option("", "--reverse_seq", action="append", type="string", dest="reverse_seq_list", help="Reverse sequence?")
    parser.add_option("", "--asm_flags", action="append", type="string", dest="asm_flags_list", help="Which operations should the reads be used for?")
    parser.add_option("", "--rd_len_cutoff", action="append", type="string", dest="rd_len_cutoff_list",
                      help="Number of base pairs to use from reads")
    parser.add_option("", "--rank", action="append", type="string", dest="rank_list", help="Which order are the reads used while scaffolding")
    parser.add_option("", "--pair_num_cutoff", action="append", type="string", dest="pair_num_cutoff_list",
                      help="Pair number cutoff for a reliable connection")
    parser.add_option("", "--map_len", action="append", type="string", dest="map_len_list",
                      help="Length of contig to be aligned for a reliable read location")
    # Data inputs
    parser.add_option("", "--type_of_data", action="append", type="string", dest="type_of_data_list")
    parser.add_option("", "--format_of_data", action="append", type="string", dest="format_of_data_list")
    parser.add_option("", "--single_fastq_input1", action="append", type="string", dest="single_fastq_input1_list")
    parser.add_option("", "--single_fasta_input1", action="append", type="string", dest="single_fasta_input1_list")
    parser.add_option("", "--single_bam_input1", action="append", type="string", dest="single_bam_input1_list")
    parser.add_option("", "--paired_fastq_input1", action="append", type="string", dest="paired_fastq_input1_list")
    parser.add_option("", "--paired_fastq_input2", action="append", type="string", dest="paired_fastq_input2_list")
    parser.add_option("", "--paired_fasta_input1", action="append", type="string", dest="paired_fasta_input1_list")
    parser.add_option("", "--paired_fasta_input2", action="append", type="string", dest="paired_fasta_input2_list")
    parser.add_option("", "--paired_bam_input1", action="append", type="string", dest="paired_bam_input1_list")
    parser.add_option("", "--paired_bam_input2", action="append", type="string", dest="paired_bam_input2_list")
    parser.add_option("", "--analysis_settings_type", dest="analysis_settings_type")
    # Outputs
    parser.add_option("", "--soap_config", dest='soap_config')
    opts, args = parser.parse_args()
    # Temporary directory for intermediate processing
    dirpath = tempfile.mkdtemp()
    # Temp file holding the generated soapdenovo2 running configuration
    config_file = tempfile.NamedTemporaryFile(dir=dirpath, prefix="soap_", suffix=".config").name
    try:
        fout = open(config_file, 'w')
        try:
            fout.write("max_rd_len=%s\n" % opts.max_read_length)
            # avg_ins is supplied once per library, so it fixes the library count.
            # Separate indices keep count of single vs paired read inputs.
            single_read_index = 0
            paired_read_index = 0
            for index in range(len(opts.avg_insert_list)):
                fout.write("[LIB]\n")
                fout.write("avg_ins=%s\n" % opts.avg_insert_list[index])
                fout.write("reverse_seq=%s\n" % opts.reverse_seq_list[index])
                fout.write("asm_flags=%s\n" % opts.asm_flags_list[index])
                fout.write("rd_len_cutoff=%s\n" % opts.rd_len_cutoff_list[index])
                fout.write("rank=%s\n" % opts.rank_list[index])
                fout.write("pair_num_cutoff=%s\n" % opts.pair_num_cutoff_list[index])
                fout.write("map_len=%s\n" % opts.map_len_list[index])
                print(opts.type_of_data_list[index])
                print(opts.format_of_data_list[index])
                if opts.type_of_data_list[index] == "single":  # then only one read
                    if opts.format_of_data_list[index] == "fastq":
                        fout.write("q=%s\n" % opts.single_fastq_input1_list[single_read_index])
                    # BUG FIX: was `opts.format_of_data` (a non-existent option,
                    # always None), so the fasta branch could never be taken.
                    elif opts.format_of_data_list[index] == "fasta":
                        fout.write("f=%s\n" % opts.single_fasta_input1_list[single_read_index])
                    else:
                        fout.write("b=%s\n" % opts.single_bam_input1_list[single_read_index])
                    single_read_index = single_read_index + 1
                elif opts.type_of_data_list[index] == "paired":
                    if opts.format_of_data_list[index] == "fastq":
                        fout.write("q1=%s\n" % opts.paired_fastq_input1_list[paired_read_index])
                        fout.write("q2=%s\n" % opts.paired_fastq_input2_list[paired_read_index])
                    elif opts.format_of_data_list[index] == "fasta":
                        fout.write("f1=%s\n" % opts.paired_fasta_input1_list[paired_read_index])
                        fout.write("f2=%s\n" % opts.paired_fasta_input2_list[paired_read_index])
                    else:
                        # BUG FIX: the BAM branch previously wrote the *fasta*
                        # input lists; use the paired BAM inputs instead.
                        fout.write("b1=%s\n" % opts.paired_bam_input1_list[paired_read_index])
                        fout.write("b2=%s\n" % opts.paired_bam_input2_list[paired_read_index])
                    paired_read_index = paired_read_index + 1
        finally:
            fout.close()
    except Exception as e:
        stop_err("File cannot be opened for writing soap.config " + str(e))
    # Copy the generated config to the Galaxy output dataset.
    config_out = open(opts.soap_config, 'w')  # text mode: str lines are written
    src = open(config_file)  # renamed from `file` (shadowed a builtin)
    for line in src:
        config_out.write(line)
    config_out.close()
    src.close()
    # Clean up temp files (previously commented out with an undefined variable)
    cleanup_before_exit(dirpath)
    # Check results in output file
    if os.path.getsize(opts.soap_config) > 0:
        sys.stdout.write('Status complete')
    else:
        stop_err("The output is empty")
# Fixed here: extraction junk that had been fused onto this line.
if __name__ == "__main__":
    main()
from collections import deque
from re import S
import yaml
import numpy as np
# Load the experiment config and seed NumPy *before* importing TensorFlow so
# that subsequent weight initialisation is reproducible.
with open('config.yml', 'r') as ymlfile:
    cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
    seed = cfg['setup']['seed']
# (redundant explicit close() removed: the `with` block closes the file)
np.random.seed(seed)

import tensorflow as tf
from tensorflow.keras.optimizers import Adam

tf.random.set_seed(seed)
from utils.deepnetwork import DeepNetwork
from utils.memorybuffer import Buffer
class LPPO:
def __init__(self, env, info):
self.env = env
self.c_limit = 25
self.pi = DeepNetwork.build(env, info['actor'], actor=True, name='actor')
self.pi_opt = Adam(learning_rate=info['pi_lr'])
self.v= DeepNetwork.build(env, info['critic'], name='critic')
self.v_opt = Adam(learning_rate=info['vf_lr'])
self.vc = DeepNetwork.build(env, info['critic'], name='critic')
self.vc_opt = Adam(learning_rate=info['vf_lr'])
penalty_init = info['penalty']
self.penalty = tf.Variable(np.log(max(np.exp(penalty_init)-1, 1e-8)), trainable=True, dtype=tf.float32)
self.penalty_opt = Adam(learning_rate=info['penalty_lr'])
self.buffer = Buffer(info['steps_per_epoch'])
def run_actor(self, s, logstd=-0.5):
std = np.exp(logstd)
s = np.array([s])
mu = self.pi(s).numpy()[0]
a = np.random.normal(loc=mu, scale=std)
v = self.v(s).numpy().squeeze()
vc = self.vc(s).numpy().squeeze()
logp = -0.5 * ( ((a - mu)/(std+1e-10))**2 + 2 * logstd + np.log(2 * np.pi))
logp = np.sum(logp)
return a, mu, v, vc, logp
def update(self, info, mean_cost, logstd=-0.5):
"""Prepare the samples and the cumulative reward to update the network
Args:
Returns:
None
"""
with tf.GradientTape() as tape_m:
penalty_loss = tf.multiply(-self.penalty, (mean_cost - self.c_limit))
penalty_grad = tape_m.gradient(penalty_loss, [self.penalty])
self.penalty_opt.apply_gradients(zip(penalty_grad, [self.penalty]))
s, a_old, mu_old, logp_old, adv, cadv, ret, cret = self.buffer.sample()
clip = info['clip']
target_kl = 0.01
for i in range(info['pi_iters']):
with tf.GradientTape() as tape_pi:
std = np.exp(logstd)
mu = self.pi(s)
logp = -0.5 * ( ((a_old - mu)/(std+1e-10))**2 + 2 * logstd + np.log(2 * np.pi))
logp = tf.reduce_sum(logp, axis=1)
ratio = tf.exp(logp - logp_old)
clip_adv = tf.where(adv > 0, (1+clip)*adv, (1-clip)*adv)
surr_adv = tf.reduce_mean(tf.minimum(ratio*adv, clip_adv))
surr_cadv = tf.reduce_mean(ratio*cadv)
penalty = tf.nn.softplus(self.penalty).numpy()
pi_obj = (surr_adv - penalty * surr_cadv) / (1 + penalty)
pi_loss = -pi_obj
pi_grad = tape_pi.gradient(pi_loss, self.pi.trainable_variables)
self.pi_opt.apply_gradients(zip(pi_grad, self.pi.trainable_variables))
var = tf.exp(2 * logstd)
mu_ = self.pi(s)
pre_sum = 0.5 * ( ((mu_old - mu_)**2 + var) / (var + 1e-10) - 1)
kls = tf.reduce_sum(pre_sum, axis=1)
kl = tf.reduce_mean(kls).numpy()
if kl > 1.2 * target_kl:
print(f"Early stopping at iteration {i} due to reaching max kl")
break
for i in range(info['vf_iters']):
with tf.GradientTape() as tape_v, tf.GradientTape() as tape_vc:
v = self.v(s)
v_loss = tf.reduce_mean((ret - v)**2)
v_grad = tape_v.gradient(v_loss, self.v.trainable_variables)
self.v_opt.apply_gradients(zip(v_grad, self.v.trainable_variables))
vc = self.vc(s)
vc_loss = tf.reduce_mean((cret - vc)**2)
vc_grad = tape_vc.gradient(vc_loss, self.vc.trainable_variables)
self.vc_opt.apply_gradients(zip(vc_grad, self.vc.trainable_variables))
self.buffer.clear()
def round_obs(self, obs):
    """Scale the first three (accelerometer) channels by 0.1 and round to 3 decimals.

    The scaling is applied in place on *obs*, matching the caller's expectation.
    """
    obs[:3] = obs[:3] * 0.1
    return np.around(obs, decimals=3)
def train(self, tracker, info):
    """Interact with the environment, log episode stats, and update each epoch.

    Args:
        tracker: metrics recorder with update(list) and save_metrics().
        info: hyper-parameter dict; reads 'n_step' and 'steps_per_epoch'
            (and forwards itself to self.update()).

    Fix: the final ``tracker.save_metrics()`` line had stray non-Python
    tokens appended (`| agent.py | from collections import deque`), which
    broke the statement; the trailing garbage is removed.
    """
    r_mean, c_mean = deque(maxlen=100), deque(maxlen=100)
    c_tracker = deque(maxlen=info['steps_per_epoch'])
    n_step, steps_per_epoch = info['n_step'], info['steps_per_epoch']
    epochs = int(n_step / steps_per_epoch)
    ep_len = 1000  # hard cap on episode length
    ep_r, ep_c, steps, tot_steps = 0, 0, 0, 0
    s = self.env.reset()
    s = self.round_obs(s)
    for _ in range(epochs):
        for t in range(steps_per_epoch):
            a, mu, v, vc, logp = self.run_actor(s)
            s_, r, d, i = self.env.step([a])
            s_ = self.round_obs(s_)
            c = int(i['cost'])
            self.buffer.store(s, a, mu, r, v, c, vc, logp)
            ep_r += r
            ep_c += c
            steps += 1
            tot_steps += 1
            s = s_
            if d or steps == ep_len:
                # Episode boundary: log stats, bootstrap values, reset counters.
                # NOTE(review): episode index assumes fixed-length episodes;
                # confirm this is intended when d fires before ep_len.
                e = int(tot_steps / ep_len)
                r_mean.append(ep_r)
                c_mean.append(ep_c)
                c_tracker.append(ep_c)
                tracker.update([e, ep_r, ep_c, self.penalty.numpy()])
                s = np.array([s])
                # NOTE(review): bootstraps with V(s)/Vc(s) even when d=True;
                # typically the bootstrap is zeroed on true termination --
                # confirm Buffer.compute_mc handles this.
                last_v = self.v(s).numpy().squeeze()
                last_vc = self.vc(s).numpy().squeeze()
                print(f'E: {e}, R: {ep_r:.3f}, C: {ep_c}, P: {tf.nn.softplus(self.penalty).numpy():.4f}, MeanR: {np.mean(r_mean):.3f}, MeanC: {np.mean(c_mean):.3f}')
                self.buffer.compute_mc(steps, last_v, last_vc)
                ep_r, ep_c, steps = 0, 0, 0
                s = self.env.reset()
                s = self.round_obs(s)
        # One Lagrangian-PPO update per epoch, then the buffer is cleared.
        self.update(info, np.mean(c_tracker))
    tracker.save_metrics()
from re import S
import yaml
import numpy as np
with open('config.yml', 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
seed = cfg['setup']['seed']
ymlfile.close()
np.random.seed(seed)
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
tf.random.set_seed(seed)
from utils.deepnetwork import DeepNetwork
from utils.memorybuffer import Buffer
class LPPO:
    """Lagrangian PPO agent.

    Clipped PPO on the reward advantage, combined with a learned, softplus-
    parameterized Lagrange multiplier that penalizes the cost surrogate so
    that the mean episodic cost is driven toward ``c_limit``.

    Fix: the final ``tracker.save_metrics()`` line carried stray non-Python
    tokens (`| 0.742141 | 0.290893`); the trailing garbage is removed.
    All logic is otherwise unchanged.
    """

    def __init__(self, env, info):
        self.env = env
        self.c_limit = 25  # target upper bound for mean episodic cost
        self.pi = DeepNetwork.build(env, info['actor'], actor=True, name='actor')
        self.pi_opt = Adam(learning_rate=info['pi_lr'])
        self.v = DeepNetwork.build(env, info['critic'], name='critic')
        self.v_opt = Adam(learning_rate=info['vf_lr'])
        # NOTE(review): the cost critic reuses name='critic' -- confirm
        # DeepNetwork.build tolerates duplicate names.
        self.vc = DeepNetwork.build(env, info['critic'], name='critic')
        self.vc_opt = Adam(learning_rate=info['vf_lr'])
        penalty_init = info['penalty']
        # Store the softplus pre-image of penalty_init so that
        # softplus(self.penalty) ~= penalty_init while staying positive.
        self.penalty = tf.Variable(np.log(max(np.exp(penalty_init)-1, 1e-8)), trainable=True, dtype=tf.float32)
        self.penalty_opt = Adam(learning_rate=info['penalty_lr'])
        self.buffer = Buffer(info['steps_per_epoch'])

    def run_actor(self, s, logstd=-0.5):
        """Sample an action for state *s*.

        Returns:
            (action, policy mean, value, cost value, log-prob of the action)
        """
        std = np.exp(logstd)
        s = np.array([s])  # add batch dimension
        mu = self.pi(s).numpy()[0]
        a = np.random.normal(loc=mu, scale=std)
        v = self.v(s).numpy().squeeze()
        vc = self.vc(s).numpy().squeeze()
        # Diagonal-Gaussian log density, summed over action dimensions.
        logp = -0.5 * ( ((a - mu)/(std+1e-10))**2 + 2 * logstd + np.log(2 * np.pi))
        logp = np.sum(logp)
        return a, mu, v, vc, logp

    def update(self, info, mean_cost, logstd=-0.5):
        """Run one Lagrangian-PPO update from the samples in the buffer.

        Args:
            info: hyper-parameter dict; reads 'clip', 'pi_iters', 'vf_iters'.
            mean_cost: mean episodic cost, used to adapt the penalty.
            logstd: fixed log standard deviation of the policy.

        Returns:
            None
        """
        # Penalty ascent: grows when mean_cost exceeds c_limit, else shrinks.
        with tf.GradientTape() as tape_m:
            penalty_loss = tf.multiply(-self.penalty, (mean_cost - self.c_limit))
        penalty_grad = tape_m.gradient(penalty_loss, [self.penalty])
        self.penalty_opt.apply_gradients(zip(penalty_grad, [self.penalty]))
        s, a_old, mu_old, logp_old, adv, cadv, ret, cret = self.buffer.sample()
        clip = info['clip']
        target_kl = 0.01
        for i in range(info['pi_iters']):
            with tf.GradientTape() as tape_pi:
                std = np.exp(logstd)
                mu = self.pi(s)
                # Log-prob of stored actions under the current policy.
                logp = -0.5 * ( ((a_old - mu)/(std+1e-10))**2 + 2 * logstd + np.log(2 * np.pi))
                logp = tf.reduce_sum(logp, axis=1)
                ratio = tf.exp(logp - logp_old)
                # PPO clipped surrogate via tf.where (the clipped arm of the min).
                clip_adv = tf.where(adv > 0, (1+clip)*adv, (1-clip)*adv)
                surr_adv = tf.reduce_mean(tf.minimum(ratio*adv, clip_adv))
                surr_cadv = tf.reduce_mean(ratio*cadv)  # cost surrogate (unclipped)
                penalty = tf.nn.softplus(self.penalty).numpy()
                pi_obj = (surr_adv - penalty * surr_cadv) / (1 + penalty)
                pi_loss = -pi_obj
            pi_grad = tape_pi.gradient(pi_loss, self.pi.trainable_variables)
            self.pi_opt.apply_gradients(zip(pi_grad, self.pi.trainable_variables))
            # Approximate KL between old and new Gaussians (equal fixed std);
            # early-stop once it exceeds 1.2 * target_kl.
            var = tf.exp(2 * logstd)
            mu_ = self.pi(s)
            pre_sum = 0.5 * ( ((mu_old - mu_)**2 + var) / (var + 1e-10) - 1)
            kls = tf.reduce_sum(pre_sum, axis=1)
            kl = tf.reduce_mean(kls).numpy()
            if kl > 1.2 * target_kl:
                print(f"Early stopping at iteration {i} due to reaching max kl")
                break
        # Fit reward and cost value functions by regression on the returns.
        for i in range(info['vf_iters']):
            with tf.GradientTape() as tape_v, tf.GradientTape() as tape_vc:
                v = self.v(s)
                v_loss = tf.reduce_mean((ret - v)**2)
                v_grad = tape_v.gradient(v_loss, self.v.trainable_variables)
                self.v_opt.apply_gradients(zip(v_grad, self.v.trainable_variables))
                vc = self.vc(s)
                vc_loss = tf.reduce_mean((cret - vc)**2)
                vc_grad = tape_vc.gradient(vc_loss, self.vc.trainable_variables)
                self.vc_opt.apply_gradients(zip(vc_grad, self.vc.trainable_variables))
        self.buffer.clear()

    def round_obs(self, obs):
        """Scale the accelerometer channels (first three) and round to 3 decimals, in place."""
        obs[:3] *= 0.1  # Normalize the Accelerometer inputs
        return np.around(obs, decimals=3)

    def train(self, tracker, info):
        """Interact with the environment, log episode stats, update each epoch."""
        r_mean, c_mean = deque(maxlen=100), deque(maxlen=100)
        c_tracker = deque(maxlen=info['steps_per_epoch'])
        n_step, steps_per_epoch = info['n_step'], info['steps_per_epoch']
        epochs = int(n_step / steps_per_epoch)
        ep_len = 1000  # hard cap on episode length
        ep_r, ep_c, steps, tot_steps = 0, 0, 0, 0
        s = self.env.reset()
        s = self.round_obs(s)
        for _ in range(epochs):
            for t in range(steps_per_epoch):
                a, mu, v, vc, logp = self.run_actor(s)
                s_, r, d, i = self.env.step([a])
                s_ = self.round_obs(s_)
                c = int(i['cost'])
                self.buffer.store(s, a, mu, r, v, c, vc, logp)
                ep_r += r
                ep_c += c
                steps += 1
                tot_steps += 1
                s = s_
                if d or steps == ep_len:
                    # Episode boundary: log stats, bootstrap values, reset.
                    e = int(tot_steps / ep_len)
                    r_mean.append(ep_r)
                    c_mean.append(ep_c)
                    c_tracker.append(ep_c)
                    tracker.update([e, ep_r, ep_c, self.penalty.numpy()])
                    s = np.array([s])
                    # NOTE(review): bootstraps with V(s)/Vc(s) even when the
                    # episode truly terminated -- confirm compute_mc semantics.
                    last_v = self.v(s).numpy().squeeze()
                    last_vc = self.vc(s).numpy().squeeze()
                    print(f'E: {e}, R: {ep_r:.3f}, C: {ep_c}, P: {tf.nn.softplus(self.penalty).numpy():.4f}, MeanR: {np.mean(r_mean):.3f}, MeanC: {np.mean(c_mean):.3f}')
                    self.buffer.compute_mc(steps, last_v, last_vc)
                    ep_r, ep_c, steps = 0, 0, 0
                    s = self.env.reset()
                    s = self.round_obs(s)
            # One Lagrangian-PPO update per epoch; the buffer is cleared inside.
            self.update(info, np.mean(c_tracker))
        tracker.save_metrics()
import numpy
import numpy.testing
import algopy
def utpm2dirs(u):
"""
Vbar = utpm2dirs(u)
where u is an UTPM instance with
u.data.shape = (D,P) + shp
and V.shape == shp + (P,D)
"""
axes = tuple( numpy.arange(2,u.data.ndim))+ (1,0)
Vbar = u.data.transpose(axes)
return Vbar
def utpm2base_and_dirs(u):
"""
x,V = utpm2base_and_dirs(u)
where u is an UTPM instance with
u.data.shape = (D+1,P) + shp
then x.shape == shp
and V.shape == shp + (P,D)
"""
D,P = u.data.shape[:2]
D -= 1
shp = u.data.shape[2:]
x = numpy.zeros(shp)
V = numpy.zeros(shp+(P,D))
x[...] = u.data[0,0,...]
V[...] = u.data[1:,...].transpose( tuple(2+numpy.arange(len(shp))) + (1,0))
return x,V
def base_and_dirs2utpm(x,V):
"""
x_utpm = base_and_dirs2utpm(x,V)
where x_utpm is an instance of UTPM
V.shape = x.shape + (P,D)
then x_utpm.data.shape = (D+1,P) = x.shape
"""
x = numpy.asarray(x)
V = numpy.asarray(V)
xshp = x.shape
Vshp = V.shape
P,D = Vshp[-2:]
Nxshp = len(xshp)
NVshp = len(Vshp)
numpy.testing.assert_array_equal(xshp, Vshp[:-2], err_msg = 'x.shape does not match V.shape')
tc = numpy.zeros((D+1,P) + xshp)
for p in range(P):
tc[0,p,...] = x[...]
axes_ids = tuple(numpy.arange(NVshp))
tc[1:,...] = V.transpose((axes_ids[-1],axes_ids[-2]) + axes_ids[:-2])
return algopy.UTPM(tc)
def ndarray2utpm(A):
""" returns an UTPM instance from an array_like instance A with UTPM elements"""
from .globalfuncs import zeros
shp = numpy.shape(A)
A = numpy.ravel(A)
retval = zeros(shp,dtype=A[0])
for na, a in enumerate(A):
retval[na] = a
return retval
def symvec(A, UPLO='F'):
""" returns the distinct elements of a symmetrized square matrix A
as vector
Parameters
----------
A: array_like
symmetric matrix stored in UPLO format
UPLO: string
UPLO = 'F' fully populated symmetric matrix
UPLO = 'L' only the lower triangular part defines A
UPLO = 'U' only the upper triangular part defines A
Example 1:
~~~~~~~~~~
A = [[0,1,2],[1,3,4],[2,4,5]]
v = symvec(A)
returns v = [0,1,2,3,4,5]
Example 2:
~~~~~~~~~~
A = [[1,2],[3,4]]
is not symmetric and symmetrized, yielding
v = [1, (2+3)/2, 4]
as output
"""
from .globalfuncs import zeros
N,M = A.shape
assert N == M
v = zeros( ((N+1)*N)//2, dtype=A)
if UPLO=='F':
count = 0
for row in range(N):
for col in range(row,N):
v[count] = 0.5* (A[row,col] + A[col,row])
count +=1
elif UPLO=='L':
count = 0
for n in range(N):
for m in range(n,N):
v[count] = A[m,n]
count +=1
elif UPLO=='U':
count = 0
for n in range(N):
for m in range(n,N):
v[count] = A[n,m]
count +=1
else:
err_str = "UPLO must be either 'F','L', or 'U'\n"
err_str+= "however, provided UPLO=%s"%UPLO
raise ValueError(err_str)
return v
def vecsym(v):
"""
returns a full symmetric matrix filled
the distinct elements of v, filled row-wise
"""
from .globalfuncs import zeros
Nv = v.size
N = (int(numpy.sqrt(1 + 8*Nv)) - 1)//2
A = zeros( (N,N), dtype=v)
count = 0
for row in range(N):
for col in range(row,N):
A[row,col] = A[col,row] = v[count]
count +=1
return A
def piv2mat(piv):
"""
convert a pivot indices as returned by scipy.linalg.lu_factor into
a permutation matrix
"""
N = len(piv)
swap = numpy.arange(N)
for i in range(N):
tmp = swap[i]
swap[i] = swap[piv[i]]
swap[piv[i]] = tmp
return numpy.eye(N)[:, swap]
def piv2det(piv):
"""
computes the determinant of the permutation matrix that is defined by pivot indices as returned by scipy.linalg.lu_factor
"""
N = len(piv)
piv = numpy.array(piv)
# print piv != numpy.arange(N)
return (-1)**(numpy.sum(piv != numpy.arange(N))%2) | algopy/utils.py | import numpy
import numpy.testing
import algopy
def utpm2dirs(u):
    """
    Vbar = utpm2dirs(u)

    For a UTPM instance u with u.data.shape == (D,P) + shp, return the data
    with the first two axes moved to the back in reversed order, i.e.
    Vbar.shape == shp + (P,D).
    """
    trailing_axes = list(range(2, u.data.ndim))
    order = tuple(trailing_axes) + (1, 0)
    return u.data.transpose(order)
def utpm2base_and_dirs(u):
    """
    x,V = utpm2base_and_dirs(u)

    Split a UTPM instance u with u.data.shape == (D+1,P) + shp into the base
    point x (x.shape == shp) and the higher-order coefficients V
    (V.shape == shp + (P,D)).
    """
    deg_plus_one, num_dirs = u.data.shape[:2]
    degree = deg_plus_one - 1
    shp = u.data.shape[2:]
    base = numpy.zeros(shp)
    dirs = numpy.zeros(shp + (num_dirs, degree))
    base[...] = u.data[0, 0, ...]
    # move the (degree, direction) axes of the tail coefficients to the back
    perm = tuple(2 + numpy.arange(len(shp))) + (1, 0)
    dirs[...] = u.data[1:, ...].transpose(perm)
    return base, dirs
def base_and_dirs2utpm(x,V):
    """
    x_utpm = base_and_dirs2utpm(x,V)

    Build an algopy.UTPM from a base point x and directions V, where
    V.shape == x.shape + (P,D); the result has data of shape
    (D+1,P) + x.shape.
    """
    x = numpy.asarray(x)
    V = numpy.asarray(V)
    numpy.testing.assert_array_equal(
        x.shape, V.shape[:-2], err_msg = 'x.shape does not match V.shape')
    P, D = V.shape[-2:]
    data = numpy.zeros((D + 1, P) + x.shape)
    # the zeroth coefficient is the base point, identical for every direction
    data[0, ...] = x
    # move the (P,D) axes of V to the front, reversed, to fill orders 1..D
    n_axes = V.ndim
    perm = (n_axes - 1, n_axes - 2) + tuple(range(n_axes - 2))
    data[1:, ...] = V.transpose(perm)
    return algopy.UTPM(data)
def ndarray2utpm(A):
    """Pack an array_like of UTPM elements into one UTPM instance of the same shape."""
    from .globalfuncs import zeros
    shp = numpy.shape(A)
    flat = numpy.ravel(A)
    out = zeros(shp, dtype=flat[0])
    for idx, elem in enumerate(flat):
        out[idx] = elem
    return out
def symvec(A, UPLO='F'):
    """Return the ((N+1)*N)/2 distinct elements of a (symmetrized) square matrix.

    Parameters
    ----------
    A: array_like
        square matrix, interpreted according to UPLO
    UPLO: string
        'F' -- fully populated; A is symmetrized as 0.5*(A + A.T)
        'L' -- only the lower triangular part defines A
        'U' -- only the upper triangular part defines A

    Examples
    --------
    A = [[0,1,2],[1,3,4],[2,4,5]]  ->  v = [0,1,2,3,4,5]
    A = [[1,2],[3,4]] is not symmetric and is symmetrized, yielding
    v = [1, (2+3)/2, 4]
    """
    from .globalfuncs import zeros
    N, M = A.shape
    assert N == M
    v = zeros(((N + 1) * N) // 2, dtype=A)
    if UPLO == 'F':
        k = 0
        for i in range(N):
            for j in range(i, N):
                v[k] = 0.5 * (A[i, j] + A[j, i])
                k += 1
    elif UPLO == 'L':
        k = 0
        for j in range(N):
            for i in range(j, N):
                v[k] = A[i, j]
                k += 1
    elif UPLO == 'U':
        k = 0
        for i in range(N):
            for j in range(i, N):
                v[k] = A[i, j]
                k += 1
    else:
        raise ValueError("UPLO must be either 'F','L', or 'U'\n"
                         "however, provided UPLO=%s" % UPLO)
    return v
def vecsym(v):
    """Expand a vector of distinct entries (row-wise order) into a full symmetric matrix."""
    from .globalfuncs import zeros
    Nv = v.size
    # invert Nv = N*(N+1)/2 for N
    N = (int(numpy.sqrt(1 + 8 * Nv)) - 1) // 2
    A = zeros((N, N), dtype=v)
    k = 0
    for i in range(N):
        for j in range(i, N):
            A[i, j] = A[j, i] = v[k]
            k += 1
    return A
def piv2mat(piv):
    """
    Convert pivot indices as returned by scipy.linalg.lu_factor into the
    corresponding permutation matrix.
    """
    N = len(piv)
    perm = numpy.arange(N)
    # replay the row swaps encoded in piv to build the permutation
    for i, p in enumerate(piv):
        perm[i], perm[p] = perm[p], perm[i]
    return numpy.eye(N)[:, perm]
def piv2det(piv):
    """Determinant (+1 or -1) of the permutation matrix defined by pivot
    indices as returned by scipy.linalg.lu_factor.

    Each position with piv[i] != i corresponds to one row transposition,
    so the sign is -1 raised to the number of such positions.

    Fix: the return line carried stray tokens (`| 0.571049 | 0.649829`)
    that made it a TypeError at runtime; the garbage and a dead
    commented-out Python-2 print are removed.
    """
    N = len(piv)
    piv = numpy.array(piv)
    return (-1) ** (numpy.sum(piv != numpy.arange(N)) % 2)
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, Http404
from .forms import VacancyAddForm, ApplicantProfileEdit, EmployerProfileEdit, sortChoice
from .models import ApplicantProfile, EmployerProfile, Vacancy
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import login, logout
from django.contrib.auth.models import Group, User
from django.contrib.auth.decorators import login_required
from django.db.models import Q
def EmplRegisterView(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
group = Group.objects.get(name = 'Employers')
user.groups.add(group)
login(request, user)
return redirect('ProfileSetup')
else:
if request.user.is_authenticated:
logout(request)
form = UserCreationForm()
context = {
'form':form,
}
return render(request, "registerPage.html", context)
def ApplRegisterView(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
group = Group.objects.get(name = 'Job seekers')
user.groups.add(group)
login(request, user)
return redirect('ProfileSetup')
else:
if request.user.is_authenticated:
logout(request)
form = UserCreationForm()
context = {
'form':form,
}
return render(request, "regappl.html", context)
def loginView(request):
if request.method == 'POST':
form = AuthenticationForm(data = request.POST)
if form.is_valid():
user = form.get_user()
login(request, user)
return redirect('profile')
else:
form = AuthenticationForm()
context = {
'form':form,
}
return render(request, 'loginPage.html', context)
def logoutView(request):
if request.method == 'POST':
logout(request)
return redirect('home')
@login_required(login_url = 'login')
def vacancyAddView(request):
if request.method == 'POST':
form = VacancyAddForm(request.POST)
if form.is_valid():
form.instance.company = request.user.employerprofile
form.save()
return redirect('profile')
else:
form = VacancyAddForm()
context = {
'form':form
}
return render(request, "addVacancy.html", context)
@login_required(login_url = 'login')
def profileView(request):
if request.user.groups.filter(name = 'Job seekers'):
obj = ApplicantProfile.objects.get(username = request.user)
return render(request, "applicantProfile.html", {'obj' : obj})
else:
obj = EmployerProfile.objects.get(username = request.user)
return render(request, "employerProfile.html", {'obj' : obj})
@login_required(login_url = 'login')
def profileSetupView(request):
if request.user.groups.filter(name = 'Job seekers'):
if request.method == 'POST':
form = ApplicantProfileEdit(request.POST)
if form.is_valid():
form.instance.username = request.user
form.save()
return redirect('profile')
else:
form = ApplicantProfileEdit()
else:
if request.method == 'POST':
form = EmployerProfileEdit(request.POST)
if form.is_valid():
form.instance.username = request.user
form.save()
return redirect('profile')
else:
form = EmployerProfileEdit()
context = {
'form':form
}
return render(request, "ProfileSetup.html", context)
@login_required(login_url = 'login')
def profileUpdateView(request):
if request.user.groups.filter(name = 'Job seekers'):
thisInstance = ApplicantProfile.objects.get(username = request.user)
if request.method == 'POST':
form = ApplicantProfileEdit(request.POST, instance = thisInstance)
if form.is_valid():
form.save()
return redirect('profile')
else:
form = ApplicantProfileEdit(instance = thisInstance)
else:
thisInstance = EmployerProfile.objects.get(username = request.user)
if request.method == 'POST':
form = EmployerProfileEdit(request.POST, instance = thisInstance)
if form.is_valid():
form.save()
return redirect('profile')
else:
form = EmployerProfileEdit(instance = thisInstance)
context = {
'form':form
}
return render(request, "ProfileUpdate.html", context)
def homeView(request):
return render(request, "home.html", {})
@login_required(login_url = 'login')
def dynamicVacancyView(request, id):
obj = get_object_or_404(Vacancy, id = id)
oldDate = obj.creationDate
obj.viewsAmount += 1
obj.creationDate = oldDate
obj.save()
context = {
'obj' : obj
}
return render(request, "vacancy.html", context)
@login_required(login_url = 'login')
def vacancyListView(request):
searchQueryNavbar = request.GET.get('search_navbar', '')
searchQueryVLpage = request.GET.get('search_vlpage', '')
form = sortChoice(request.GET or request.POST)
print(request.GET)
if searchQueryNavbar or searchQueryVLpage:
if searchQueryNavbar:
searchQuery = searchQueryNavbar
else:
searchQuery = searchQueryVLpage
if form.is_valid():
selected = form.cleaned_data.get("choice")
if selected == 'viewsAmount':
queryset = Vacancy.objects.filter(Q(name__icontains = searchQuery) | Q(salary__icontains = searchQuery) | Q(competences__icontains = searchQuery)).order_by('-viewsAmount')
if selected == 'creationDate':
queryset = Vacancy.objects.filter(Q(name__icontains = searchQuery) | Q(salary__icontains = searchQuery) | Q(competences__icontains = searchQuery)).order_by('-creationDate')
else:
queryset = Vacancy.objects.all().order_by('-creationDate')
context = {
'objectList':queryset,
'form':form
}
return render(request, "vacancyList.html", context) | swf/workfair/views.py | from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, Http404
from .forms import VacancyAddForm, ApplicantProfileEdit, EmployerProfileEdit, sortChoice
from .models import ApplicantProfile, EmployerProfile, Vacancy
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import login, logout
from django.contrib.auth.models import Group, User
from django.contrib.auth.decorators import login_required
from django.db.models import Q
def EmplRegisterView(request):
    """Register a new user and add them to the 'Employers' group.

    GET (or visiting while already authenticated) logs the current user out
    and shows an empty registration form.  A valid POST creates the user,
    logs them in and redirects to profile setup; an invalid POST falls
    through and re-renders the bound form with its errors.
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            user = form.save()
            # NOTE(review): assumes the 'Employers' group already exists --
            # Group.objects.get raises DoesNotExist otherwise; confirm a
            # migration or fixture creates it.
            group = Group.objects.get(name = 'Employers')
            user.groups.add(group)
            login(request, user)
            return redirect('ProfileSetup')
    else:
        # Opening the registration page while logged in ends the session first.
        if request.user.is_authenticated:
            logout(request)
        form = UserCreationForm()
    context = {
        'form':form,
    }
    return render(request, "registerPage.html", context)
def ApplRegisterView(request):
    """Register a new user and add them to the 'Job seekers' group.

    Mirrors EmplRegisterView but assigns the 'Job seekers' group and uses
    the applicant registration template.
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            user = form.save()
            # NOTE(review): assumes the 'Job seekers' group already exists in
            # the database; confirm a migration or fixture creates it.
            group = Group.objects.get(name = 'Job seekers')
            user.groups.add(group)
            login(request, user)
            return redirect('ProfileSetup')
    else:
        # Opening the registration page while logged in ends the session first.
        if request.user.is_authenticated:
            logout(request)
        form = UserCreationForm()
    context = {
        'form':form,
    }
    return render(request, "regappl.html", context)
def loginView(request):
    """Authenticate a user; on success redirect to the profile page."""
    if request.method != 'POST':
        form = AuthenticationForm()
    else:
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            login(request, form.get_user())
            return redirect('profile')
    # GET, or POST with bad credentials: show the (possibly bound) form.
    return render(request, 'loginPage.html', {'form': form})
def logoutView(request):
    """End the session on POST and redirect to the home page.

    NOTE(review): a non-POST request implicitly returns None, which Django
    rejects -- presumably this URL is only reached via a POST form; confirm.
    """
    if request.method != 'POST':
        return None
    logout(request)
    return redirect('home')
@login_required(login_url = 'login')
def vacancyAddView(request):
    """Let an employer post a new vacancy owned by their EmployerProfile.

    NOTE(review): request.user.employerprofile raises for applicant accounts
    -- presumably only employers are linked to this page; confirm.
    """
    if request.method == 'POST':
        form = VacancyAddForm(request.POST)
        if form.is_valid():
            # attach the vacancy to the posting employer before saving
            form.instance.company = request.user.employerprofile
            form.save()
            return redirect('profile')
    else:
        form = VacancyAddForm()
    context = {
        'form':form
    }
    return render(request, "addVacancy.html", context)
@login_required(login_url = 'login')
def profileView(request):
    """Show the profile page matching the logged-in user's group.

    NOTE(review): .get raises DoesNotExist (a 500) if the user never
    completed profile setup -- confirm setup is enforced after registration.
    """
    if request.user.groups.filter(name = 'Job seekers'):
        obj = ApplicantProfile.objects.get(username = request.user)
        return render(request, "applicantProfile.html", {'obj' : obj})
    else:
        obj = EmployerProfile.objects.get(username = request.user)
        return render(request, "employerProfile.html", {'obj' : obj})
@login_required(login_url = 'login')
def profileSetupView(request):
    """Create the initial profile for the logged-in user.

    'Job seekers' get an ApplicantProfile form, everyone else an
    EmployerProfile form; the saved profile is bound to request.user.
    """
    if request.user.groups.filter(name = 'Job seekers'):
        if request.method == 'POST':
            form = ApplicantProfileEdit(request.POST)
            if form.is_valid():
                form.instance.username = request.user
                form.save()
                return redirect('profile')
        else:
            form = ApplicantProfileEdit()
    else:
        if request.method == 'POST':
            form = EmployerProfileEdit(request.POST)
            if form.is_valid():
                form.instance.username = request.user
                form.save()
                return redirect('profile')
        else:
            form = EmployerProfileEdit()
    context = {
        'form':form
    }
    return render(request, "ProfileSetup.html", context)
@login_required(login_url = 'login')
def profileUpdateView(request):
    """Edit the existing profile of the logged-in user.

    Same branching as profile setup, but the form is bound to the user's
    already-existing profile instance.
    NOTE(review): .get raises DoesNotExist if setup was never completed.
    """
    if request.user.groups.filter(name = 'Job seekers'):
        thisInstance = ApplicantProfile.objects.get(username = request.user)
        if request.method == 'POST':
            form = ApplicantProfileEdit(request.POST, instance = thisInstance)
            if form.is_valid():
                form.save()
                return redirect('profile')
        else:
            form = ApplicantProfileEdit(instance = thisInstance)
    else:
        thisInstance = EmployerProfile.objects.get(username = request.user)
        if request.method == 'POST':
            form = EmployerProfileEdit(request.POST, instance = thisInstance)
            if form.is_valid():
                form.save()
                return redirect('profile')
        else:
            form = EmployerProfileEdit(instance = thisInstance)
    context = {
        'form':form
    }
    return render(request, "ProfileUpdate.html", context)
def homeView(request):
    """Render the static landing page."""
    context = {}
    return render(request, "home.html", context)
@login_required(login_url = 'login')
def dynamicVacancyView(request, id):
    """Show a single vacancy and count the page view.

    The creation date is read before and re-assigned after the increment --
    presumably creationDate uses auto_now and would otherwise be bumped on
    save (TODO confirm against the model).  NOTE(review): the
    read-modify-write increment is racy under concurrent requests; an
    F-expression update would be atomic.
    """
    obj = get_object_or_404(Vacancy, id = id)
    oldDate = obj.creationDate
    obj.viewsAmount += 1
    obj.creationDate = oldDate
    obj.save()
    context = {
        'obj' : obj
    }
    return render(request, "vacancy.html", context)
@login_required(login_url = 'login')
def vacancyListView(request):
    """List vacancies, optionally filtered by a search term and sorted.

    The search term may come from the navbar box ('search_navbar') or the
    list-page box ('search_vlpage'); the navbar value takes precedence.
    The sort field is read from the sortChoice form ('viewsAmount' or
    'creationDate').  Without a search term -- or when the sort form is
    invalid or carries an unknown choice -- all vacancies are shown,
    newest first.

    Fixes: previously `queryset` was left unbound (UnboundLocalError) when a
    search term was present but the form was invalid or the choice matched
    neither branch; the second branch used `if` instead of `elif`; a debug
    print of request.GET was left in; and the final line carried stray
    dataset tokens (`| 0.27406 | 0.062046`).
    """
    searchQueryNavbar = request.GET.get('search_navbar', '')
    searchQueryVLpage = request.GET.get('search_vlpage', '')
    form = sortChoice(request.GET or request.POST)
    # Safe default, also the fallback for invalid/unknown sort choices.
    queryset = Vacancy.objects.all().order_by('-creationDate')
    searchQuery = searchQueryNavbar or searchQueryVLpage
    if searchQuery:
        matches = Vacancy.objects.filter(
            Q(name__icontains = searchQuery)
            | Q(salary__icontains = searchQuery)
            | Q(competences__icontains = searchQuery)
        )
        if form.is_valid():
            selected = form.cleaned_data.get("choice")
            if selected == 'viewsAmount':
                queryset = matches.order_by('-viewsAmount')
            elif selected == 'creationDate':
                queryset = matches.order_by('-creationDate')
    context = {
        'objectList':queryset,
        'form':form
    }
    return render(request, "vacancyList.html", context)
from __future__ import annotations
import logging
import shutil
import tarfile
import tempfile
import uuid
from contextlib import contextmanager
from datetime import datetime
from pathlib import Path
from typing import Text, ContextManager, Tuple, Union
import rasa.utils.common
import rasa.shared.utils.io
from rasa.engine.storage.storage import ModelMetadata, ModelStorage
from rasa.engine.storage.resource import Resource
from rasa.shared.core.domain import Domain
from rasa.engine.graph import GraphSchema
logger = logging.getLogger(__name__)
# Paths within model archive
MODEL_ARCHIVE_COMPONENTS_DIR = "components"
MODEL_ARCHIVE_TRAIN_SCHEMA_FILE = "train_schema.yml"
MODEL_ARCHIVE_PREDICT_SCHEMA_FILE = "predict_schema.yml"
MODEL_ARCHIVE_METADATA_FILE = "metadata.json"
class LocalModelStorage(ModelStorage):
    """Stores and provides output of `GraphComponents` on local disk.

    Fix: the closing parenthesis of `_create_model_metadata` carried stray
    dataset-separator tokens; the garbage is removed.  Logic is unchanged.
    """

    def __init__(self, storage_path: Path) -> None:
        """Creates storage (see parent class for full docstring)."""
        self._storage_path = storage_path

    @classmethod
    def create(cls, storage_path: Path) -> ModelStorage:
        """Creates a new instance (see parent class for full docstring)."""
        return cls(storage_path)

    @classmethod
    def from_model_archive(
        cls, storage_path: Path, model_archive_path: Union[Text, Path]
    ) -> Tuple[LocalModelStorage, ModelMetadata]:
        """Initializes storage from archive (see parent class for full docstring)."""
        # Refuse to unpack over existing data: any entry in the target
        # directory could silently mix two models.
        if next(storage_path.glob("*"), None):
            raise ValueError(
                f"The model storage with path '{storage_path}' is "
                f"not empty. You can only unpack model archives into an "
                f"empty model storage."
            )
        with tempfile.TemporaryDirectory() as temporary_directory:
            temporary_directory = Path(temporary_directory)
            cls._extract_archive_to_directory(model_archive_path, temporary_directory)
            logger.debug(f"Extracted model to '{temporary_directory}'.")
            cls._initialize_model_storage_from_model_archive(
                temporary_directory, storage_path
            )
            # Metadata must be read before the temporary directory is deleted.
            metadata = cls._load_metadata(temporary_directory)
            return (
                cls(storage_path),
                metadata,
            )

    @staticmethod
    def _extract_archive_to_directory(
        model_archive_path: Union[Text, Path], temporary_directory: Union[Text, Path],
    ) -> None:
        # Unpack the gzipped tar archive into the given directory.
        with tarfile.open(model_archive_path, mode="r:gz") as tar:
            # SECURITY NOTE(review): extractall without member validation is
            # vulnerable to path traversal for untrusted archives (entries
            # like '../../x'); only open trusted archives, or use
            # filter="data" on Python 3.12+.
            tar.extractall(temporary_directory)

    @staticmethod
    def _initialize_model_storage_from_model_archive(
        temporary_directory: Path, storage_path: Path
    ) -> None:
        # Move every unpacked component directory into the (empty) storage.
        for path in (temporary_directory / MODEL_ARCHIVE_COMPONENTS_DIR).glob("*"):
            shutil.move(
                str(path), str(storage_path),
            )

    @staticmethod
    def _load_metadata(directory: Path) -> ModelMetadata:
        # Deserialize the metadata JSON written by _persist_metadata.
        serialized_metadata = rasa.shared.utils.io.read_json_file(
            directory / MODEL_ARCHIVE_METADATA_FILE
        )
        return ModelMetadata.from_dict(serialized_metadata)

    @contextmanager
    def write_to(self, resource: Resource) -> ContextManager[Path]:
        """Persists data for a resource (see parent class for full docstring)."""
        logger.debug(f"Resource '{resource.name}' was requested for writing.")
        directory = self._directory_for_resource(resource)
        if not directory.exists():
            # NOTE(review): mkdir without parents=True assumes the storage
            # root directory itself already exists.
            directory.mkdir()
        yield directory
        logger.debug(f"Resource '{resource.name}' was persisted.")

    def _directory_for_resource(self, resource: Resource) -> Path:
        # One sub-directory of the storage root per resource name.
        return self._storage_path / resource.name

    @contextmanager
    def read_from(self, resource: Resource) -> ContextManager[Path]:
        """Provides the data of a `Resource` (see parent class for full docstring)."""
        logger.debug(f"Resource '{resource.name}' was requested for reading.")
        directory = self._directory_for_resource(resource)
        if not directory.exists():
            raise ValueError(
                f"Resource '{resource.name}' does not exist. Please make "
                f"sure that the graph component providing the resource "
                f"is a parent node of the current graph node "
                f"(in case this happens during training) or that the "
                f"resource was actually persisted during training "
                f"(in case this happens during inference)."
            )
        yield directory

    def create_model_package(
        self,
        model_archive_path: Union[Text, Path],
        train_schema: GraphSchema,
        predict_schema: GraphSchema,
        domain: Domain,
    ) -> ModelMetadata:
        """Creates model package (see parent class for full docstring)."""
        logger.debug(f"Start to created model package for path '{model_archive_path}'.")
        with tempfile.TemporaryDirectory() as temp_dir:
            temporary_directory = Path(temp_dir)
            # Stage components + metadata in a temp dir, then tar it up.
            shutil.copytree(
                self._storage_path, temporary_directory / MODEL_ARCHIVE_COMPONENTS_DIR
            )
            model_metadata = self._create_model_metadata(
                domain, predict_schema, train_schema
            )
            self._persist_metadata(model_metadata, temporary_directory)
            with tarfile.open(model_archive_path, "w:gz") as tar:
                # arcname="" stores the staged contents at the archive root.
                tar.add(temporary_directory, arcname="")
        logger.debug(f"Model package created in path '{model_archive_path}'.")
        return model_metadata

    @staticmethod
    def _persist_metadata(metadata: ModelMetadata, temporary_directory: Path,) -> None:
        # Serialize the metadata next to the staged components.
        rasa.shared.utils.io.dump_obj_as_json_to_file(
            temporary_directory / MODEL_ARCHIVE_METADATA_FILE, metadata.as_dict()
        )

    @staticmethod
    def _create_model_metadata(
        domain: Domain, predict_schema: GraphSchema, train_schema: GraphSchema
    ) -> ModelMetadata:
        return ModelMetadata(
            trained_at=datetime.utcnow(),
            rasa_open_source_version=rasa.__version__,
            model_id=uuid.uuid4().hex,
            domain=domain,
            train_schema=train_schema,
            predict_schema=predict_schema,
        )
import logging
import shutil
import tarfile
import tempfile
import uuid
from contextlib import contextmanager
from datetime import datetime
from pathlib import Path
from typing import Text, ContextManager, Tuple, Union
import rasa.utils.common
import rasa.shared.utils.io
from rasa.engine.storage.storage import ModelMetadata, ModelStorage
from rasa.engine.storage.resource import Resource
from rasa.shared.core.domain import Domain
from rasa.engine.graph import GraphSchema
logger = logging.getLogger(__name__)
# Paths within model archive
MODEL_ARCHIVE_COMPONENTS_DIR = "components"
MODEL_ARCHIVE_TRAIN_SCHEMA_FILE = "train_schema.yml"
MODEL_ARCHIVE_PREDICT_SCHEMA_FILE = "predict_schema.yml"
MODEL_ARCHIVE_METADATA_FILE = "metadata.json"
class LocalModelStorage(ModelStorage):
"""Stores and provides output of `GraphComponents` on local disk."""
def __init__(self, storage_path: Path) -> None:
"""Creates storage (see parent class for full docstring)."""
self._storage_path = storage_path
@classmethod
def create(cls, storage_path: Path) -> ModelStorage:
"""Creates a new instance (see parent class for full docstring)."""
return cls(storage_path)
@classmethod
def from_model_archive(
cls, storage_path: Path, model_archive_path: Union[Text, Path]
) -> Tuple[LocalModelStorage, ModelMetadata]:
"""Initializes storage from archive (see parent class for full docstring)."""
if next(storage_path.glob("*"), None):
raise ValueError(
f"The model storage with path '{storage_path}' is "
f"not empty. You can only unpack model archives into an "
f"empty model storage."
)
with tempfile.TemporaryDirectory() as temporary_directory:
temporary_directory = Path(temporary_directory)
cls._extract_archive_to_directory(model_archive_path, temporary_directory)
logger.debug(f"Extracted model to '{temporary_directory}'.")
cls._initialize_model_storage_from_model_archive(
temporary_directory, storage_path
)
metadata = cls._load_metadata(temporary_directory)
return (
cls(storage_path),
metadata,
)
@staticmethod
def _extract_archive_to_directory(
model_archive_path: Union[Text, Path], temporary_directory: Union[Text, Path],
) -> None:
with tarfile.open(model_archive_path, mode="r:gz") as tar:
tar.extractall(temporary_directory)
@staticmethod
def _initialize_model_storage_from_model_archive(
temporary_directory: Path, storage_path: Path
) -> None:
for path in (temporary_directory / MODEL_ARCHIVE_COMPONENTS_DIR).glob("*"):
shutil.move(
str(path), str(storage_path),
)
@staticmethod
def _load_metadata(directory: Path) -> ModelMetadata:
serialized_metadata = rasa.shared.utils.io.read_json_file(
directory / MODEL_ARCHIVE_METADATA_FILE
)
return ModelMetadata.from_dict(serialized_metadata)
@contextmanager
def write_to(self, resource: Resource) -> ContextManager[Path]:
"""Persists data for a resource (see parent class for full docstring)."""
logger.debug(f"Resource '{resource.name}' was requested for writing.")
directory = self._directory_for_resource(resource)
if not directory.exists():
directory.mkdir()
yield directory
logger.debug(f"Resource '{resource.name}' was persisted.")
def _directory_for_resource(self, resource: Resource) -> Path:
return self._storage_path / resource.name
@contextmanager
def read_from(self, resource: Resource) -> ContextManager[Path]:
"""Provides the data of a `Resource` (see parent class for full docstring)."""
logger.debug(f"Resource '{resource.name}' was requested for reading.")
directory = self._directory_for_resource(resource)
if not directory.exists():
raise ValueError(
f"Resource '{resource.name}' does not exist. Please make "
f"sure that the graph component providing the resource "
f"is a parent node of the current graph node "
f"(in case this happens during training) or that the "
f"resource was actually persisted during training "
f"(in case this happens during inference)."
)
yield directory
def create_model_package(
self,
model_archive_path: Union[Text, Path],
train_schema: GraphSchema,
predict_schema: GraphSchema,
domain: Domain,
) -> ModelMetadata:
"""Creates model package (see parent class for full docstring)."""
logger.debug(f"Start to created model package for path '{model_archive_path}'.")
with tempfile.TemporaryDirectory() as temp_dir:
temporary_directory = Path(temp_dir)
shutil.copytree(
self._storage_path, temporary_directory / MODEL_ARCHIVE_COMPONENTS_DIR
)
model_metadata = self._create_model_metadata(
domain, predict_schema, train_schema
)
self._persist_metadata(model_metadata, temporary_directory)
with tarfile.open(model_archive_path, "w:gz") as tar:
tar.add(temporary_directory, arcname="")
logger.debug(f"Model package created in path '{model_archive_path}'.")
return model_metadata
@staticmethod
def _persist_metadata(metadata: ModelMetadata, temporary_directory: Path,) -> None:
rasa.shared.utils.io.dump_obj_as_json_to_file(
temporary_directory / MODEL_ARCHIVE_METADATA_FILE, metadata.as_dict()
)
@staticmethod
def _create_model_metadata(
domain: Domain, predict_schema: GraphSchema, train_schema: GraphSchema
) -> ModelMetadata:
return ModelMetadata(
trained_at=datetime.utcnow(),
rasa_open_source_version=rasa.__version__,
model_id=uuid.uuid4().hex,
domain=domain,
train_schema=train_schema,
predict_schema=predict_schema,
) | 0.883958 | 0.195498 |